serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
7,601 | #include <iostream>
// Empty kernel: exists only to exercise a CUDA kernel launch.
__global__ void myfirstkernel() {}
int main() {
// Fire-and-forget launch of the empty kernel: no synchronization or error
// check follows, so any launch failure is silent.
// NOTE(review): printf relies on <cstdio> being pulled in transitively via
// <iostream> — confirm.
myfirstkernel<<<1, 1>>>();
printf("Hello world");
return 0;
} |
7,602 | // test.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with a file/line diagnostic when a CUDA runtime call returned an
// error code. Compiled in only when CUDA_ERROR_CHECK is defined; callers
// go through the CudaSafeCall() macro, which supplies __FILE__/__LINE__.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Surface the most recent CUDA error (e.g. a bad kernel launch) and then
// synchronize the device so asynchronous execution errors are caught here
// too. Active only when CUDA_ERROR_CHECK is defined; used via the
// CudaCheckError() macro.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    // Heavier check: block until the device is idle so in-kernel faults
    // are reported here. Remove if the sync cost matters.
    err = cudaDeviceSynchronize();
    if ( err != cudaSuccess )
    {
        fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
}
// Computes the orbital eccentricity of particles 1..numParticles-1 relative
// to the central body stored at index 0.
// r: positions (x,y,z triplet per particle), v: velocities (x,y,z triplets),
// m: one mass per particle, ecc: output, one value per non-central particle.
// Runs serially in a single thread — matches the <<<1, 1>>> launch in main
// (the commented-out lines show the original per-thread variant).
// NOTE(review): mu = m[0] + m[id+1] implies units with G = 1 — confirm.
// NOTE(review): v is used as-is (not relative to the central body), which is
// only correct while the central body is at rest — confirm.
__global__ void calcEccentricity(double *r, double *v, double *m, double *ecc, int numParticles)
{
//size_t id = blockIdx.x * blockDim.x + threadIdx.x;
double L[3]; // angular momentum
double eccTemp[3]; // hold components of eccentricity vector
double mu; // standard gravitational parameter
double invdist; // inverse distance between particle and central planet
//if (id < numParticles - 1)
for (int id = 0; id < numParticles - 1; id++)
{
mu = m[0] + m[id+1];
// 1/|r_rel| via rsqrt of the squared separation from the central body.
invdist = rsqrt((r[3*(id+1)]-r[0])*(r[3*(id+1)]-r[0])+\
(r[3*(id+1)+1]-r[1])*(r[3*(id+1)+1]-r[1])+\
(r[3*(id+1)+2]-r[2])*(r[3*(id+1)+2]-r[2]));
// L = r_rel x v (specific angular momentum components).
L[0] = (r[3*(id+1)+1]-r[1])*v[3*(id+1)+2] - (r[3*(id+1)+2]-r[2])*v[3*(id+1)+1];
L[1] = (r[3*(id+1)+2]-r[2])*v[3*(id+1)] - (r[3*(id+1)]-r[0])*v[3*(id+1)+2];
L[2] = (r[3*(id+1)]-r[0])*v[3*(id+1)+1] - (r[3*(id+1)+1]-r[1])*v[3*(id+1)];
// Eccentricity vector e = (v x L)/mu - r_hat.
eccTemp[0] = (1./mu) * (v[3*(id+1)+1]*L[2] - v[3*(id+1)+2]*L[1]) - (r[3*(id+1)]-r[0]) * invdist;
eccTemp[1] = (1./mu) * (v[3*(id+1)+2]*L[0] - v[3*(id+1)]*L[2]) - (r[3*(id+1)+1]-r[1]) * invdist;
eccTemp[2] = (1./mu) * (v[3*(id+1)]*L[1] - v[3*(id+1)+1]*L[0]) - (r[3*(id+1)+2]-r[2]) * invdist;
ecc[id] = sqrt(eccTemp[0]*eccTemp[0] + eccTemp[1]*eccTemp[1] + eccTemp[2]*eccTemp[2]); // real eccentricity
}
}
// Host driver for a two-body test: a unit-mass central body at the origin
// and one small planet, for which calcEccentricity should recover the
// known eccentricity printed at the end.
int main()
{
    int numParticles = 2;
    // Positions/velocities store an (x, y, z) triplet per particle, so the
    // per-particle arrays (masses, eccentricities) need N_bytes/3.
    size_t N_bytes = 3 * numParticles * sizeof(double);
    double *r_h = (double*)malloc(N_bytes);
    double *v_h = (double*)malloc(N_bytes);
    double *m_h = (double*)malloc(N_bytes/3);
    double *ecc_h = (double*)malloc(N_bytes/3);
    // Central body at rest at the origin; planet offset along x with a
    // tangential velocity along y.
    r_h[0] = 0, r_h[1] = 0, r_h[2] = 0, r_h[3] = 0.1882315144676964, r_h[4] = 0, r_h[5] = 0;
    v_h[0] = 0, v_h[1] = 0, v_h[2] = 0, v_h[3] = 0, v_h[4] = 2.2517605710860709, v_h[5] = 0;
    m_h[0] = 1, m_h[1] = 0.0000002100632244;
    ecc_h[0] = 0, ecc_h[1] = 0;
    printf("R\n");
    for (int i = 0; i < numParticles; i++)
    {
        printf("%.16lf %.16lf %.16lf\n", r_h[3*i], r_h[3*i+1], r_h[3*i+2]);
    }
    printf("V\n");
    for (int i = 0; i < numParticles; i++)
    {
        printf("%.16lf %.16lf %.16lf\n", v_h[3*i], v_h[3*i+1], v_h[3*i+2]);
    }
    printf("M\n");
    printf("%.16lf %.16lf\n", m_h[0], m_h[1]);
    printf("Initial Eccentricity Array\n");
    printf("%.16lf %.16lf\n", ecc_h[0], ecc_h[1]);
    printf("numParticles = %d\n", numParticles);
    // Allocate arrays on device
    double *r_d, *v_d, *m_d, *ecc_d;
    cudaMalloc((void**) &r_d, N_bytes);
    cudaMalloc((void**) &v_d, N_bytes);
    cudaMalloc((void**) &m_d, N_bytes/3);
    cudaMalloc((void**) &ecc_d, N_bytes/3);
    // Copy arrays from host to device
    cudaMemcpy(r_d, r_h, N_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(v_d, v_h, N_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(m_d, m_h, N_bytes/3, cudaMemcpyHostToDevice);
    cudaMemcpy(ecc_d, ecc_h, N_bytes/3, cudaMemcpyHostToDevice);
    // Single-thread launch: the kernel loops over all particles itself.
    calcEccentricity<<<1, 1>>>(r_d, v_d, m_d, ecc_d, numParticles);
    CudaCheckError();
    // Fix: copy back N_bytes/3 (one double per particle). The original
    // copied N_bytes, reading past the end of ecc_d and overflowing the
    // N_bytes/3-sized ecc_h heap buffer.
    cudaMemcpy(ecc_h, ecc_d, N_bytes/3, cudaMemcpyDeviceToHost);
    printf("Updated Eccentricity\n");
    printf("%.16lf %.16lf\n", ecc_h[0], ecc_h[1]);
    printf("What the eccentricity should be\n");
    printf("0.0455862977217524\n");
    cudaFree(r_d);
    cudaFree(v_d);
    cudaFree(m_d);
    cudaFree(ecc_d);
    free(r_h);
    free(v_h);
    free(m_h);
    free(ecc_h);
    return 0;
}
/*extern "C" {
void testrun(double *r_h, double *v_h, double *m_h, int numParticles, double *ecc_h)
{
size_t N_bytes = 3 * numParticles * sizeof(double);
printf("R\n");
for (int i = 0; i < numParticles; i++)
{
printf("%.16lf %.16lf %.16lf\n", r_h[3*i], r_h[3*i+1], r_h[3*i+2]);
}
printf("V\n");
for (int i = 0; i < numParticles; i++)
{
printf("%.16lf %.16lf %.16lf\n", v_h[3*i], v_h[3*i+1], v_h[3*i+2]);
}
printf("M\n");
printf("%.16lf %.16lf\n", m_h[0], m_h[1]);
printf("Initial Eccentricity Array\n");
printf("%.16lf %.16lf\n", ecc_h[0], ecc_h[1]);
printf("numParticles = %d\n", numParticles);
// Allocate arrays on device
double *r_d, *v_d, *m_d, *ecc_d;
cudaMalloc((void**) &r_d, N_bytes);
cudaMalloc((void**) &v_d, N_bytes);
cudaMalloc((void**) &m_d, N_bytes/3);
cudaMalloc((void**) &ecc_d, N_bytes/3);
// Copy arrays from host to device
cudaMemcpy(r_d, r_h, N_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(v_d, v_h, N_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(m_d, m_h, N_bytes/3, cudaMemcpyHostToDevice);
cudaMemcpy(ecc_d, ecc_h, N_bytes/3, cudaMemcpyHostToDevice);
calcEccentricity<<<1, 1>>>(r_d, v_d, m_d, ecc_d, numParticles);
cudaDeviceSynchronize();
CudaCheckError();
cudaMemcpy(ecc_h, ecc_d, N_bytes, cudaMemcpyDeviceToHost);
printf("Updated Eccentricity\n");
printf("%.16lf %.16lf\n", ecc_h[0], ecc_h[1]);
printf("What the eccentricity should be\n");
printf("0.0455862977217524\n");
cudaFree(r_d);
cudaFree(v_d);
cudaFree(m_d);
cudaFree(ecc_d);
}
}*/
|
7,603 | #include "includes.h"
// Element-wise product: c[i] = a[i] * b[i], one element per thread of a
// single block. Despite the name, only the per-element products are
// computed here — no reduction/sum is performed.
// NOTE(review): there is no bounds check, so blockDim.x must not exceed
// the array length.
__global__ void dot_product(float *a, float *b, float *c)
{
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
} |
7,604 | //pass
//--blockDim=[8,8] --gridDim=[1,1]
#include <cuda.h>
#define _2D_ACCESS(A, y, x, X_DIM) A[(y)*(X_DIM)+(x)]
//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////
//----------------------------------------------------------------------------
// File: TransitiveClosure.cpp
//
// Contains the implementation of algorithms which explores connectivity between
// nodes in a graph and determine shortest path.
// This is based on paper http://www.seas.upenn.edu/~kiderj/research/papers/APSP-gh08-fin-T.pdf
//----------------------------------------------------------------------------
// Defines to help with AMP->OpenCL translation
#define X_DIMENSION 0
#define Y_DIMENSION 1
// Constants - specifies tile size
#define TILE_SIZE (1 << 3)
// State of connection
#define UNCONNECTED 0
#define DIRECTLY_CONNECTED 1
#define INDIRECTLY_CONNECTED 2
#define num_vertices (1 << 6)
//----------------------------------------------------------------------------
// Stage2 - determine connectivity between vertexs' between 2 TILE - primary
// and current - current is along row or column of primary
//----------------------------------------------------------------------------
// Stage 2 of tiled transitive closure (Floyd-Warshall): combines the
// "primary" diagonal tile of pass `passnum` with one other tile.
// blockIdx.y selects whether the current tile lies in the primary's
// tile-row (0) or tile-column (non-zero); blockIdx.x picks which tile,
// skipping the primary itself. Block shape is TILE_SIZE x TILE_SIZE.
__global__ void transitive_closure_stage2_kernel(unsigned int* graph, int passnum)
{
// Load primary block into shared memory (primary_block_buffer)
__shared__ unsigned int primary_block_buffer[TILE_SIZE][TILE_SIZE];
int idxY = passnum * TILE_SIZE + threadIdx.y;
int idxX = passnum * TILE_SIZE + threadIdx.x;
primary_block_buffer[threadIdx.y][threadIdx.x] = _2D_ACCESS(graph, idxY, idxX, num_vertices);
// Load the current block into shared memory (curr_block_buffer)
__shared__ unsigned int curr_block_buffer[TILE_SIZE][TILE_SIZE];
unsigned int group_id0, group_id1;
if (blockIdx.y == 0)
{
// Row pass: current tile shares the primary's tile-row; step over the
// primary tile's own index.
group_id0 = passnum;
if (blockIdx.x < passnum)
{
group_id1 = blockIdx.x;
}
else
{
group_id1 = blockIdx.x + 1;
}
}
else
{
// Column pass: current tile shares the primary's tile-column.
group_id1 = passnum;
if (blockIdx.x < passnum)
{
group_id0 = blockIdx.x;
}
else
{
group_id0 = blockIdx.x + 1;
}
}
idxY = group_id0 * TILE_SIZE + threadIdx.y;
idxX = group_id1 * TILE_SIZE + threadIdx.x;
curr_block_buffer[threadIdx.y][threadIdx.x] = _2D_ACCESS(graph, idxY, idxX, num_vertices);
#ifndef MUTATION
/* BUGINJECT: REMOVE_BARRIER, DOWN */
// Barrier: both tiles must be fully staged before any thread reads them.
__syncthreads();
#endif
// Now perform the actual Floyd-Warshall algorithm on this block
for (unsigned int k = 0;
k < TILE_SIZE; ++k)
{
if ( curr_block_buffer[threadIdx.y][threadIdx.x] == UNCONNECTED)
{
if (blockIdx.y == 0)
{
if ( (primary_block_buffer[threadIdx.y][k] != UNCONNECTED) && (curr_block_buffer[k][threadIdx.x] != UNCONNECTED) )
{
// Record the intermediate vertex index, offset by the connection tag.
curr_block_buffer[threadIdx.y][threadIdx.x] = passnum*TILE_SIZE + k + INDIRECTLY_CONNECTED;
}
}
else
{
if ( (curr_block_buffer[threadIdx.y][k] != UNCONNECTED) && (primary_block_buffer[k][threadIdx.x] != UNCONNECTED) )
{
curr_block_buffer[threadIdx.y][threadIdx.x] = passnum*TILE_SIZE + k + INDIRECTLY_CONNECTED;
}
}
}
// Uniform barrier: every thread reaches this regardless of the branch above.
__syncthreads();
}
_2D_ACCESS(graph, idxY, idxX, num_vertices) = curr_block_buffer[threadIdx.y][threadIdx.x];
}
|
7,605 | #include <iostream>
#include <cstdlib>
#include <math.h>
/*
dst0 = src + 1;
dst1 = dst0 + 1;
dst1 = src + 2;
dst2 = dst1 + 1;
dst2 = dst0 + 2;
dst2 = src + 3;
*/
// Kernel function to copy the elements
// of one array to two more arrays.
// Copy-chain kernel over float4 elements:
//   dst0 = src + 1, dst1 = dst0 + 1 (= src + 2), dst2 = dst1 + 1 (= src + 3),
// applied component-wise. Grid-stride loop, so any launch size covers n.
__global__
void cpy_float4(int n, float4 *src, float4 *dst0, float4 *dst1, float4 *dst2)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride){
        // Fix: use float literals (1.0f) — the original `1.0` promoted
        // every addition to double inside a float kernel.
        dst0[i].x = src[i].x + 1.0f;
        dst0[i].y = src[i].y + 1.0f;
        dst0[i].z = src[i].z + 1.0f;
        dst0[i].w = src[i].w + 1.0f;
        dst1[i].x = dst0[i].x + 1.0f;
        dst1[i].y = dst0[i].y + 1.0f;
        dst1[i].z = dst0[i].z + 1.0f;
        dst1[i].w = dst0[i].w + 1.0f;
        dst2[i].x = dst1[i].x + 1.0f;
        dst2[i].y = dst1[i].y + 1.0f;
        dst2[i].z = dst1[i].z + 1.0f;
        dst2[i].w = dst1[i].w + 1.0f;
    }
}
// Zero-fill kernel: sets all four components of dst[0..n) to 0.
// Grid-stride loop, so the launch configuration need not match n.
__global__
void fill_float4(int n, float4 *dst)
{
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step){
        dst[i].x = 0;
        dst[i].y = 0;
        dst[i].z = 0;
        dst[i].w = 0;
    }
}
// Allocates four managed float4 arrays, zero-fills the source, runs the
// copy-chain kernel, then reports the largest per-component deviation from
// the expected values (+1, +2, +3 for y, z, w respectively).
int main(void)
{
    const int N = 1 << 20;
    float4 *x, *y, *z, *w;
    // Unified (managed) memory is visible to both host and device.
    cudaMallocManaged(&x, N * sizeof(float4));
    cudaMallocManaged(&y, N * sizeof(float4));
    cudaMallocManaged(&z, N * sizeof(float4));
    cudaMallocManaged(&w, N * sizeof(float4));
    const int blockSize = 256;
    const int numBlocks = (N + blockSize - 1) / blockSize;  // ceil-div
    // x := 0, then y/z/w := x+1 / x+2 / x+3.
    fill_float4<<<numBlocks, blockSize>>>(N, x);
    cpy_float4<<<numBlocks, blockSize>>>(N, x, y, z, w);
    // Wait for the GPU before the host touches the managed buffers.
    cudaDeviceSynchronize();
    double maxError0 = 0.0, maxError1 = 0.0, maxError2 = 0.0, maxError3 = 0.0;
    for (int i = 0; i < N; i++){
        maxError0 = fmax(maxError0, fabs(y[i].x - 1.0));
        maxError0 = fmax(maxError0, fabs(z[i].x - 2.0));
        maxError0 = fmax(maxError0, fabs(w[i].x - 3.0));
        maxError1 = fmax(maxError1, fabs(y[i].y - 1.0));
        maxError1 = fmax(maxError1, fabs(z[i].y - 2.0));
        maxError1 = fmax(maxError1, fabs(w[i].y - 3.0));
        maxError2 = fmax(maxError2, fabs(y[i].z - 1.0));
        maxError2 = fmax(maxError2, fabs(z[i].z - 2.0));
        maxError2 = fmax(maxError2, fabs(w[i].z - 3.0));
        maxError3 = fmax(maxError3, fabs(y[i].w - 1.0));
        maxError3 = fmax(maxError3, fabs(z[i].w - 2.0));
        maxError3 = fmax(maxError3, fabs(w[i].w - 3.0));
    }
    std::cout << "Max error0: " << maxError0 << std::endl;
    std::cout << "Max error1: " << maxError1 << std::endl;
    std::cout << "Max error2: " << maxError2 << std::endl;
    std::cout << "Max error3: " << maxError3 << std::endl;
    // Release the managed buffers.
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    cudaFree(w);
    return 0;
}
|
7,606 | #include <stdio.h>
#include <cuda.h>
#define BLOCKSIZE 1024
// Demonstrates shared-memory communication within a block: thread 0 zeroes
// s, thread 1 adds 1, thread 100 adds 2, with a barrier between each step,
// so thread 0 of every block prints s=3.
// NOTE(review): s is unsigned but printed with %d — harmless for these
// values, but %u would be the matching specifier.
__global__ void dkernel() {
__shared__ unsigned s;
if (threadIdx.x == 0) s = 0;
__syncthreads();
if (threadIdx.x == 1) s += 1;
__syncthreads();
if (threadIdx.x == 100) s += 2;
__syncthreads();
if (threadIdx.x == 0) printf("s=%d\n", s);
}
// Launches dkernel ten times on two blocks of BLOCKSIZE threads, blocking
// after each launch so the device-side printf output appears in order.
int main() {
    for (int iter = 0; iter < 10; ++iter) {
        dkernel<<<2, BLOCKSIZE>>>();
        cudaDeviceSynchronize();
    }
}
|
7,607 | #include "includes.h"
// Normalizes a ByteMaskedArray mask in place: entries whose truthiness
// (mask[i] != 0) equals `validwhen` are set to exactly 1; all other
// entries are left unchanged. One element per thread; the 3-D grid is
// flattened to a linear thread id and threads past `length` do nothing.
__global__ void awkward_ByteMaskedArray_getitem_nextcarry_outindex_filter_mask(int8_t* mask, bool validwhen, int64_t length) {
int64_t block_id =
blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int64_t thread_id = block_id * blockDim.x + threadIdx.x;
if(thread_id < length) {
if ((mask[thread_id] != 0) == validwhen) {
mask[thread_id] = 1;
}
}
} |
7,608 | #include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <iostream>
#include <stdio.h>
using namespace std;
// Computes a SHA-512 ($6$) crypt hash for a salt/password pair supplied on
// the command line and prints the inputs plus the (truncated) hash.
int main(int argc, char *argv[]) {
std::string salt = "";
std::string password = "";
// Require both arguments before touching argv.
if (argc < 3) {
std::cout << "!!! ERROR !!! Please enter salt and password strings as arguments !!!\n";
return EXIT_FAILURE;
}
salt = argv[1];
password = argv[2];
if (salt.length() != 8) {
std::cout << "!!! ERROR !!! Salt must be eight characters long !!!\n";
return EXIT_FAILURE;
}
// "$6$" selects the SHA-512 crypt scheme; the trailing "$" ends the salt.
salt = "$6$" + salt + "$";
// NOTE(review): crypt() is not declared by the headers included here on
// all platforms (glibc exposes it via <crypt.h> / _XOPEN_SOURCE) — confirm.
string testHash = crypt((char*) password.c_str(), (char*) salt.c_str());
// NOTE(review): truncates at 76 chars and appends '$' — confirm this
// matches whatever consumes the hash.
testHash = testHash.substr(0,76) + "$";
cout << "User entered information...\n";
cout << "Salt: " << salt << endl;
cout << "Password: " << password << endl;
cout << "Calculated hash...\n";
cout << testHash << endl;
return EXIT_SUCCESS;
}
|
7,609 | #include <iostream>
#include <stdlib.h>
#include <ctime>
#include <vector>
#include <curand.h>
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#define MIN(a, b) (((a)<(b)?(a):(b)))
// Source/offset term added to each generated sample; currently just the
// identity on the second uniform variate.
__device__
float source(float rand_b) { return rand_b; }
// Transforms two uniform variates into one sample:
// -mu * ln(rand_a) / sigma, shifted by source(rand_b).
// NOTE(review): -log(u) is an exponential draw; confirm the intended roles
// of mu and sigma in this scaling.
__device__
float f_gene(float mu, float sigma, float rand_a, float rand_b)
{
return -mu * logf( rand_a ) / sigma + source(rand_b);
}
// One sample per thread: parts[i] = f_gene(mu, sigma, rands_a[i], rands_b[i]).
// Threads beyond nb_parts do nothing.
__global__
void trajs(float mu, float sigma, float* parts, unsigned nb_parts, float* rands_a, float* rands_b )
{
    unsigned idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < nb_parts)
        parts[idx] = f_gene(mu, sigma, rands_a[idx], rands_b[idx]);
}
// Histograms the (sorted) `parts` array into nb_segs equal-width bins over
// [min, max). Each of nb_threads worker threads scans a contiguous slice of
// floor(nb_parts/nb_threads) particles; out-of-range samples are counted in
// *below / *above.
// Fixes vs. the original:
//  - `*below++` / `*above++` incremented the POINTER and never the counter;
//    the counters are now actually incremented (atomically — they are
//    shared across threads).
//  - seg == nb_segs previously wrote one element past the end of distrib[].
//  - distrib[] updates use atomicAdd: slices from different threads can
//    land in the same bin.
__global__
void make_distrib(float* parts,
unsigned nb_parts,
unsigned* distrib,
unsigned nb_segs,
unsigned* below,
unsigned* above,
float min,
float max,
unsigned nb_threads)
{
    unsigned x = threadIdx.x + blockIdx.x * blockDim.x;
    if (x >= nb_threads) return;
    unsigned range_size = floorf((float) nb_parts / nb_threads);
    unsigned i = x * range_size;
    // Bin index of the first particle in this thread's slice.
    int seg = floorf( (float) (parts[i]-min)/(max-min)*nb_segs );
    for (i++; i < (x+1)*range_size; i++){
        // parts is sorted ascending, so the bin index only moves forward.
        if ( floorf( (float) (parts[i]-min)/(max-min)*nb_segs ) > seg )
            seg = (int) floorf( (float) (parts[i]-min)/(max-min)*nb_segs );
        if ( seg < 0 ) atomicAdd(below, 1u);
        else if ( seg >= (int) nb_segs ) atomicAdd(above, 1u);
        else atomicAdd(&distrib[seg], 1u);
    }
}
// Driver: samples nb_parts values via trajs (curand uniforms feeding
// f_gene), sorts them on the device, histograms them into nb_segs bins over
// [0, 1), and copies the histogram back to the host.
// NOTE(review): distrib/below/above are never zero-initialized on the
// device before make_distrib increments them — confirm.
// NOTE(review): no cudaFree / curandDestroyGenerator cleanup before exit.
int main(int argc, char **argv)
{
// Expects exactly: mu sigma nb_parts nb_segs
if (argc!=5) return -1;
float mu = atof(argv[1]),
sigma = atof(argv[2]);
unsigned nb_parts = atoi(argv[3]),
nb_segs = atoi(argv[4]);
float* parts,
*rands_a,
*rands_b;
cudaMalloc(&parts, sizeof(float)*nb_parts);
cudaMalloc(&rands_a, sizeof(float)*nb_parts);
cudaMalloc(&rands_b, sizeof(float)*nb_parts);
dim3 blockSize(512),
gridSize(ceil((float) nb_parts/512));
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
// Time-based seed: runs are not reproducible.
curandSetPseudoRandomGeneratorSeed(gen, time(NULL));
curandGenerateUniform(gen, rands_a, nb_parts);
curandGenerateUniform(gen, rands_b, nb_parts);
trajs<<<gridSize, blockSize>>>(mu, sigma, parts, nb_parts, rands_a, rands_b);
thrust::sort(thrust::device, parts, parts+nb_parts);
unsigned* distrib,
*above,
*below;
cudaMalloc(&distrib, sizeof(unsigned)*nb_segs);
cudaMalloc(&below, sizeof(unsigned));
cudaMalloc(&above, sizeof(unsigned));
make_distrib<<<gridSize, blockSize>>>(parts,
nb_parts,
distrib,
nb_segs,
below,
above,
0, 1,
MIN(nb_segs/2, nb_parts/2));
std::vector<unsigned> h_distrib (nb_segs);
cudaMemcpy(h_distrib.data(), distrib, sizeof(unsigned)*nb_segs, cudaMemcpyDeviceToHost);
// for (int i=0; i<nb_segs; i++)
// std::cout << (float) i/nb_segs << " " << h_distrib.at(i) << std::endl;
return 0;
} |
7,610 | #include "oFAST.cuh"
__device__ unsigned int g_counter = 0;
// This function returns
// 1 if v is greater than x + th
// 2 if v is less than x - th
// 0 if v is between x + th and x - th
// Classify v against the band [x - th, x + th]:
// bit 0 set  -> v > x + th  (returns 1)
// bit 1 set  -> v < x - th  (returns 2)
// neither    -> v inside the band (returns 0)
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
    const int d = x - v;
    const int brighter = (d < -th) ? 1 : 0;
    const int darker = (d > th) ? 1 : 0;
    return brighter | (darker << 1);
}
// mask1/2 light/dark
// Builds two 16-bit masks over the 16 circle pixels around centre value v:
// bit i of mask1 = circle pixel i brighter than v+th, bit i of mask2 =
// darker than v-th. The circle pixels arrive packed four-per-word in
// C[0..3]; diffType classifies each one. The early returns prune centres
// as soon as an opposite pair of probe pixels both fall inside the
// threshold band (cheap-rejection heuristic).
__device__ void calcMask(const int C[4], const int v, const int th, uint16_t& mask1, uint16_t& mask2)
{
mask1 = 0; // only cares about bright one
mask2 = 0; // only cares about dark
int d1, d2;
// First opposite pair (bits 0 and 8).
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0) // if both sides are between the thresholds
return;
mask1 |= (d1 & 1) << 0;
// because we're shifting 2'b10 left, we need to shift one back right
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
// Second opposite pair (bits 4 and 12).
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
// end of four corners
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
// Remaining (odd-position) bits: no early-out from here on — the original
// pruning tests were deliberately left commented out.
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> not a keypoint
// popc counts the number of 1's
// A candidate passes when more than 8 of the 16 circle pixels are brighter
// (mask1) or darker (mask2) and the precomputed lookup table confirms the
// bit pattern qualifies.
// NOTE(review): the (mask >> 3) - 63 byte / (mask & 7) bit indexing must
// match the layout of the host-built c_table — confirm.
__device__ __forceinline__ bool isKeyPoint(uint16_t mask1, uint16_t mask2, uint8_t *shared_table)
{
return (__popc(mask1) > 8 && (shared_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (shared_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
// FAST keypoint detector over k stacked rows x cols images.
// One thread per candidate pixel (i, j) inside a 10-pixel border; detected
// keypoints append (i, j) plus a 10x10 surrounding patch to x_data/y_data/
// data through the global atomic counter g_counter.
// Launch requirement: 8129 bytes of dynamic shared memory for the table.
__global__ void calcKeyPoints(uint8_t* image, int rows, int cols, int threshold, float *data, int arr_size, int k, int *x_data, int *y_data, uint8_t *ctable_gpu)
{
    // Cooperatively stage the score table into shared memory (striped over
    // threadIdx.x).
    extern __shared__ uint8_t shared_table[];
    for (int ind = threadIdx.x; ind < 8129; ind+=blockDim.x)
    {
        shared_table[ind] = ctable_gpu[ind];
    }
    // Fix: barrier before anyone reads shared_table — the copy above is
    // split across threads, so no single thread populated the whole table.
    __syncthreads();
    const int j = threadIdx.x + blockIdx.x * blockDim.x + 10;
    const int i = threadIdx.y + blockIdx.y * blockDim.y + 10;
    for (int a = 0; a < k; a++)
    {
        int next = a * rows * cols;
        uint8_t* img = image + next;
        if (i < rows - 10 && j < cols - 10)
        {
            int i_minus_three = cols*(i-3);
            int j_minus_three = (j-3);
            int j_plus_three = (j+3);
            int i_plus_three = cols * (i + 3);
            int v;
            // C[0..3] pack the 16 circle pixels, four bytes per word.
            int C[4] = {0,0,0,0};
            C[2] |= static_cast<uint8_t>(img[i_minus_three + (j - 1)]) << 8;
            C[2] |= static_cast<uint8_t>(img[i_minus_three + (j)]);
            C[1] |= static_cast<uint8_t>(img[i_minus_three + (j + 1)]) << (3 * 8);
            C[2] |= static_cast<uint8_t>(img[cols*(i - 2) + (j - 2)]) << (2 * 8);
            C[1] |= static_cast<uint8_t>(img[cols*(i - 2) + (j + 2)]) << (2 * 8);
            C[2] |= static_cast<uint8_t>(img[cols*(i - 1) + j_minus_three]) << (3 * 8);
            C[1] |= static_cast<uint8_t>(img[cols*(i - 1) + j_plus_three]) << 8;
            C[3] |= static_cast<uint8_t>(img[cols * (i) + j_minus_three]);
            v = static_cast<uint8_t>(img[cols * (i) + (j)]);
            C[1] |= static_cast<uint8_t>(img[cols * (i) + j_plus_three]);
            // Cheap rejection: if the left and right circle pixels are both
            // inside the threshold band, this pixel cannot be a corner.
            int d1 = diffType(v, C[1] & 0xff, threshold);
            int d2 = diffType(v, C[3] & 0xff, threshold);
            if ((d1 | d2) == 0)
            {
                // Fix: 'continue' (was 'return') so the remaining images in
                // the stack are still examined by this thread.
                continue;
            }
            C[3] |= static_cast<uint8_t>(img[cols * (i + 1) + j_minus_three]) << 8;
            C[0] |= static_cast<uint8_t>(img[cols * (i + 1) + j_plus_three]) << (3 * 8);
            C[3] |= static_cast<uint8_t>(img[cols * (i + 2) + (j - 2)]) << (2 * 8);
            C[0] |= static_cast<uint8_t>(img[cols * (i + 2) + (j + 2)]) << (2 * 8);
            C[3] |= static_cast<uint8_t>(img[i_plus_three + (j - 1)]) << (3 * 8);
            C[0] |= static_cast<uint8_t>(img[i_plus_three + (j)]);
            C[0] |= static_cast<uint8_t>(img[i_plus_three + (j + 1)]) << 8;
            uint16_t mask1 = 0;
            uint16_t mask2 = 0;
            calcMask(C, v, threshold, mask1, mask2);
            if (isKeyPoint(mask1, mask2, shared_table))
            {
                // Reserve an output slot; (unsigned)(-1) makes the counter
                // effectively free-running.
                unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1));
                if (ind < k * arr_size)
                {
                    x_data[ind] = i;
                    y_data[ind] = j;
                    #pragma unroll
                    for (int b = 0; b < 100; b++)
                    {
                        // Copy the 10x10 brightness patch centred on (i, j).
                        data[(ind*100)+b] = static_cast<float>(img[cols*(i+4-(b/10))+(j+(-4+(b%10)))]);
                    }
                }
            }
        }
    }
}
// Host wrapper for the FAST kernel: stages the score table, allocates the
// patch buffer, launches calcKeyPoints, then repacks the per-keypoint
// 100-float patches into float4s for data_out.
// NOTE(review): the grid covers (rows-6, cols-6) pixels while the kernel
// applies a 10-pixel border — confirm the intended margin.
// NOTE(review): only 24 float4s (96 of the 100 patch floats) are emitted
// per keypoint — confirm dropping the final 4 values is intended.
void gpu_oFAST(uint8_t* image, int rows, int cols, int threshold, float4 *data_out, int arr_size, int k, int *x_data, int *y_data)
{
    dim3 block(32, 8);
    dim3 grid;
    grid.x = divUp(rows - 6, block.x);
    grid.y = divUp(cols - 6, block.y);
    // Stage the lookup table where the kernel can copy it to shared memory.
    uint8_t *ctable_gpu;
    cudaMallocManaged(&ctable_gpu, 8129);
    for (int i = 0; i < 8129; i++)
    {
        ctable_gpu[i] = c_table[i];
    }
    // Scratch buffer for the output brightness data (100 floats per slot).
    float *gpu_data;
    cudaMallocManaged(&gpu_data, k * 100 * arr_size * sizeof(float));
    calcKeyPoints<<<grid, block, 8129>>>(image, rows, cols, threshold, gpu_data, arr_size, k, x_data, y_data, ctable_gpu);
    cudaDeviceSynchronize();
    // Repack each 100-float patch into float4 quadruples (24 per slot).
    int c = 0;
    int i = 0;
    int j = 0;
    while (c < arr_size * k)
    {
        while (j < 24)
        {
            data_out[24*c+j] = make_float4(gpu_data[100*c+i], gpu_data[100*c+i+1], gpu_data[100*c+i+2], gpu_data[100*c+i+3]);
            i = i + 4;
            j++;
        }
        i = 0;
        j = 0;
        c++;
    }
    // Fix: release the temporaries — both buffers were leaked on every call.
    cudaFree(gpu_data);
    cudaFree(ctable_gpu);
}
// test pipeline integration
// Liveness probe: confirms the oFAST module is linked into the pipeline.
void pipeline_print_oFAST(){ printf("oFAST Module active!\n");};
|
7,611 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated stress kernel (see header comment above): chains float
// operations driven entirely by the command-line inputs and prints the
// final accumulator from the device. The inner loops deliberately reuse
// the name `i`, shadowing the outer index — left as generated.
__global__
void compute(float comp, int var_1,float* var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29) {
for (int i=0; i < var_1; ++i) {
comp = (var_5 - var_6);
var_2[i] = (+1.2434E34f / (var_7 / (var_8 - var_9)));
comp += var_2[i] + (var_10 - -1.2385E-42f + (var_11 * +0.0f / (-1.9711E34f * +1.8458E34f)));
for (int i=0; i < var_3; ++i) {
comp = -0.0f + (var_12 + (-1.2628E-44f / sinf(-1.0453E34f - +1.3967E34f + (var_13 * (var_14 / -1.9112E-43f)))));
comp += var_15 / var_16;
comp = (+1.4984E26f + (-1.6015E-44f - ceilf(fmodf((var_17 * +1.0433E-44f), +1.5480E35f))));
comp += -1.5524E34f / (var_18 - +1.6725E34f + var_19);
}
for (int i=0; i < var_4; ++i) {
comp += (var_20 - var_21);
comp = (var_22 - var_23 + -0.0f);
}
if (comp >= (-1.6943E29f - var_24 / (var_25 / -1.8487E-42f))) {
comp = expf(-1.1615E34f);
comp = +1.8922E-42f - var_26 + (+0.0f * (-1.8922E34f - (var_27 * var_28)));
float tmp_1 = -1.2620E-41f;
comp += tmp_1 - (-1.5293E-42f * -1.8979E-43f * -0.0f + +0.0f * var_29 * -0.0f);
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float array with every slot initialized to v.
// Caller owns the returned buffer and must free() it.
float* initPointer(float v) {
    float *buf = (float*) malloc(10 * sizeof(float));
    for (int i = 0; i < 10; i++) {
        buf[i] = v;
    }
    return buf;
}
// Parses 30 command-line values (a float, an int, a fill value for a
// 10-float buffer, two ints, then 25 floats) and forwards them to the
// single-thread `compute` kernel.
// NOTE(review): argc is never validated — running with fewer than 31
// arguments dereferences argv out of bounds.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float* tmp_3 = initPointer( atof(argv[3]) );
int tmp_4 = atoi(argv[4]);
int tmp_5 = atoi(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
// NOTE(review): tmp_3 is a HOST pointer passed to a kernel — the device
// dereferences it in var_2[i]; confirm this is intentional in the
// generated-test harness.
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30);
cudaDeviceSynchronize();
return 0;
}
|
7,612 | #include <cstdio>
#include <cstdlib>
#include <math.h>
#include <time.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a diagnostic and (optionally) abort when a CUDA call fails; used
// via the CUDA_SAFE_CALL macro, which injects __FILE__/__LINE__.
// Fix: `file` is const char* — __FILE__ expands to a string literal, and
// binding a string literal to a mutable char* is ill-formed in C++11+.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#define NUM_THREADS_PER_BLOCK 256
#define PRINT_TIME 1
#define SM_ARR_LEN 2000
#define NUM_BLOCKS (SM_ARR_LEN*SM_ARR_LEN + NUM_THREADS_PER_BLOCK-1)/NUM_THREADS_PER_BLOCK
#define TOL 1//0.00001
#define OMEGA 1.60
#define IMUL(a, b) __mul24(a, b)
void initializeArray1D(float *arr, int len, int seed);
// GPU relaxation sweep: each thread repeatedly replaces every cell of its
// private sub-tile with the average of its 4-neighbourhood, for 2000
// sweeps. The sub-tile is derived by evenly dividing the buf_dim x buf_dim
// grid among blocks and then among threads; edges of the whole grid are
// excluded via the x_start/x_bound adjustments.
// NOTE(review): there is no __syncthreads()/grid-level sync between sweeps,
// so reads at sub-tile boundaries race with neighbouring threads' writes.
// NOTE(review): unlike the commented-out SOR variant below, OMEGA is not
// applied here — this is plain averaging, not over-relaxation.
__global__ void kernel_sor (int buf_dim, float* buf) {
/*const int tid = threadIdx.x;
const int tjd = threadIdx.y;
//const int threadN = IMUL(blockDim.x, gridDim.x);
int in ; //threadIdx.x;
int jn ; //threadIdx.y;
float change, mean_change = 100;
for(int i = 0; i<2000; i++) {
mean_change = 0;
for (in = tid; in < arrLen ; in+= threadIdx.x){
for (jn = tjd; jn < arrLen ; jn+= threadIdx.y){
change = result[in*arrLen+jn] - .25 * (result[(in-1)*arrLen+jn] +
result[(in+1)*arrLen+jn] +
result[in*arrLen+jn+1] +
result[in*arrLen+jn-1]);
result[in*arrLen+jn] -= change * OMEGA;
if (change < 0){
change = -change;
}
mean_change += change;
}
}
}*/
// Per-thread sub-tile extents along x and y.
int block_x_len = buf_dim / gridDim.x;
int thread_x_len = block_x_len / blockDim.x;
int x_offset = block_x_len * blockIdx.x + thread_x_len * threadIdx.x;
int block_y_len = buf_dim / gridDim.y;
int thread_y_len = block_y_len / blockDim.y;
int y_offset = block_y_len * blockIdx.y + thread_y_len * threadIdx.y;
// Clip the outermost row/column of the whole grid (boundary cells).
int x_start = x_offset + (x_offset == 0 ? 1 : 0);
int x_bound = x_offset + thread_x_len - (x_offset + thread_x_len == buf_dim ? 1 : 0);
int y_start = y_offset + (y_offset == 0 ? 1 : 0);
int y_bound = y_offset + thread_y_len - (y_offset + thread_y_len == buf_dim ? 1 : 0);
for (int itr = 0; itr < 2000; itr++)
{
for (int i = x_start; i < x_bound; i++)
{
for (int j = y_start; j < y_bound; j++)
{
buf[i * buf_dim + j] = 0.25 * (
buf[(i + 1) * buf_dim + j]
+ buf[(i - 1) * buf_dim + j]
+ buf[i * buf_dim + j + 1]
+ buf[i * buf_dim + j - 1]
);
}
}
}
}
// Host driver: runs kernel_sor on the GPU (timed with CUDA events), runs a
// 2000-sweep SOR reference on the CPU (timed with clock()), and prints the
// first 50 values of both results.
// NOTE(review): kernel_sor is launched with a 1-D grid of NUM_BLOCKS yet
// indexes gridDim.y/blockIdx.y — confirm the intended decomposition.
// NOTE(review): h_y is copied to the device without ever being initialized.
int main(int argc, char **argv){
    int arrLen = 0;
    // GPU Timing variables
    cudaEvent_t start, stop;
    float elapsed_gpu;
    // Arrays on GPU global memory
    float *d_x;
    float *d_y;
    float *d_result;
    // Arrays on the host memory
    float *h_x;
    float *h_y;
    float *h_result;
    float *h_result_gold;
    int i, errCount = 0, zeroCount = 0;
    int j;
    if (argc > 1) {
        arrLen = atoi(argv[1]);
    }
    else {
        arrLen = SM_ARR_LEN;
    }
    printf("Length of the array = %d\n", arrLen);
    // Select GPU
    CUDA_SAFE_CALL(cudaSetDevice(0));
    // Allocate GPU memory
    size_t allocSize = arrLen*arrLen * sizeof(float);
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_x, allocSize));
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_y, allocSize));
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_result, allocSize));
    // Allocate arrays on host memory
    h_x = (float *) malloc(allocSize);
    h_y = (float *) malloc(allocSize);
    h_result = (float *) malloc(allocSize);
    h_result_gold = (float *) malloc(allocSize);
    // Initialize the host arrays
    printf("\nInitializing the arrays ...");
    // Arrays are initialized with a known seed for reproducability
    initializeArray1D(h_x, arrLen, 2453);
    //initializeArray1D(h_y, arrLen*arrLen, 1467);
    initializeArray1D(h_result, arrLen, 2453);
    initializeArray1D(h_result_gold, arrLen, 1467);
    printf("\t... done\n\n");
#if PRINT_TIME
    // Create the cuda events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Record event on the default stream
    cudaEventRecord(start, 0);
#endif
    // Transfer the arrays to the GPU memory
    CUDA_SAFE_CALL(cudaMemcpy(d_result, h_result, allocSize, cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(d_y, h_y, allocSize, cudaMemcpyHostToDevice));
    // Launch the kernel
    dim3 dimBlock(16,16);
    kernel_sor<<<NUM_BLOCKS, dimBlock>>>(arrLen, d_result);
    // Check for errors during launch
    CUDA_SAFE_CALL(cudaPeekAtLastError());
    // Transfer the results back to the host
    CUDA_SAFE_CALL(cudaMemcpy(h_result, d_result, allocSize, cudaMemcpyDeviceToHost));
#if PRINT_TIME
    // Stop and destroy the timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_gpu, start, stop);
    printf("\nGPU time: %f (msec)\n", elapsed_gpu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
#endif
    // Fix: actually sample the clock — `begin`/`ending` were declared but
    // never assigned, so the printed CPU time was garbage (UB).
    clock_t begin = clock();
    // Fix: the residual must be float — the original `int change`
    // truncated (and could overflow on) the correction term.
    float change;
    // Compute the results on the host (SOR with relaxation factor OMEGA)
    for( int sweep = 0; sweep<2000; sweep++){
        for (i = 1; i < arrLen-1; i++){
            for (j = 1; j < arrLen-1; j++) {
                change = h_result_gold[i*arrLen+j] - .25 * (h_result_gold[(i-1)*arrLen+j] +
                    h_result_gold[(i+1)*arrLen+j] +
                    h_result_gold[i*arrLen+j+1] +
                    h_result_gold[i*arrLen+j-1]);
                h_result_gold[i*arrLen+j] -= change * OMEGA;
            }
        }
    }
    clock_t ending = clock();
    // Compare the results
    /*
    for(i = 0; i < arrLen*arrLen; i++) {
    if (abs(h_result_gold[i] - h_result[i]) > TOL) {
    errCount++;
    }
    if (h_result[i] == 0) {
    zeroCount++;
    }
    }
    */
    double cpu_time = ((double) (ending - begin)) / CLOCKS_PER_SEC;
    printf("fun() took %f seconds to execute \n", cpu_time);
    for(i = 0; i < 50; i++) {
        printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]);
    }
    if (errCount > 0) {
        printf("\n@ERROR: TEST FAILED: %d results did not matched\n", errCount);
    }
    else if (zeroCount > 0){
        printf("\n@ERROR: TEST FAILED: %d results (from GPU) are zero\n", zeroCount);
    }
    else {
        printf("\nTEST PASSED: All results matched\n");
    }
    // Free-up device and host memory
    CUDA_SAFE_CALL(cudaFree(d_x));
    CUDA_SAFE_CALL(cudaFree(d_y));
    CUDA_SAFE_CALL(cudaFree(d_result));
    free(h_x);
    free(h_y);
    free(h_result);
    // Fix: h_result_gold was leaked.
    free(h_result_gold);
    return 0;
}
// Fill a len x len float matrix (row-major in arr) with pseudo-random
// values from rand(), seeded deterministically for reproducible runs.
void initializeArray1D(float *arr, int len, int seed) {
    srand(seed);
    for (int row = 0; row < len; row++) {
        for (int col = 0; col < len; col++) {
            arr[row * len + col] = (float) rand();
        }
    }
}
|
7,613 | #include<cstdio>
// Writes 1.0 into exactly one cell of the size x size matrix — the thread
// whose global x index is 0 and global y index is 2; every other thread is
// a no-op.
__global__ void set_only_one(double *M, int size){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col == 0 && row == 2)
        M[row * size + col] = 1.0;
}
// Launches set_only_one on a zeroed 4x4 device matrix, copies it back and
// prints it; exactly one cell should read 1.000000.
// Fixes: cudaThreadSynchronize() is deprecated (replaced with
// cudaDeviceSynchronize()), and both the device and host buffers were
// leaked.
void caller(){
    int siz = 4;
    dim3 blockDim(2,2);
    dim3 gridDim( 2,2 );
    double *arr1;
    cudaMalloc(&arr1, sizeof(double) * siz * siz);
    // cudaMemset writes bytes; all-zero bytes are 0.0 for IEEE doubles.
    cudaMemset((void *)arr1, 0, sizeof(double) * siz * siz);
    set_only_one<<<gridDim, blockDim >>>(arr1, siz);
    cudaDeviceSynchronize();
    double *hostarr = new double[siz * siz];
    cudaMemcpy(hostarr, arr1, sizeof(double)*siz * siz, cudaMemcpyDeviceToHost);
    for(int i = 0; i < siz; i++){
        for(int j = 0; j < siz;j++){
            printf("%lf ", hostarr[i*siz + j]);
        }
        printf("\n");
    }
    // Fix: release the buffers.
    cudaFree(arr1);
    delete[] hostarr;
}
|
7,614 | #include <cstdio>
#include <cassert>
#define MAXN 2000
#define MULSIDE 16 // each block has size SIDE x SIDE
// function for debugging.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report any failed CUDA call (error string, file, line) on stderr and,
// unless abort is false, terminate the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
#define get(A, x, y, pitch, N) ((x) >= 0 && (x) < N && (y) >= 0 && (y) < N) ? A[(x)*pitch + (y)] : 0
// One Game-of-Life step reading the board from A (pitch pitcha) and
// writing the next generation into B (pitch pitchb). The get() macro
// returns 0 (dead) for any read outside the logical N x N board.
__global__ void game_of_life_iter(char* A, char* B, size_t pitcha, size_t pitchb, int N){
    int gx = threadIdx.x + blockIdx.x * MULSIDE;
    int gy = threadIdx.y + blockIdx.y * MULSIDE;
    char self = get(A, gx, gy, pitcha, N);
    // sum the eight neighbors; out-of-board neighbors count as dead
    char alive = 0;
    for (int dx = -1; dx <= 1; dx++)
        for (int dy = -1; dy <= 1; dy++)
            if (dx != 0 || dy != 0)
                alive += get(A, gx + dx, gy + dy, pitcha, N);
    // standard rules: a live cell survives with 2-3 neighbors,
    // a dead cell becomes alive with exactly 3
    char next;
    if (self)
        next = (alive == 2 || alive == 3) ? self : 0;
    else
        next = (alive == 3) ? 1 : 0;
    B[gx * pitchb + gy] = next;
}
// One Game-of-Life step using a shared-memory tile: each MULSIDE x MULSIDE
// block stages its own cells plus a one-cell halo in 'source', then applies
// the rules from the tile. The get() macro returns 0 (dead) for any read
// outside the logical N x N board. Expects a dim3(MULSIDE, MULSIDE) block.
// NOTE(review): threads past the board edge (when N % MULSIDE != 0) still
// write B; those cells lie outside the logical board and are never read
// back through get() -- confirm the pitched allocation always covers them.
__global__ void faster_game_of_life_iter(char* A, char* B, size_t pitcha, size_t pitchb, int N){
// tile plus a one-cell halo on every side
__shared__ char source[MULSIDE+2][MULSIDE+2];
int localx = threadIdx.x;
int localy = threadIdx.y;
int globalx = localx + blockIdx.x * MULSIDE;
int globaly = localy + blockIdx.y * MULSIDE;
// index used to address the 'source' array (shifted by 1 for the halo).
int src_x = localx + 1;
int src_y = localy + 1;
source[src_x][src_y] = get(A, globalx, globaly, pitcha, N);
// sides: threads on each tile edge also load the adjacent halo cell
if (localx == 0)
source[src_x-1][src_y] = get(A, globalx-1, globaly, pitcha, N);
if (localx == MULSIDE-1)
source[src_x+1][src_y] = get(A, globalx+1, globaly, pitcha, N);
if (localy == 0)
source[src_x][src_y-1] = get(A, globalx, globaly-1, pitcha, N);
if (localy == MULSIDE-1)
source[src_x][src_y+1] = get(A, globalx, globaly+1, pitcha, N);
// corners: the four corner threads load the diagonal halo cells
if (localx == 0 && localy == 0)
source[src_x-1][src_y-1] = get(A, globalx-1, globaly-1, pitcha, N);
if (localx == MULSIDE-1 && localy == 0)
source[src_x+1][src_y-1] = get(A, globalx+1, globaly-1, pitcha, N);
if (localx == 0 && localy == MULSIDE-1)
source[src_x-1][src_y+1] = get(A, globalx-1, globaly+1, pitcha, N);
if (localx == MULSIDE-1 && localy == MULSIDE-1)
source[src_x+1][src_y+1] = get(A, globalx+1, globaly+1, pitcha, N);
// every halo load must complete before any thread reads the tile
__syncthreads();
// count alive neighbors
// unroll to speed up
char self = source[src_x][src_y];
char alive = source[src_x-1][src_y-1] + source[src_x-1][src_y] + source[src_x-1][src_y+1]
+ source[src_x][src_y-1] + source[src_x][src_y+1]
+ source[src_x+1][src_y-1] + source[src_x+1][src_y] + source[src_x+1][src_y+1];
// live cell dies with <2 or >3 neighbors
if (self && (alive < 2 || alive > 3)){
B[globalx * pitchb + globaly] = 0;
}
// dead cell with exactly 3 neighbors becomes alive
else if (!self && alive == 3){
B[globalx * pitchb + globaly] = 1;
}
// otherwise the cell keeps its state
else{
B[globalx * pitchb + globaly] = self;
}
}
// Copy the MAXN x MAXN host board src (row stride MAXN bytes) into the
// pitched device buffer dst (row stride 'pitch' bytes).
void copyto(char* dst, char* src, size_t pitch){
gpuErrchk(cudaMemcpy2D((void*)dst, pitch, (void *)src, MAXN, MAXN*sizeof(char), MAXN, cudaMemcpyHostToDevice));
}
// Copy a pitched device board src (row stride 'pitch' bytes) back into the
// MAXN x MAXN host buffer dst (row stride MAXN bytes).
void copyback(char* dst, char* src, size_t pitch){
gpuErrchk(cudaMemcpy2D((void*)dst, MAXN, (void *)src, pitch, MAXN*sizeof(char), MAXN, cudaMemcpyDeviceToHost));
}
// Zero an entire pitched device board (MAXN rows of MAXN bytes each).
void cuClear(char* dst, size_t pitch){
gpuErrchk(cudaMemset2D((void*)dst, pitch, 0, MAXN*sizeof(char), MAXN));
}
// Print the top-left N x N corner of A (row-major with row stride MAXN)
// as one row of 0/1 digits per line.
void print_matrix(int N, char A[]) {
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++)
            printf("%d", A[row * MAXN + col]);
        printf("\n");
    }
}
// Integer ceiling division: smallest q such that q * b >= a
// (for the positive operands used by the launch-configuration code).
int divCeil(int a, int b){
    int q = a / b;
    return (q * b < a) ? q + 1 : q;
}
char A[MAXN*MAXN];
// Read board size N, iteration count M, and an N x N board of 0/1 digits
// from stdin; run M Game-of-Life steps on the GPU, ping-ponging between
// two pitched device buffers; print the final board.
// Fixes: the first scanf result was stored but never checked (now
// asserted); both device buffers were leaked (now freed); the
// loop-invariant BLOCKS computation is hoisted out of the iteration loop.
int main(int argc, char** argv)
{
    char digits[MAXN];
    int N, M, s;
    s = scanf("%d %d", &N, &M);
    assert(s == 2);   // was unchecked
    for (int i = 0; i < N; i++){
        s = scanf("%s", digits);
        assert(s > 0);
        for (int j = 0; j < N; j++){
            A[i*MAXN + j] = digits[j] == '0' ? 0 : 1;
        }
    }
    size_t pitch[2];
    char *devA[2];
    gpuErrchk(cudaMallocPitch(&devA[0], &pitch[0], MAXN*sizeof(char), MAXN));
    gpuErrchk(cudaMallocPitch(&devA[1], &pitch[1], MAXN*sizeof(char), MAXN));
    copyto(devA[0], (char*)A, pitch[0]);
    int BLOCKS = divCeil(N, MULSIDE);   // hoisted: invariant across iterations
    for (int i = 0; i < M; i++){
        int x = i % 2;   // ping-pong between the two buffers
        faster_game_of_life_iter <<< dim3(BLOCKS,BLOCKS), dim3(MULSIDE,MULSIDE) >>> (devA[x], devA[!x], pitch[x], pitch[!x], N);
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
    }
    // after M steps the current generation lives in devA[M % 2]
    copyback((char*)A, devA[M%2], pitch[M%2]);
    print_matrix(N, A);
    cudaFree(devA[0]);   // were leaked
    cudaFree(devA[1]);
    return 0;
}
|
7,615 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
// Node of the UPGMA merge tree: ids 0..N-1 are leaves, ids N..2N-2 are the
// internal nodes created by successive merges.
typedef struct node *TREE_ptr;
struct node
{
int NODE; // this node's id
int LEVEL; // height of the subtree rooted here (leaves are 0)
TREE_ptr ancestor, left_child, right_child;
int left_level, right_level; // '(' / ')' counts emitted around a leaf when printing
};
// Zero out rows i_min and j_min and columns i_min and j_min of the N x N
// distance matrix, removing the two just-merged clusters from play.
void del(int N, int i_min,int j_min, float **UPCluster)
{
    for (int j = 0; j < N; j++)
    {
        UPCluster[j_min][j] = 0;
        UPCluster[i_min][j] = 0;
        UPCluster[j][j_min] = 0;
        UPCluster[j][i_min] = 0;
    }
}
// Install the freshly computed cluster distances (staged in row N) into
// row and column i_min, keeping the matrix symmetric. (j_min is unused but
// kept for interface compatibility with del/trans.)
void replace(int N, int i_min, int j_min, float **UPCluster)
{
    //replace i,j with K
    for (int j = 0; j < N; j++)
    {
        float v = UPCluster[N][j];
        UPCluster[i_min][j] = v;
        UPCluster[j][i_min] = v;
    }
}
// Return the larger of the two ints.
int Max(int x,int y)
{
    if (x > y)
        return x;
    return y;
}
// Mirror the updated row/column i_min of UPCluster into the flat N*N copy
// UP1D, and zero out row/column j_min there (the removed cluster).
// The four writes per iteration are kept in the original order because
// they can alias when i hits i_min or j_min.
void trans(float **UPCluster,int N,float *UP1D,int i_min,int j_min)
{
    for (int i = 0; i < N; i++)
    {
        int IN = i * N;
        float v = UPCluster[i_min][i];
        UP1D[i_min * N + i] = v;
        UP1D[IN + i_min] = v;
        UP1D[IN + j_min] = 0;
        UP1D[j_min * N + i] = 0;
    }
}
// Pre-order pass that propagates parenthesis counts down the tree: a node
// inherits its parent's right_level (+1) when it is the parent's right
// child, otherwise the parent's left_level (+1). Run before Inoder_Result.
// NOTE(review): dereferences ptr->ancestor->right_child without a null
// check; Build_Tree always sets both children of an internal node, so this
// holds for trees built through Build_Tree -- confirm for other callers.
void preorder(TREE_ptr ptr,int N)
{
if(ptr)
{
{
if(ptr->ancestor!=NULL)
{
// right child inherits the ')' count, left child the '(' count
if(ptr->NODE==ptr->ancestor->right_child->NODE)
ptr->right_level=ptr->ancestor->right_level+1;
else
ptr->left_level=ptr->ancestor->left_level+1;
}
}
preorder(ptr->left_child,N);
preorder(ptr->right_child,N);
}
}
// In-order walk that prints the clustering as a parenthesized list:
// internal nodes (id >= N) print a comma between their two subtrees,
// leaves print their id wrapped in the '(' / ')' counts that preorder()
// computed into left_level / right_level.
void Inoder_Result(TREE_ptr ptr,int N)
{
int i;
if(ptr)
{
Inoder_Result(ptr->left_child,N);
if(ptr->ancestor!=NULL)
{
//fprintf(ftree,"%d ",ptr->NODE);
//fprintf(ftree,"%d ",ptr->LEVEL);
//fprintf(ftree,"%d ",ptr->ancestor->LEVEL);
}
// internal node: separator between the two merged subtrees
if(ptr->NODE >= N)
{
printf(",");
//fprintf(fw,",");
}
else
{
// leaf: opening parens, the leaf id, then closing parens
for(i=0;i<ptr->left_level;i++)
{
printf("(");
//fprintf(fw,"(");
}
printf("%d",ptr->NODE);
//fprintf(fw,"%d",ptr->NODE);
for(i=0;i<ptr->right_level;i++)
{
printf(")");
//fprintf(fw,")");
}
}
Inoder_Result(ptr->right_child,N);
}
}
// Reset all 2N-1 tree nodes (N leaves plus N-1 internal) to a
// disconnected, zeroed state with sequential ids.
void node_Initial(struct node *node, int N)
{
    int total = 2 * N - 1;
    for (int i = 0; i < total; i++)
    {
        struct node *p = &node[i];
        p->NODE = i;
        p->LEVEL = 0;
        p->ancestor = NULL;
        p->left_child = NULL;
        p->right_child = NULL;
        p->left_level = 0;
        p->right_level = 0;
    }
}
// Link clusters i and j under the new internal node K. A cluster that has
// already been merged is represented by its current ancestor; otherwise by
// the leaf itself. Both clusters then point at K as their ancestor, and
// K's level is one above the taller child.
void Build_Tree(struct node *node,int i,int j,int K)
{
    node[K].left_child  = node[i].ancestor ? node[i].ancestor : &(node[i]);
    node[K].right_child = node[j].ancestor ? node[j].ancestor : &(node[j]);
    node[i].ancestor = node[j].ancestor = &(node[K]);
    node[K].LEVEL = Max(node[i].LEVEL,node[j].LEVEL)+1;
}
// Debug helper: print the N x N distance matrix, 3-wide, no decimals.
void show(float **UPCluster,int N){
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
            printf("%3.0f ", UPCluster[i][j]);
        printf("\n");
    }
}
// Iteratively merge the two closest clusters (UPGMA / average linkage) on
// the CPU until a single cluster remains, recording each merge pair in
// c_node and then replaying the record through Build_Tree. The loop is
// timed with CUDA events.
// Fixes: c_node was leaked (now freed); the timing events were never
// destroyed; the deprecated cudaThreadSynchronize() is replaced; i_min and
// j_min are initialized so a degenerate (all-zero) matrix cannot read
// uninitialized values.
void cpu(int N,int K,float **UPCluster,int *ind,struct node *node)
{
    int i = 0, j = 0;
    // records the (i_min, j_min) pair chosen at every merge step
    int *c_node = (int*)malloc(2*N*sizeof(int));
    int i_min = 0, j_min = 0;   // was uninitialized
    cudaEvent_t t0, t1;
    float t00;
    cudaEventCreate(&t0);           // start/stop timing events
    cudaEventCreate(&t1);
    cudaEventRecord(t0, 0);         // begin timing
    while (K < 2*N - 1)
    {
        // find the smallest non-zero distance in the lower triangle
        float min = 1000;
        for (i = 0; i < N; i++)
        {
            for (j = 0; j < i; j++)
            {
                if ((UPCluster[i][j] < min) && (UPCluster[i][j] != 0))
                {
                    i_min = i;
                    j_min = j;
                    min = UPCluster[i][j];
                }
            }
        }
        c_node[2*(K-N)] = i_min;
        c_node[2*(K-N)+1] = j_min;
        // average-linkage distances of the merged cluster, staged in row N
        for (j = 0; j < N; j++)
        {
            UPCluster[N][j] = (UPCluster[i_min][j] + UPCluster[j_min][j])/2.0;
        }
        UPCluster[N][i_min] = UPCluster[N][j_min] = 0;
        del(N, i_min, j_min, UPCluster);       // drop the two old clusters
        replace(N, i_min, j_min, UPCluster);   // install the merged cluster at i_min
        printf("min = %5.2f i=%d j=%d\n", min, i_min, j_min);
        K++;
    }
    cudaDeviceSynchronize();        // was deprecated cudaThreadSynchronize()
    cudaEventRecord(t1, 0);         // end timing
    cudaEventSynchronize(t1);
    cudaEventElapsedTime(&t00, t0, t1);
    cudaEventDestroy(t0);           // were never destroyed
    cudaEventDestroy(t1);
    // replay the recorded merges to build the output tree
    for (K = N; K < 2*N - 1; K++)
    {
        i_min = c_node[2*(K-N)];
        j_min = c_node[2*(K-N)+1];
        Build_Tree(node, ind[i_min], ind[j_min], K);
        ind[i_min] = K;   // the merged cluster is now represented by K
    }
    printf("CPU time = %f\n", t00);
    free(c_node);                   // was leaked
}
// Read the matrix size, per-row labels and the distance matrix from the
// file named in argv[1], run UPGMA clustering, and print the resulting
// tree as a parenthesized expression.
// Fixes: each row label was read with fscanf("%s") into the address of a
// single char (buffer overflow -- %s writes at least the char plus a NUL);
// it is now read through a bounded temporary. The row allocations used
// sizeof(float*) instead of sizeof(float), and the matrix rows were leaked.
int main(int argc, char *argv[])
{
    int N;
    const char *filename = argv[1];
    FILE *fp = fopen(filename, "r");
    if (fp == NULL)
    {
        printf("Failed to open file: %s\n", filename);
        return 1;
    }
    // read matrix size
    fscanf(fp, "%d", &N);
    // one node per leaf plus one per merge
    struct node *node = (struct node*)malloc((2*N-1)*sizeof(struct node));
    node_Initial(node, N);
    char *q_tree = (char*)malloc(N*sizeof(char));
    int i, j;
    float **UPCluster;
    int *ind;
    // N+1 rows: row N is scratch space for the merged-cluster distances
    UPCluster = (float **)malloc((N+1)*sizeof(float*));
    for (i = 0; i < (N+1); i++)
    {
        UPCluster[i] = (float*)malloc(N*sizeof(float));   // was sizeof(float*)
    }
    ind = (int *)malloc(N*sizeof(int));
    // read the labels and distance matrix
    char label[64];
    do
    {
        for (i = 0; i < N; i++)
        {
            ind[i] = i;   // cluster index map: leaf i represents itself
            fscanf(fp, "%63s", label);   // was "%s" into &q_tree[i]: overflow
            q_tree[i] = label[0];
            for (j = 0; j < N; j++)
            {
                fscanf(fp, "%f,", &UPCluster[i][j]);
            }
        }
    } while (fscanf(fp, "%f", UPCluster[i]) != EOF);
    fclose(fp);
    printf("Source : \n");
    printf("-----------------------------\n\n");
    int K = N;
    printf("K=%d N=%d\n", K, N);
    printf("Start\n\n\n");
    cpu(K, N, UPCluster, ind, node);   // NOTE(review): callee declares (N, K); both equal N here
    preorder(&node[2*N-2], N);
    Inoder_Result(&node[2*N-2], N);
    printf("\n");
    free(q_tree);
    for (i = 0; i < (N+1); i++)
        free(UPCluster[i]);            // rows were leaked
    free(UPCluster);
    free(node);
    free(ind);
}
|
7,616 | #include "includes.h"
#define SIZ 20
#define num_inp 4
using namespace std;
// Pair of vertex indices describing one edge.
typedef struct edge {
int first, second; // endpoint indices
} edges;
// One block per sample: look up the predicted probability of the true
// class y[i] in the row-major probs matrix (row stride = size) and store
// its negative log-likelihood. Launch with one block per sample.
__global__ void logprobs_kernel(double * corect_logprobs, double * probs, int* y, int size)
{
    int sample = blockIdx.x;
    double p = probs[sample * size + y[sample]];
    corect_logprobs[sample] = -log(p);
}
7,617 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 9
// Add the compile-time constant N to the single int at *ptr.
// NOTE(review): every launched thread performs this unguarded
// read-modify-write on the same address; with more than one thread (the
// caller launches <<<2,3>>>) this is a data race and the final value is
// nondeterministic. Launch with a single thread, or use atomicAdd.
__global__ void kernel(int *ptr)
{
*ptr = *ptr + N;
}
// Print the device's compute capability, then add N to a single host int
// on the GPU and print "a + N = result".
// Fix: the kernel was launched with <<<2,3>>>, so six threads raced on the
// same unguarded read-modify-write and the result was nondeterministic;
// one thread performs the intended single increment.
int main(void)
{
    int computeMajor;
    int computeMinor;
    cudaDeviceGetAttribute(&computeMajor, cudaDevAttrComputeCapabilityMajor, 0);
    cudaDeviceGetAttribute(&computeMinor, cudaDevAttrComputeCapabilityMinor, 0);
    printf("Compute Capability: %d.%d\n", computeMajor, computeMinor);
    int *hptr;
    int *dptr;
    size_t size = sizeof(int);
    hptr = (int *)malloc(size);
    cudaMalloc((void **)&dptr, size);
    *hptr = 1;
    printf("%d + %d = ", *hptr, N);
    cudaMemcpy(dptr, hptr, size, cudaMemcpyHostToDevice);
    kernel<<<1, 1>>>(dptr);   // was <<<2,3>>>: racy on *dptr
    cudaMemcpy(hptr, dptr, size, cudaMemcpyDeviceToHost);   // implicit sync
    printf("%d\n", *hptr);
    free(hptr);
    cudaFree(dptr);
}
|
7,618 | #include "includes.h"
using namespace std;
// Tiled all-pairs distance kernel: 'in' is an n x m row-major matrix of n
// points with m features; out[i*n + j] receives the accumulated squared
// difference between points i and j. Each 16x16 block sweeps the feature
// dimension in 16-wide slices staged through shared memory.
// NOTE(review): despite the name, no sqrt is applied -- 'out' holds the
// SQUARED Euclidean distance.
// NOTE(review): the indexing assumes m and n are multiples of 16 and a
// dim3(16,16) block on an (n/16, n/16) grid -- confirm at the call site.
__global__ void matrixEuclideanDistanceKernelFast(float* in, float* out, int n, int m){
__shared__ float Ys[16][16];
__shared__ float Xs[16][16];
int bx = blockIdx.x, by = blockIdx.y;
int tx = threadIdx.x, ty = threadIdx.y;
// offsets of the first point handled by this block along y and x
int yBegin = by * 16 * m;
int xBegin = bx * 16 * m;
int yEnd = yBegin + m - 1, y, x, k, o;
float tmp, s = 0;
// slide a 16-feature-wide window across the feature dimension
for (y = yBegin, x = xBegin;
y <= yEnd;
y += 16, x += 16){
Ys[ty][tx] = in[y + ty * m + tx];
Xs[tx][ty] = in[x + ty * m + tx]; // stored transposed
__syncthreads(); // tile must be fully loaded before use
for (k = 0; k<16; k++){
tmp = Ys[ty][k] - Xs[k][tx];
s += tmp * tmp;
}
__syncthreads(); // finish reads before the next load overwrites the tile
}
// linear index of (row by*16+ty, col bx*16+tx) in the n x n output
o = by * 16 * n + ty * n + bx * 16 + tx;
out[o] = s;
}
7,619 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <string.h>
#include <utility>
#include <ctime>
#include <time.h>
using namespace std;
// output
ofstream fo("Ans.txt");
// Cac bien hang so
const int ARRAY_SIZE_INP = 12005;
const int ARRAY_BYTES_INP = ARRAY_SIZE_INP * sizeof(int);
const int ARRAY_SIZE_OUT = 605;
const int ARRAY_BYTES_OUT = ARRAY_SIZE_OUT * sizeof(int);
//cac bien chinh
int l = 9, d = 2;
char cDataInp[ARRAY_SIZE_INP];
int h_dataMotif[ARRAY_SIZE_INP];
string sDataInp[20];
// Result of scoring one candidate motif against the 20 data rows.
struct Motif_Ans
{
int dis; // summed best hamming distance over all 20 rows
string motif; // the motif string itself
int adress[20]; // 1-based best-match start position in each row
};
//input tu file
// Read one long A/C/G/T line from "datacu.txt", encode it into
// h_dataMotif (A=0, C=1, G=2, T=3), and split it into 20 strings of 600
// characters in sDataInp. Also resets the globals l and d.
// Fix: strlen(cDataInp) was re-evaluated on every loop iteration, making
// the encode loop O(n^2); its value is now hoisted.
void File_Input()
{
    l = 9; d = 2;
    FILE *pFile = fopen("datacu.txt", "r");
    if (pFile == NULL)
        perror("Error opening file");
    else
    {
        if (fgets(cDataInp, ARRAY_SIZE_INP, pFile) != NULL)
            cout << "nhap du lieu thanh cong!\n";
        fclose(pFile);
    }
    // encode each base as a 2-bit int
    const size_t len = strlen(cDataInp);   // hoisted: was called every iteration
    for (size_t i = 0; i < len; ++i) {
        //A=0 C=1 G=2 T=3
        switch (cDataInp[i])
        {
        case 'A': h_dataMotif[i] = 0; break;
        case 'C': h_dataMotif[i] = 1; break;
        case 'G': h_dataMotif[i] = 2; break;
        case 'T': h_dataMotif[i] = 3; break;
        default:
            cout << "error chuyen sang int";
            break;
        }
    }
    // split the full sequence into 20 rows of 600 characters
    int k = 0;
    string temp = cDataInp;
    for (size_t i = 0; i < temp.size(); i += 600) {
        sDataInp[k] = temp.substr(i, 600);
        k++;
    }
}
// Hamming distance between two strings: the number of positions (over
// s1's length) where the characters differ.
int score_ham(string s1, string s2)
{
    int mismatches = 0;
    for (size_t i = 0; i < s1.size(); ++i)
        mismatches += (s1[i] != s2[i]) ? 1 : 0;
    return mismatches;
}
// Score candidate motif s against the 20 global rows in sDataInp: for each
// row take the minimum hamming distance over all length-l windows, sum the
// minima into .dis, and record each row's 1-based best window start in
// .adress. Reads the globals sDataInp and l.
// NOTE(review): temp_Adress is only assigned when a window beats the 999
// sentinel; it would stay uninitialized for a row shorter than l -- rows
// are 600 characters when the input file is well-formed.
Motif_Ans dis_hamming(string s)
{
Motif_Ans res;
res.motif = s;
int res_Sum = 0, temp_score = 999, temp_Adress;
for (int i = 0; i < 20; ++i)
{
string s1 = sDataInp[i];
temp_score = 999; // best (smallest) distance seen in this row
for (int j = 0; j < s1.size() - l + 1; ++j)
{
string temp_str = s1.substr(j, l);
int score_s = score_ham(s, temp_str);
if (score_s < temp_score)
{
temp_score = score_s;
temp_Adress = j + 1; // 1-based window position
}
}
res_Sum += temp_score;
res.adress[i] = temp_Adress;
}
res.dis = res_Sum;
return res;
}
// Total hamming distance of the candidate motif 'cand' (length l, 2-bit
// encoded symbols) against the best-matching length-l window of each of
// the 20 rows of 600 symbols in 'data'; returns the sum of the 20 minima.
static __device__ int totalDistance(const int *data, const int *cand, int l)
{
    int total = 0;
    for (int row = 0; row < 20; ++row)
    {
        int best = 999;   // sentinel "infinity" per row
        for (int j = row * 600; j < (row + 1) * 600 - l; ++j)
        {
            int dist = 0;
            for (int k = 0; k < l; k++) {
                if (cand[k] != data[k + j]) dist++;
            }
            if (dist < best) best = dist;
        }
        total += best;
    }
    return total;
}

// PatternBranching motif search: one thread per candidate start position
// 'index' in the first row. Starting from the length-l substring at
// 'index', branch up to d times to the best single-substitution neighbor,
// and report the best motif (2-bit packed into ans[index]) and its total
// distance (ans_dis[index]).
// Fixes: the best-so-far score was seeded with the 999 sentinel and THEN
// accumulated into (999 + true distance), so the seed candidate never
// competed on its real score; it is now seeded with its actual total
// distance. The four copy-pasted "substitute nucleotide v" sections were
// folded into a loop over v in {A,C,G,T} with the distance computation in
// totalDistance(); the search order is otherwise unchanged.
__global__ void patternBarching(const int* d_datainp, const int l, const int d, int *ans, int *ans_dis) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < 600 - l) {
        int ansMotif_string[40];   // best motif found so far
        int motif_NeString[40];    // best neighbor of the current motif
        int temp_Str[40];          // scratch candidate being mutated
        // seed: the substring of row 0 starting at 'index'
        for (int i = 0; i < l; ++i) {
            ansMotif_string[i] = d_datainp[i + index];
            motif_NeString[i] = ansMotif_string[i];
        }
        // bug fix: was "999 + distance"; now the seed's true score
        int ansMotif_sorce = totalDistance(d_datainp, ansMotif_string, l);
        int motif_NeSorce = 999;   // "infinity" until a neighbor is scored
        // branch up to d times from the best motif found so far
        for (int a = 0; a <= d; a++) {
            if (motif_NeSorce < ansMotif_sorce) {
                // the best neighbor beats the current motif: adopt it
                ansMotif_sorce = motif_NeSorce;
                for (int i = 0; i < l; ++i) {
                    ansMotif_string[i] = motif_NeString[i];
                    temp_Str[i] = motif_NeString[i];
                }
            }
            else {
                for (int i = 0; i < l; ++i) {
                    temp_Str[i] = ansMotif_string[i];
                }
            }
            // bestNeighbor: try every single-position substitution
            for (int b = 0; b < l; ++b) {
                for (int v = 0; v < 4; ++v) {   // A=0 C=1 G=2 T=3
                    if (temp_Str[b] == v) continue;
                    int saved = temp_Str[b];
                    temp_Str[b] = v;
                    int score = totalDistance(d_datainp, temp_Str, l);
                    if (score < motif_NeSorce) {
                        motif_NeSorce = score;
                        for (int c = 0; c < l; ++c) {
                            motif_NeString[c] = temp_Str[c];
                        }
                    }
                    temp_Str[b] = saved;   // restore before the next try
                }
            }
        }
        // pack the motif into one int, 2 bits per symbol (LSB first)
        int shift = 0;
        int res = 0;
        for (int i = 0; i < l; ++i) {
            res = res | (ansMotif_string[i] << shift);
            shift += 2;
            if (index == 574) printf("%d ", ansMotif_string[i]);   // debug trace
        }
        ans[index] = res;
        ans_dis[index] = ansMotif_sorce;
    }
}
// Read the data file, launch the PatternBranching kernel over the 600
// candidate start positions, pick the best-scoring motif on the host, and
// write the answer (score, motif string, per-row positions) to Ans.txt.
// Fixes: h_dataDis was declared with ARRAY_BYTES_OUT (a byte count!)
// elements instead of ARRAY_SIZE_OUT; the error path for the d_dataDis
// allocation freed d_dataDis itself instead of the already-allocated
// d_dataOut; d_dataDis was never freed on the success path; "Sroce" ->
// "Score" (with a terminating newline) in the output file. A large block
// of dead commented-out code was removed.
int main()
{
    File_Input();
    int h_dataOut[ARRAY_SIZE_OUT];
    int h_dataDis[ARRAY_SIZE_OUT];   // was ARRAY_BYTES_OUT elements
    for (int i = 0; i < 600; ++i) {
        h_dataOut[i] = -1;
        h_dataDis[i] = 999;   // sentinel: position not processed
    }
    // device allocations
    int *d_dataMotif;
    if (cudaMalloc(&d_dataMotif, ARRAY_BYTES_INP) != cudaSuccess) {
        cout << "error allocating memory!" << endl;
        return 0;
    }
    int *d_dataOut;
    if (cudaMalloc(&d_dataOut, ARRAY_BYTES_OUT) != cudaSuccess) {
        cout << "error allocating memory!" << endl;
        cudaFree(d_dataMotif);
        return 0;
    }
    int *d_dataDis;
    if (cudaMalloc(&d_dataDis, ARRAY_BYTES_OUT) != cudaSuccess) {
        cout << "error allocating memory!" << endl;
        cudaFree(d_dataMotif);
        cudaFree(d_dataOut);   // was cudaFree(d_dataDis) on its own failure
        return 0;
    }
    // host -> device copies
    if (cudaMemcpy(d_dataMotif, h_dataMotif, ARRAY_BYTES_INP, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(d_dataOut, h_dataOut, ARRAY_BYTES_OUT, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(d_dataDis, h_dataDis, ARRAY_BYTES_OUT, cudaMemcpyHostToDevice) != cudaSuccess) {
        cout << "error copying memory!" << endl;
        cudaFree(d_dataMotif);
        cudaFree(d_dataOut);
        cudaFree(d_dataDis);
        return 0;
    }
    cout << "dang chay ...." << endl;
    // kernel launch: one thread per candidate start position
    int threadsPerBlock = 256;
    int blocksPerGrid = (600 + threadsPerBlock - 1) / threadsPerBlock;
    patternBarching <<<blocksPerGrid, threadsPerBlock >>> (d_dataMotif, l, d, d_dataOut, d_dataDis);
    fo << "\nTime " << clock() / (double)1000 << " Sec" << endl;
    // device -> host copies (cudaMemcpy synchronizes with the kernel)
    if (cudaMemcpy(h_dataOut, d_dataOut, ARRAY_BYTES_OUT, cudaMemcpyDeviceToHost) != cudaSuccess ||
        cudaMemcpy(h_dataDis, d_dataDis, ARRAY_BYTES_OUT, cudaMemcpyDeviceToHost) != cudaSuccess) {
        cout << "error copying memory!" << endl;
        cudaFree(d_dataMotif);
        cudaFree(d_dataOut);
        cudaFree(d_dataDis);
        return 0;
    }
    cout << "\n du lieu tra ve" << endl;
    // pick the start position with the smallest reported distance
    int minMotif = 0;
    for (int i = 1; i < 600; i++) {
        if (h_dataDis[i] != 999) {
            minMotif = h_dataDis[i] < h_dataDis[minMotif] ? i : minMotif;
        }
    }
    cout << minMotif << endl;
    // decode the packed 2-bit motif (LSB first) back into A/C/G/T
    string res = "";
    int chuyenStr = h_dataOut[minMotif];
    for (int j = 0; j < l; ++j) {
        static const char base[4] = { 'A', 'C', 'G', 'T' };
        res += base[(chuyenStr >> (2 * j)) & 3];
    }
    // re-score on the host to recover the per-row match positions
    Motif_Ans best_motif = dis_hamming(res);
    fo << "Score: " << best_motif.dis << endl;   // was "Sroce: " with no newline
    fo << "Best motif: " << best_motif.motif << endl << "Motif location: " << endl;
    for (int z = 0; z < 20; ++z) {
        fo << best_motif.adress[z] << ' ';
    }
    cout << "xong" << endl;
    fo << "\nEnd Time " << clock() / (double)1000 << " Sec" << endl;
    cudaFree(d_dataMotif);
    cudaFree(d_dataOut);
    cudaFree(d_dataDis);   // was leaked
    return 0;
}
7,620 | //========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// MAIN FUNCTION HEADER
//======================================================================================================================================================150
#include "./../main.h" // (in the main program folder) needed to recognized input parameters
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "./../util/device/device.h" // (in library path specified to compiler) needed by for device functions
#include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER
//======================================================================================================================================================150
#include "./kernel_gpu_cuda_wrapper.h" // (in the current directory)
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
#include "./kernel_gpu_cuda.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables
//========================================================================================================================================================================================================200
// KERNEL_GPU_CUDA_WRAPPER FUNCTION
//========================================================================================================================================================================================================200
// Host wrapper for the lavaMD-style GPU kernel: allocates device buffers
// for the boxes, positions (rv), charges (qv) and forces (fv), copies the
// inputs in, launches kernel_gpu_cuda with one block per box, copies the
// forces back into fv_cpu, frees the buffers, and prints per-stage timing.
// Fixes: the timing printfs used the invalid conversion "% " to print a
// literal percent sign (undefined behavior per the C standard) -- now
// escaped as "%%"; the deprecated cudaThreadSynchronize() calls were
// replaced with cudaDeviceSynchronize().
void
kernel_gpu_cuda_wrapper(par_str par_cpu,
    dim_str dim_cpu,
    box_str* box_cpu,
    FOUR_VECTOR* rv_cpu,
    fp* qv_cpu,
    FOUR_VECTOR* fv_cpu)
{
    // timing checkpoints around every stage
    long long time0 = get_time();
    // absorb initial driver overhead before timing the real work
    cudaDeviceSynchronize();   // was deprecated cudaThreadSynchronize()
    // device buffers
    box_str* d_box_gpu;
    FOUR_VECTOR* d_rv_gpu;
    fp* d_qv_gpu;
    FOUR_VECTOR* d_fv_gpu;
    // execution parameters: one block per box, NUMBER_THREADS per block
    dim3 threads;
    dim3 blocks;
    blocks.x = dim_cpu.number_boxes;
    blocks.y = 1;
    threads.x = NUMBER_THREADS;
    threads.y = 1;
    long long time1 = get_time();
    // device memory allocation (inputs: boxes, rv, qv; output: fv)
    cudaMalloc((void **)&d_box_gpu, dim_cpu.box_mem);
    cudaMalloc((void **)&d_rv_gpu, dim_cpu.space_mem);
    cudaMalloc((void **)&d_qv_gpu, dim_cpu.space_mem2);
    cudaMalloc((void **)&d_fv_gpu, dim_cpu.space_mem);
    long long time2 = get_time();
    // host -> device copies
    cudaMemcpy(d_box_gpu, box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice);
    cudaMemcpy(d_rv_gpu, rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);
    cudaMemcpy(d_qv_gpu, qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_fv_gpu, fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);
    long long time3 = get_time();
    // launch kernel - all boxes
    kernel_gpu_cuda<<<blocks, threads>>>(par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu);
    checkCUDAError("Start");
    cudaDeviceSynchronize();   // was deprecated cudaThreadSynchronize()
    long long time4 = get_time();
    // device -> host copy of the computed forces
    cudaMemcpy(fv_cpu, d_fv_gpu, dim_cpu.space_mem, cudaMemcpyDeviceToHost);
    long long time5 = get_time();
    // device memory deallocation
    cudaFree(d_rv_gpu);
    cudaFree(d_qv_gpu);
    cudaFree(d_fv_gpu);
    cudaFree(d_box_gpu);
    long long time6 = get_time();
    // timing report (literal % escaped as "%%"; "% " was invalid)
    printf("Time spent in different stages of GPU_CUDA KERNEL:\n");
    printf("%15.12f s, %15.12f %% : GPU: SET DEVICE / DRIVER INIT\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
    printf("%15.12f s, %15.12f %% : GPU MEM: ALO\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
    printf("%15.12f s, %15.12f %% : GPU MEM: COPY IN\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
    printf("%15.12f s, %15.12f %% : GPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
    printf("%15.12f s, %15.12f %% : GPU MEM: COPY OUT\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
    printf("%15.12f s, %15.12f %% : GPU MEM: FRE\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
    printf("Total time:\n");
    printf("%.12f s\n", (float) (time6-time0) / 1000000);
}
|
7,621 | #include "includes.h"
// Radix-pass histogram: for bit position `pass`, counts how many inputs have
// that bit clear (bin 0) vs set (bin 1). One thread per element; threads past
// `size` do nothing. `histogram` must point at two zero-initialised bins.
__global__ void HistogramKernel(unsigned int * input, unsigned int size, unsigned int* histogram, unsigned int pass) {
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size)
        return;
    const unsigned int bin = (input[gid] >> pass) & 0x01u;
    atomicAdd(&histogram[bin], 1);  // contended across the grid, but correct
} |
7,622 | #include <cuda.h>
#include <stdio.h>
// Times a small piece of host work between two CUDA events.
int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    printf("Hello World\n");
    unsigned sum = 0;
    for (unsigned ii = 0; ii < 100000; ++ii)
        sum += ii;
    cudaEventRecord(stop, 0);
    // BUG FIX: cudaEventElapsedTime requires the stop event to have completed;
    // without this synchronize it can return cudaErrorNotReady / garbage.
    cudaEventSynchronize(stop);
    float elapsedtime;
    cudaEventElapsedTime(&elapsedtime, start, stop);
    printf("time = %f ms\n", elapsedtime);
    // BUG FIX: events were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
7,623 | /**
* This program computes the 1-D convolution with naive algorithm in parallel
* that uses advantage of constant memory.
*/
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <algorithm>
#include <vector>
using std::cout;
using std::generate;
using std::vector;
// KSIZE, TILE_SIZE and MAX_MASK_WIDTH should all
// have the same number
#define ISIZE 2000
#define KSIZE 500
#define BLOCK_SIZE 10
#define TILE_SIZE 500
#define MAX_MASK_WIDTH 500
__constant__ float K[MAX_MASK_WIDTH];
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n", __FILE__, __LINE__);\
return EXIT_FAILURE;}} while(0)
#define CHECK(x) do { if((x) !=cudaSuccess) { \
printf("Error at %s:%d\n", __FILE__, __LINE__); \
return EXIT_FAILURE;}} while(0)
// kernel function
// Tiled 1-D convolution against the constant-memory mask K.
// One output element per thread; assumes blockDim.x <= TILE_SIZE and that
// the caller sized O for at least gridDim.x*blockDim.x elements.
__global__ void convolution_1D_basic_kernal(float *I, float *O,
int Mask_Width, int Width){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ float I_ds[TILE_SIZE];
    // BUG FIX: the staging load read I[i] unconditionally, which is out of
    // bounds whenever the grid is larger than the valid input. Slots for
    // i >= Width are never consumed below (readers re-check I_index < Width),
    // so zero-filling them preserves every computed result.
    I_ds[threadIdx.x] = (i < Width) ? I[i] : 0.0f;
    __syncthreads();  // tile fully staged before any thread reads it
    int This_tile_start_point = blockIdx.x * blockDim.x;
    int Next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
    int N_start_point = i - (Mask_Width/2);   // leftmost input this output touches
    float Ovalue = 0;
    for (int j = 0; j < Mask_Width; j++) {
        int I_index = N_start_point + j;
        if (I_index >= 0 && I_index < Width) {
            if ((I_index >= This_tile_start_point) && (I_index < Next_tile_start_point)) {
                // In-tile element: serve from shared memory.
                Ovalue += I_ds[threadIdx.x + j - (Mask_Width/2)] * K[j];
            } else {
                // Halo element: fall back to global memory.
                Ovalue += I[I_index] * K[j];
            }
        }
    }
    O[i] = Ovalue;
}
// cpu function
// Serial reference: accumulates every in-range host_i[p]*host_k[j] term
// directly into the integer checksum `cpuRef` (one truncating add per term,
// exactly matching the original arithmetic).
void convolution_1D_basic_kernal_CPU(vector<float> &host_i, vector<float> &host_k,
int &cpuRef, int Mask_Width, int Width, int size){
    cpuRef = 0.0;
    const int half = Mask_Width / 2;
    for (int out = 0; out < size; ++out) {
        for (int j = 0; j < Mask_Width; ++j) {
            const int p = out - half + j;
            if (p >= 0 && p < Width)
                cpuRef += host_i[p] * host_k[j];
        }
    }
}
// function for checking gpu reference array
// Collapses the GPU output array into the integer checksum `gpuRef`
// (truncating float-to-int add per element, same scheme as the CPU reference).
void checkArray(vector<float> &host_o, int &gpuRef,int size){
    gpuRef = 0;
    for (int idx = 0; idx != size; ++idx)
        gpuRef += host_o[idx];
}
// main function
// Driver: builds random input/kernel arrays, runs the tiled 1-D convolution
// on the GPU (mask in constant memory) and a serial CPU reference, then
// compares integer checksums of the two results.
int main(void){
// check and set device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int cpuRef = 0;
int gpuRef = 0;
int isize = ISIZE; // size of function f or array f
int ksize = KSIZE; // size of function g or array g
int osize = ISIZE + KSIZE - 1; // size of output function f*g.
int blockSize = BLOCK_SIZE;
// NOTE(review): the kernel is invoked with width = ISIZE/10 and
// mask_width = 2*width+1, not with isize/ksize — confirm that is intended.
int width = (int) ISIZE/10;
int mask_width = 2*width +1;
printf("size of i array: %d\n", isize);
printf("size of k array: %d\n", ksize);
printf("size of block: %d\n", BLOCK_SIZE);
// initialize array
vector<float> host_i (isize);
vector<float> host_k (ksize);
vector<float> host_o (osize);
vector<float> cpuRefArr (osize);
// initialize random number
srand ((int)time(0));
// generate elements in arrays
generate(host_i.begin(), host_i.end(), []() { return rand() % 9; });
generate(host_k.begin(), host_k.end(), []() { return rand() % 9; });
//memory allocation
float *dev_i, *dev_k, *dev_o;
cudaMalloc(&dev_i, isize * sizeof(float));
cudaMalloc(&dev_k, ksize * sizeof(float));
cudaMalloc(&dev_o, osize * sizeof(float));
//cudaMemcopyHostToDevice
cudaMemcpy(dev_i, host_i.data(), isize * sizeof(float), cudaMemcpyHostToDevice);
//instead of cudaMemcopyHostToDevice we use cudaMemcpyToSymbol
// to copy to constant memory.
cudaMemcpyToSymbol(K, host_k.data(), ksize * sizeof(float));
//initalize dimension
// NOTE(review): the names are swapped relative to their use — `block` holds
// the thread count (isize/blockSize = 200) and `grid` holds the block count
// (BLOCK_SIZE = 10); the launch below is <<<grid, block>>>. Total threads
// (2000) is smaller than osize (2499), so the tail of host_o is never
// written by the kernel — confirm.
dim3 block(isize/blockSize);
dim3 grid(blockSize);
float GPUtime, CPUtime;
cudaEvent_t start, stop;
// timer starts for GPU calculation
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//kernel launch
convolution_1D_basic_kernal <<< grid, block >>> (dev_i, dev_o, mask_width, width);
//cudaMemcopyDeviceToHost
// (blocking copy: also acts as the synchronization point for the kernel)
cudaMemcpy(host_o.data(), dev_o, osize * sizeof(float), cudaMemcpyDeviceToHost);
// timer stops for GPU calculation
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&GPUtime, start, stop);
// timer starts for CPU calculation
// NOTE(review): the events are re-created without destroying the previous
// pair, leaking two events.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//calculate on CPU and check the result (single thread)
convolution_1D_basic_kernal_CPU(host_i, host_k, cpuRef, mask_width, width, osize);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&CPUtime, start, stop);
printf("Compute time on GPU: %3.6f ms \n", GPUtime);
printf("Compute time on CPU: %3.6f ms \n", CPUtime);
//checkResult
checkArray(host_o, gpuRef, osize);
// NOTE(review): cpuRef/gpuRef are ints, so comparing |diff| to 1e-8 is an
// exact-equality test in disguise.
double epsilon = 1.0E-8;
if(abs(cpuRef - gpuRef)<epsilon)
printf("Check Result: Arrays matched\n");
else
printf("Check Result: Arrays do not match\n");
//Free Memory
cudaFree(dev_i);
cudaFree(dev_k);
cudaFree(dev_o);
return(0);
} |
7,624 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Aborts the program with a file:line diagnostic if any prior CUDA call left
// an error in the runtime's sticky error slot (invoked via the CUDA_CHECK macro).
void cuda_check(string file, int line)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    cout << endl << file << ", line " << line << ": "
         << cudaGetErrorString(status) << " (" << status << ")" << endl;
    exit(1);
}
// Element-wise c[i] = a[i] + b[i]; one thread per element, out-of-range
// threads return immediately.
__device__ void add_array(float *a, float *b, float *c, int n)
{
    const size_t i = threadIdx.x + (size_t)blockDim.x * blockIdx.x;
    if (i >= (size_t)n) return;
    c[i] = a[i] + b[i];
}
// Kernel entry point: __global__ trampoline so the __device__ add_array
// logic can be launched from the host.
__global__ void add_array_wrapper(float *a, float *b, float *c, int n)
{
add_array(a, b, c, n);
}
// Course exercise driver: computes a+b on the CPU, then performs the same
// addition on the GPU and prints both results for comparison.
int main(int argc, char **argv)
{
// alloc and init input arrays on host (CPU)
int n = 20;
float *a = new float[n];
float *b = new float[n];
float *c = new float[n];
for(int i=0; i<n; i++)
{
a[i] = i;
b[i] = (i%5)+1;
c[i] = 0;
}
// CPU computation
for(int i=0; i<n; i++) c[i] = a[i] + b[i];
// print result
cout << "CPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
cout << endl;
// init c
for(int i=0; i<n; i++) c[i] = 0;
// copy to device
float *d_a, *d_b, *d_c;
size_t nbytes = (size_t)(n)*sizeof(float);
cudaMalloc(&d_a, nbytes); CUDA_CHECK;
cudaMalloc(&d_b, nbytes); CUDA_CHECK;
cudaMalloc(&d_c, nbytes); CUDA_CHECK;
cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_b, b, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_c, c, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
// launch kernel
dim3 block = dim3(128,1,1);
// (commented-out line kept for reference: it contains a typographic en-dash
// instead of a minus sign, which does not compile)
// dim3 grid = dim3((n + block.x –1) / block.x, 1, 1);
// ceil-division so all n elements are covered
dim3 grid = dim3((n+block.x-1)/block.x,1,1);
add_array_wrapper<<<grid, block>>>(d_a, d_b, d_c, n);
// copy to host and deallocate
// (blocking copy: also synchronizes with the asynchronous kernel launch)
cudaMemcpy(c, d_c, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaFree(d_a); CUDA_CHECK;
cudaFree(d_b); CUDA_CHECK;
cudaFree(d_c); CUDA_CHECK;
// GPU computation
// ###
// ### TODO: Implement the array addition on the GPU, store the result in "c"
// ###
// ### Notes:
// ### 1. Remember to free all GPU arrays after the computation
// ### 2. Always use the macro CUDA_CHECK after each CUDA call, e.g. "cudaMalloc(...); CUDA_CHECK;"
// ### For convenience this macro is defined directly in this file, later we will only include "helper.h"
// print result
cout << "GPU:"<<endl;
for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
cout << endl;
// free CPU arrays
delete[] a;
delete[] b;
delete[] c;
}
|
7,625 | #include <stdio.h>
// Device-side hello: each launched thread prints one line through the
// device printf buffer.
__global__ void cuda_hello(){
printf("Hello from your GPU\n");
}
int main(void){
    cuda_hello<<<1,1>>>();
    // BUG FIX: kernel launches are asynchronous; without a synchronize the
    // process can exit before the device printf buffer is flushed, so the
    // greeting may never appear.
    cudaDeviceSynchronize();
    return 0;
}
|
7,626 | #include "includes.h"
// Dense matrix-vector product, one row per thread:
//   output[row] = dot(fltMatrix[row, :], vec)
// fltMatrix is row-major rows x columns; threads with row >= rows idle.
__global__ void matrixVectorMultKernel(float* fltMatrix, float* vec, float* output, int rows, int columns){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < rows){
        float sum = 0.0f;
        for (int col = 0; col < columns; ++col) {
            // BUG FIX: was `+ vec[col]`, which summed matrix and vector
            // entries instead of multiplying them — not a mat-vec product.
            sum += fltMatrix[row * columns + col] * vec[col];
        }
        output[row] = sum;
    }
} |
7,627 | /*
* a simple test
*/
__device__ float data1[1024];
__device__ float data2[1024];
__device__ float data3[1024];
// Writes constants into the three 1024-float buffers, executed by thread 0
// of each block only.
// NOTE(review): j, k and l start at 0,0,1 and are incremented together, so
// the written index is 3*i + 1 — past i = 341 this runs beyond the
// 1024-element arrays. If the intent was to touch each element once the
// index should simply be i; given the file header says "a simple test",
// the overflow may be a deliberate analyser test case — confirm before
// changing.
__device__ void mult(float d1[1024],
float d2[1024],
float d3[1024])
{
int i;
int j, k, l;
if (threadIdx.x != 0)
return;
j = 0;
k = 0;
l = 1;
for (i = 0; i < 1024; i++) {
d1[j+k+l] = 1.0;
d2[j+k+l] = 0.0;
d3[j+k+l] = 2.0;
j++;
k++;
l++;
}
}
// Kernel entry point; `start` and `end` are currently unused — the call
// always operates on the whole of the file-scope data arrays.
__global__ void doit(int start, int end) {
mult(data1, data2, data3);
}
|
7,628 |
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
__global__ void get_negative_columns(float * d_matrix, int *d_result, int size);
void fillUpMatrix(float *matrix, int size);
__device__ int d_next_idx = 0;
__device__ int *d_result_cnt = 0;
//__device__ int *d_result;
// Builds a random square matrix, runs get_negative_columns on the GPU
// (one thread per row index), then prints how many rows qualified and the
// collected indices.
int main(int argc, char *argv[])
{
    // Element count: default 1M, or parsed from argv[1].
    long numElement = 0;
    if (argc < 2)
    {
        numElement = 1000000;
        printf("no arg given, %li elements will be generated.\n", numElement);
    }
    else
    {
        numElement = (long)strtod(argv[1], NULL);
    }
    // Use the largest square matrix that fits in numElement entries.
    const long size = (long)sqrt(numElement);
    printf("\nWe will be working with %li X %li matrix.\n", size, size);
    numElement = size * size;
    const int MATRIX_BYTE_SIZE = numElement * sizeof(float);
    const int RESULT_BYTE_SIZE = size * sizeof(int);
    float *h_matrix = (float *)malloc(MATRIX_BYTE_SIZE);
    fillUpMatrix(h_matrix, size);
    // Device copies.
    float *d_matrix;
    cudaMalloc((void **)&d_matrix, MATRIX_BYTE_SIZE);
    cudaMemcpy(d_matrix, h_matrix, MATRIX_BYTE_SIZE, cudaMemcpyHostToDevice);
    int *d_result;
    cudaMalloc((void **)&d_result, RESULT_BYTE_SIZE);
    int *h_result = (int *)malloc(RESULT_BYTE_SIZE);
    // Launch one thread per row.
    // NOTE(review): when size > 1000 this assumes size is a multiple of
    // BLOCK_WIDTH; any remainder rows are never scanned — confirm.
    const int NUM_THREAD = size;
    const int BLOCK_WIDTH = 1000;
    if (NUM_THREAD > 1000)
        get_negative_columns<<<NUM_THREAD / BLOCK_WIDTH, BLOCK_WIDTH>>>(d_matrix, d_result, size);
    else
        get_negative_columns<<<1, NUM_THREAD>>>(d_matrix, d_result, size);
    int h_result_cnt = 0;
    // copy the result of process from GPU (blocking copy also synchronizes
    // with the kernel launch above)
    cudaMemcpy(h_result, d_result, RESULT_BYTE_SIZE, cudaMemcpyDeviceToHost);
    // BUG FIX: the count of appended results lives in the __device__ symbol
    // d_next_idx (each qualifying row atomically reserves one slot from it).
    // The old code did cudaMemcpy(&h_result_cnt, &d_result_cnt, ...), which
    // takes the host-side address of a device symbol (invalid) — and
    // d_result_cnt is a never-allocated pointer besides. __device__ symbols
    // must be read with cudaMemcpyFromSymbol.
    cudaMemcpyFromSymbol(&h_result_cnt, d_next_idx, sizeof(int));
    printf("\nTHIS IS THE RESULT: %d \n", h_result_cnt);
    for (int i = 0; i < size; ++i) {
        printf("%d\n", *(h_result + i));
    }
    // free GPU memory allocation
    cudaFree(d_matrix);
    cudaFree(d_result);
    // BUG FIX: the host buffers were leaked.
    free(h_matrix);
    free(h_result);
    return 0;
}
//expects "matrix" to of shape (size X size)
// Fills a size x size matrix with pseudo-random values: roughly 10% exact
// zeros, the rest rnd1-rnd2 with rnd1 in [0,2.55] and rnd2 in [0,1] (so
// mildly negative entries are common). Re-seeds the C PRNG from the wall
// clock on every call. The two rand() draws per cell are kept in the
// original order so the generated sequence is unchanged.
void fillUpMatrix(float *matrix, int size)
{
    srand((unsigned int)time(NULL));
    for (int row = 0; row < size; ++row) {
        float *rowPtr = matrix + (row * size);
        for (int col = 0; col < size; ++col) {
            float magnitude = (float)rand() / (float)(RAND_MAX/2.55);  // first draw
            float gate = (float)rand() / RAND_MAX;                     // second draw
            rowPtr[col] = (gate < 0.1) ? 0.0 : magnitude - gate;
        }
    }
}
// Flat 1-D global thread index: block offset plus lane within the block.
__device__ int getGlobalIdx(){
return blockIdx.x *blockDim.x + threadIdx.x;
}
// One thread per index `idx`: scans the `idx`-th stripe of the size x size
// matrix (stride idx*size + i), counting exact zeros and negative entries.
// Stripes where zeros*2 >= negs append their index to d_result via a slot
// atomically reserved from the global counter d_next_idx; index 0 is stored
// as the sentinel -2 so it is distinguishable from unwritten entries.
// NOTE(review): despite the function name, the access pattern walks a *row*
// of the row-major matrix, not a column — confirm which was intended.
__global__ void get_negative_columns(float *d_matrix, int *d_result, int size){
int idx = getGlobalIdx();
//d_result[idx] = 3030;
//printf("Hello World! I'm a thread in thread %d\n", idx);
///*
//int idx = vlockIdx.x * blockDim.x + threadIdx.x;
//int idx = blockDim.x + threadIdx.x;
int zeros = 0;
int negs = 0;
for (size_t i = 0; i < size; i++)
{
float value = *(d_matrix + (idx * size) +i);
if (value == 0.0){ zeros++; }
else if (value < 0.0){ negs++; }
}
if (zeros * 2 >= negs)
{
// Reserve the next free output slot; the post-increment on my_idx below
// has no further effect (my_idx is not reused).
int my_idx = atomicAdd(&d_next_idx, 1);
if ((my_idx + 1) < size)
{
d_result[my_idx++] = (idx==0) ? -2 : idx; //save the column idx; 0 == -2
// atomicAdd(d_result_cnt, 1); //increment the count
}
/*}else{
int my_idx = atomicAdd(&d_next_idx, 1);
if ((my_idx + 1) < size)
{
d_result[my_idx++] = -1;
}
*/
}
//
//*/
}
|
7,629 | #define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include <stdbool.h>
#include <cuda_runtime.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
// cuda macro for ensuring cuda errors are logged
#define __cuda__(ans) { cudaAssert((ans), __FILE__, __LINE__); }
// Logs a CUDA failure with its source location and (by default) terminates
// the process using the error code as exit status. Invoked via __cuda__().
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "CUDA-Assert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
/* KERNEL: Populate the addends array with the set of stress addends (values when summed will equal the stress value)
*/
/* KERNEL: Populate the addends array with the set of stress addends (values when summed will equal the stress value)
 * Grid-stride loop over the dataRows*(dataRows+1)/2 cells of the lower
 * triangle (diagonal included); each cell ix gets
 *   addends[ix] = weight * (Delta_ij - D_ij)^2
 * with diagonal cells contributing exactly 0.
 */
__global__ void generateStressAddends(float* Delta, float* D, double* addends, double weight, int dataRows) {
for (unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
ix < (dataRows * (dataRows + 1) / 2);
ix += blockDim.x * gridDim.x
){
// generate 2D indeces from 1D index, ix, in flattened matrix.
int i = ix / (dataRows + 1);
int j = ix % (dataRows + 1);
// if generated indeces lie outside of lower triangle, generate new ones inside it
// (reflection maps each out-of-triangle point to a unique in-triangle cell)
if (j > i) {
i = dataRows - i - 1;
j = dataRows - j;
}
// generate and insert stress addend into array for later summation
double n = 0.0f;
if (i != j) {
n = (double)Delta[(i * dataRows) + j] - (double)D[(i * dataRows) + j]; //use doubles to preserve precision
}
addends[ix] = (weight*(n*n));
}
}
/* Compute stress with the aid of the gpu
*/
/* Compute stress with the aid of the gpu:
 * stress = sum over the lower triangle of weight*(Delta_ij - D_ij)^2,
 * produced per-cell by generateStressAddends and reduced with thrust.
 * size_D is the byte size of each full m*m matrix; blocks/threads shape
 * the grid-stride launch. */
double computeStress(float* Delta, float* D, size_t size_D, double weight, int m, int blocks, int threads){
// NOTE(review): m*(m+1) is evaluated in int before widening — overflows
// for large m; confirm expected matrix sizes.
size_t lowerTriangleSize = ((m * (m + 1)) / 2);
float* cuda_Delta;
float* cuda_D;
// create array of stress addends
double* cuda_stressAddends;
__cuda__( cudaMalloc(&cuda_Delta, size_D) );
__cuda__( cudaMalloc(&cuda_D, size_D) );
__cuda__( cudaMalloc(&cuda_stressAddends, (lowerTriangleSize * sizeof(double))) );
__cuda__( cudaMemcpy(cuda_Delta, Delta, size_D, cudaMemcpyHostToDevice) );
__cuda__( cudaMemcpy(cuda_D, D, size_D, cudaMemcpyHostToDevice) );
generateStressAddends<<<blocks, threads>>>(cuda_Delta, cuda_D, cuda_stressAddends, weight, m);
__cuda__( cudaPeekAtLastError() );
__cuda__( cudaDeviceSynchronize() );
__cuda__( cudaFree(cuda_Delta) );
__cuda__( cudaFree(cuda_D) );
//sum reduction on all stress addends
thrust::device_ptr<double> d_ptr = thrust::device_pointer_cast(cuda_stressAddends);
__cuda__( cudaPeekAtLastError() );
double stress = thrust::reduce(d_ptr, (d_ptr + lowerTriangleSize));
__cuda__( cudaDeviceSynchronize() );
__cuda__( cudaFree(cuda_stressAddends) );
return stress;
}
/* KERNEL: Populate the addends array with the set of normalized stress weight denominator values
*/
/* KERNEL: Populate the addends array with the set of normalized stress weight denominator values
 * Same lower-triangle grid-stride enumeration as generateStressAddends, but
 * each off-diagonal cell contributes Delta_ij^2 (diagonal cells contribute 0);
 * the summed result becomes the normalisation denominator.
 */
__global__ void generateNormalizedStressDenominatorAddends(float* Delta, double* addends, int dataRows) {
for (unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
ix < (dataRows * (dataRows + 1) / 2);
ix += blockDim.x * gridDim.x
){
// generate 2D indeces from 1D index, ix, in flattened matrix.
int i = ix / (dataRows + 1);
int j = ix % (dataRows + 1);
// if generated indeces lie outside of lower triangle, generate new ones inside it
if (j > i) {
i = dataRows - i - 1;
j = dataRows - j;
}
// generate and insert stress weight denominator addend into array for later summation
if (i != j) {
addends[ix] = (double)Delta[(i * dataRows) + j] * (double)Delta[(i * dataRows) + j]; //use doubles to preserve precision
} else {
addends[ix] = 0.0f;
}
}
}
/* Computes normalized stress with the aid of the gpu
*/
/* Computes normalized stress with the aid of the gpu:
 * weight = 1 / sum(Delta_ij^2), then stress = weight * sum((Delta_ij - D_ij)^2),
 * both sums over the lower triangle. Two kernel+reduce passes: first the
 * denominator, then the weighted stress addends. */
double computeNormalizedStress(float* Delta, float* D, size_t size_D, int m, int blocks, int threads) {
size_t lowerTriangleSize = ((m * (m + 1)) / 2);
float* cuda_Delta;
float* cuda_D;
// create array of normalized stress denominator addends
double* cuda_denominatorAddends;
__cuda__( cudaMalloc(&cuda_Delta, size_D) );
__cuda__( cudaMalloc(&cuda_denominatorAddends, (lowerTriangleSize * sizeof(double))) );
__cuda__( cudaMemcpy(cuda_Delta, Delta, size_D, cudaMemcpyHostToDevice) );
generateNormalizedStressDenominatorAddends<<<blocks, threads>>>(cuda_Delta, cuda_denominatorAddends, m);
__cuda__( cudaPeekAtLastError() );
__cuda__( cudaDeviceSynchronize() );
//sum reduction on all normalized stress weight denominator addends
thrust::device_ptr<double> d_ptr = thrust::device_pointer_cast(cuda_denominatorAddends);
__cuda__( cudaPeekAtLastError() );
double weight = 1.0f / thrust::reduce(d_ptr, (d_ptr + lowerTriangleSize));
__cuda__( cudaDeviceSynchronize() );
__cuda__( cudaFree(cuda_denominatorAddends) );
// create array of normalized stress addends
// (cuda_Delta is intentionally kept alive from the first pass and reused)
double* cuda_stressAddends;
__cuda__( cudaMalloc(&cuda_D, size_D) );
__cuda__( cudaMalloc(&cuda_stressAddends, (lowerTriangleSize * sizeof(double))) );
__cuda__( cudaMemcpy(cuda_D, D, size_D, cudaMemcpyHostToDevice) );
generateStressAddends<<<blocks, threads>>>(cuda_Delta, cuda_D, cuda_stressAddends, weight, m);
__cuda__( cudaPeekAtLastError() );
__cuda__( cudaDeviceSynchronize() );
__cuda__( cudaFree(cuda_Delta) );
__cuda__( cudaFree(cuda_D) );
//sum reduction on all normalized stress addends
d_ptr = thrust::device_pointer_cast(cuda_stressAddends);
__cuda__( cudaPeekAtLastError() );
double stress = thrust::reduce(d_ptr, (d_ptr + lowerTriangleSize));
__cuda__( cudaDeviceSynchronize() );
__cuda__( cudaFree(cuda_stressAddends) );
return stress;
}
/* Computes normalized stress without the aid of the gpu.
*/
/* Host-only reference for the GPU path above:
 * normalized stress = (sum_{i<j} (Delta_ij - D_ij)^2) / (sum_{i<j} Delta_ij^2),
 * iterating the strict upper triangle only. O(m^2). Per-term float products /
 * differences are kept exactly as in the GPU-era original so rounding matches. */
double computeNormalizedStressSerial(float* Delta, float* D, int m) {
    double denom = 0.0f;
    for (int i = 0; i < m; i++)
        for (int j = i + 1; j < m; j++)
            denom += (Delta[(i*m)+j] * Delta[(i*m)+j]);   // float multiply, double accumulate
    const double invDenom = 1.0f / denom;
    double result = 0.0f;
    for (int i = 0; i < m; i++)
        for (int j = i + 1; j < m; j++) {
            double n = (Delta[(i*m)+j] - D[(i*m)+j]);     // float subtract, widened
            result += (n*n);
        }
    result *= invDenom;
    return result;
}
7,630 | #include "includes.h"
// Accumulates mean-squared error into *cost: each in-range thread atomically
// adds (pred - target)^2 / size. *cost must be zeroed before launch; float
// atomics make the summation order (and thus final rounding) nondeterministic.
__global__ void msecost(float* predictions, float* target, int size, float* cost) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    const float diff = predictions[i] - target[i];
    atomicAdd(cost, (diff * diff) / size);
} |
7,631 | #include<iostream>
#include "cuda.h"
#include "assert.h"
#include <chrono>
#define N (1<<11)
// Naive host-side N x N matrix multiply reference: c = local_a x local_b
// (row-major; b is indexed column-wise as b[j + k*N]).
void local_execute(int *c, int *local_a, int *local_b){
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            int s = 0;
            for(int k=0;k<N;k++){
                // BUG FIX: was `c[i*N+j] = s + a*b` with s never updated, so
                // only the k=N-1 term survived; accumulate the dot product.
                s += local_a[i*N + k]*local_b[j + k*N];
            }
            c[i*N+j] = s;
        }
    }
}
//
//int verify(int *c){
// int error = 0;
// for(int i=0;i<size;i++){
// error = error + abs(4-c[i]);
// }
// return error;
//}
//
// Hard-stops (via assert) if a CUDA API call did not return cudaSuccess.
// Note: compiled out entirely under NDEBUG.
void check_error(cudaError_t e){
assert(e == cudaSuccess);
}
//
// GPU matmul, one block (single thread) per output row i = blockIdx.x,
// matching the host launch <<<N,1>>>. c = a x b, b indexed column-wise.
__global__ void matmul_gpu(int *a,int *b,int *c){
    int i = blockIdx.x;
    for(int j=0;j<N;j++){
        int s = 0;
        for(int k=0;k<N;k++){
            // BUG FIX: accumulate the dot product (the old code overwrote
            // c with `s + term` while s stayed 0, keeping only the last term).
            s += a[i*N + k]*b[j + k*N];
        }
        c[i*N+j] = s;
    }
}
//
// Allocates device buffers, uploads a and b, runs matmul_gpu with one
// single-thread block per row, and downloads the product into local_c.
void gpu_execute(int *local_a, int* local_b, int *local_c){
int *a,*b,*c;
check_error(cudaMalloc(&a, N * N * sizeof(int)));
check_error(cudaMalloc(&b, N * N * sizeof(int)));
check_error(cudaMalloc(&c, N * N * sizeof(int)));
check_error(cudaMemcpy(a,local_a,N * N * sizeof(int),cudaMemcpyHostToDevice));
check_error(cudaMemcpy(b,local_b,N * N * sizeof(int),cudaMemcpyHostToDevice));
matmul_gpu<<<N,1>>>(a,b,c);
// The blocking memcpy below already waits for the kernel; the extra
// cudaDeviceSynchronize afterwards is redundant but harmless.
// NOTE(review): the kernel launch status itself is never checked.
check_error(cudaMemcpy(local_c,c,N * N * sizeof(int),cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
/*
* Current Speed UP:
* GPU Run time 3904ms
* Local run time 80398ms
*/
// Benchmarks the GPU matmul against the serial host version.
int main(){
    int * local_a = (int *)malloc(sizeof(int) * N * N);
    int * local_b = (int *)malloc(sizeof(int) * N * N);
    int * c = (int *)malloc(sizeof(int) * N * N);
    // BUG FIX: only the first N of the N*N entries were initialised,
    // leaving most of both operands indeterminate.
    for(int i=0;i<N*N;i++){
        local_a[i]= 1;
        local_b[i]= 1;
    }
    std::cout << "Matrix Size" << ((N * N * 4)/(1<<20)) <<"MB\n";
    auto start_time = std::chrono::high_resolution_clock::now();
    // BUG FIX: the call was gpu_execute(c, local_a, local_b), which fed the
    // uninitialised output buffer in as operand `a` and clobbered local_b
    // with the result. The signature is (a, b, c_out) — see the (correct)
    // commented-out call that was at the bottom of the original file.
    gpu_execute(local_a, local_b, c);
    auto end_time = std::chrono::high_resolution_clock::now();
    std::cout << "GPU Run time " << (end_time - start_time)/std::chrono::milliseconds(1) <<"ms \n";
    start_time = std::chrono::high_resolution_clock::now();
    local_execute(c,local_a,local_b);
    end_time = std::chrono::high_resolution_clock::now();
    std::cout << "Local run time " << (end_time - start_time)/std::chrono::milliseconds(1) <<"ms \n";
    free(local_a);
    free(local_b);
    free(c);
}
|
7,632 | #include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include "Kernels/incSuperKernel.cu"
#include <pthread.h>
/////////////////////////////////////////////////////////////////
// Global Variables
/////////////////////////////////////////////////////////////////
// Checkpoint trace: prints the runtime's last error status unconditionally
// (prints "no error" when everything is fine). cudaGetLastError also clears
// the sticky error, so each checkpoint starts from a clean slate.
void printAnyErrors()
{
cudaError_t e = cudaGetLastError();
printf("CUDA error: %s \n", cudaGetErrorString(e));
}
////////////////////////////////////////////////////////////////////
// The Main
////////////////////////////////////////////////////////////////////
// Host side of a persistent-kernel handshake: element 0 of each buffer is a
// flag word. The superKernel is launched first on its own stream, the host
// then streams the payload in, raises the input flag (7), busy-polls the
// output flag, and finally reads back the results.
int main(int argc, char **argv)
{
cudaStream_t stream_kernel, stream_dataIn, stream_dataOut;
cudaStreamCreate(&stream_kernel);
cudaStreamCreate(&stream_dataIn);
cudaStreamCreate(&stream_dataOut); //currently these arent used
int size = 5;
// NOTE(review): h_init/h_result are ordinary malloc'd memory; for truly
// asynchronous cudaMemcpyAsync these would need to be pinned
// (cudaMallocHost) — confirm whether overlap is actually required here.
int* h_init = (int*)malloc((size+1)*sizeof(int));
int* h_result = (int*)malloc((size+1)*sizeof(int));
int* d_init;
cudaMalloc(&d_init, (size+1)*sizeof(int));
int* d_result;
cudaMalloc(&d_result, (size+1)*sizeof(int));
h_init[0]=0; //set the data ready flag to false
cudaMemcpyAsync(d_init, h_init, sizeof(int), cudaMemcpyHostToDevice,stream_dataIn);
cudaStreamSynchronize(stream_dataIn);
h_result[0]=0; //set the data ready flag to false
cudaMemcpyAsync(d_result, h_result, sizeof(int), cudaMemcpyHostToDevice,stream_dataOut);
cudaStreamSynchronize(stream_dataOut);
dim3 threads(32, 1);
dim3 grid(1, 1);
printf("launching SuperKernel\n");
// call the cudaMatrixMul cuda function
// (kernel runs on its own stream so the data streams can feed it while
// it spins waiting for the flag)
superKernel<<< grid, threads, 0, stream_kernel>>>(d_init, size, d_result);
//PRINT HERE
printAnyErrors();
//Make inputs and transfer them
int j;
for(j=1;j<size+1;j++)h_init[j] = j;
printf("launching cudaMemcpy Data\n");
cudaMemcpyAsync(&d_init[1], &h_init[1], size*sizeof(int), cudaMemcpyHostToDevice, stream_dataIn);
cudaStreamSynchronize(stream_dataIn);
//PRINT HERE
printAnyErrors();
//Mark flag as ready
printf("launching cudaMemcpy Flag\n");
h_init[0]=7;
cudaMemcpyAsync(d_init, h_init, sizeof(int), cudaMemcpyHostToDevice,stream_dataIn);
cudaStreamSynchronize(stream_dataIn);
//wait for result flag to be on
// (busy-poll: repeatedly re-reads the device-side flag until the kernel
// raises it)
while(h_result[0]==0) { cudaMemcpyAsync(h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost, stream_dataOut);
cudaStreamSynchronize(stream_dataOut);
printf("got value h_result[0]: %d\n", h_result[0]); }
//PRINT HERE
printAnyErrors();
//Get and print results
cudaMemcpyAsync(&h_result[1], &d_result[1], size*sizeof(int), cudaMemcpyDeviceToHost, stream_dataOut);
cudaStreamSynchronize(stream_dataOut);
int i;
for(i=0; i<size; i++) printf("intial value: %d\t final value: %d\n", h_init[i+1], h_result[i+1]);
//PRINT HERE
printAnyErrors();
// NOTE(review): streams, device buffers and host buffers are never
// released before exit.
return 0;
}
|
7,633 | #include <iostream>
#include <stdio.h>
#include <ctime>
using namespace std;
// Builds the identity permutation 0..n-1, then shuffles it with n random
// transpositions (relies on the caller having seeded rand()).
void initIndex(unsigned long long *index, unsigned long long n)
{
    for(unsigned long long k=0; k<n; k++)
        index[k] = k;
    for(unsigned long long k=0; k<n; k++)
        swap(index[k], index[rand()%n]);
}
// Fills a[0..n) with positive pseudo-random values: range [1,1001] when n is
// below the threshold (2<<10 == 2048), range [1,n+1] otherwise.
void initArray(unsigned long long *a, unsigned long long n)
{
    const bool smallArray = n < (2<<10);
    for(int k = 0; k < n; k++)
        a[k] = smallArray ? (rand()%1001 + 1) : (rand()%(n+1) + 1);
}
// Polynomial benchmark: c = 5a^3 + 7ab + 8b^2 - b, four consecutive elements
// per thread, with a per-thread SM cycle-counter measurement around the work.
// The launch in main sizes the grid as n/4 threads to match the 4x unroll.
__global__ void somaPolinomio(unsigned long long *a, unsigned long long *b, unsigned long long n, unsigned long long *c)
{
unsigned long long idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx < n)
{
// %%clock reads the SM's cycle counter, so c2-c1 is in cycles
// (the printf label says "ms" but the unit is clock ticks).
unsigned int c1=0,c2=0;
asm("mov.u32 %0,%%clock;":"=r"(c1));
#pragma unroll
for(int i = 0; i < 4; i++)
{
int pos = idx*4+i;
c[pos] = 5*(a[pos]*a[pos]*a[pos]) + 7*a[pos]*b[pos] + 8*b[pos]*b[pos] - b[pos] ;
}
asm("mov.u32 %0,%%clock;":"=r"(c2));
if(idx == 0)
printf("soma polinomio : %u ms\n",c2-c1);
}
return;
}
// Vector-add benchmark: c = a + b, four consecutive elements per thread,
// timed with the SM cycle counter (units are cycles despite the "ms" label).
__global__ void somaVetor(unsigned long long *a, unsigned long long *b, unsigned long long n, unsigned long long *c)
{
unsigned long long idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx < n)
{
unsigned int c1=0,c2=0;
asm("mov.u32 %0,%%clock;":"=r"(c1));
for(int i = 0; i < 4; i++)
{
int pos = idx*4+i;
c[pos] = a[pos] + b[pos];
}
asm("mov.u32 %0,%%clock;":"=r"(c2));
if(idx == 0)
printf("soma vetor : %u ms\n",c2-c1);
}
return;
}
// Access-pattern benchmark: performs one sequential add (c[idx] = a+b) and
// one permuted add through the index table, timing each with the SM cycle
// counter to contrast coalesced vs. random global-memory access.
// Note: one element per thread here (unlike the 4x-unrolled kernels above).
__global__ void VetorRandom(unsigned long long *a, unsigned long long *b, unsigned long long n, unsigned long long *c, unsigned long long *index)
{
unsigned long long idx = blockDim.x*blockIdx.x + threadIdx.x;
if(idx < n)
{
// Sequential access
unsigned int c1=0,c2=0;
asm("mov.u32 %0,%%clock;":"=r"(c1));
int pos = idx;
c[pos] = a[pos] + b[pos];
asm("mov.u32 %0,%%clock;":"=r"(c2));
// Random (permuted) access
if(idx == 0)
printf("Acessa Vetor Sequencial : %u ms\n",c2-c1);
c1=0;c2=0;
asm("mov.u32 %0,%%clock;":"=r"(c1));
c[index[pos]] = a[index[pos]] + b[index[pos]];
asm("mov.u32 %0,%%clock;":"=r"(c2));
if(idx == 0)
printf("Acessa Vetor Random : %u ms\n",c2-c1);
}
return;
}
// Driver for the memory-access benchmarks: parses n from argv[1], fills the
// operand arrays and a random permutation, then runs the three kernels.
int main(int argc, char **argv)
{
    srand(time(NULL));
    // sizes to test: 1M, 2M, 10M, 20M, 32M
    // Robustness fix: the old code dereferenced argv[1] unconditionally.
    if (argc < 2)
    {
        cout << "usage: " << argv[0] << " <num_elements>\n";
        return 1;
    }
    // Parse a decimal element count from argv[1].
    unsigned long long n = 0;
    for(int i = 0; argv[1][i] != '\0'; i++)
        n = n*10 + (argv[1][i]-'0');
    // Host arrays.
    unsigned long long * h_a = new unsigned long long[n];
    unsigned long long * h_b = new unsigned long long[n];
    unsigned long long * h_c = new unsigned long long[n];
    unsigned long long * h_index = new unsigned long long[n];
    initArray(h_a,n);
    initArray(h_b,n);
    initIndex(h_index,n);
    // Device arrays.
    unsigned long long * d_a, *d_b, *d_c;
    cudaMalloc(&d_a,sizeof(unsigned long long)*n);
    cudaMalloc(&d_b,sizeof(unsigned long long)*n);
    cudaMalloc(&d_c,sizeof(unsigned long long)*n);
    unsigned long long * d_index;
    cudaMalloc(&d_index,sizeof(unsigned long long)*n);
    // Host -> device.
    cudaMemcpy(d_a, h_a, sizeof(unsigned long long)*n,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(unsigned long long)*n,cudaMemcpyHostToDevice);
    cudaMemcpy(d_index, h_index, sizeof(unsigned long long)*n,cudaMemcpyHostToDevice);
    // Launch config: somaVetor/somaPolinomio handle 4 elements per thread,
    // hence the n/4 grid sizing (block size itself is arbitrary).
    dim3 block,grid;
    block.x = 1024;
    grid.x = ((n/4 + block.x -1)/block.x);
    somaVetor<<<grid,block>>>(d_a,d_b,n,d_c);
    cudaDeviceSynchronize();
    cudaMemcpy(h_c, d_c, sizeof(unsigned long long)*n,cudaMemcpyDeviceToHost);
    somaPolinomio<<<grid,block>>>(d_a,d_b,n,d_c);
    cudaDeviceSynchronize();
    // NOTE(review): VetorRandom touches one element per thread, so this grid
    // (sized for 4 elements/thread) only covers n/4 entries — confirm intent.
    VetorRandom<<<grid,block>>>(d_a,d_b,n,d_c,d_index);
    cudaDeviceSynchronize();
    cudaMemcpy(h_c, d_c, sizeof(unsigned long long)*n,cudaMemcpyDeviceToHost);
    // BUG FIX: these arrays were allocated with new[] but released with
    // free(), which is undefined behaviour; h_index and d_index were also
    // leaked entirely.
    delete[] h_a;
    delete[] h_b;
    delete[] h_c;
    delete[] h_index;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_index);
    // Reset the device before exit.
    cudaDeviceReset();
    return 0;
}
|
7,634 | #include <iostream>
#include <vector>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// In-place 2D addition m0 += m1 for a w x h row-major matrix; one thread per
// element with a boundary guard for partial edge blocks.
__global__ void matAdd(int * m0_d, int * m1_d, std::size_t w, std::size_t h){
    const auto col = blockIdx.x * blockDim.x + threadIdx.x;
    const auto row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= w || row >= h)
        return;
    const std::size_t at = row * w + col;
    m0_d[at] += m1_d[at];
}
// Builds two 10x10 matrices holding 0..99, adds them on the GPU and prints
// the result (expected: 2*i per element).
int main(){
    std::size_t w = 10;
    std::size_t h = 10;
    std::size_t size = w*h;
    std::vector<int> m0_h(size);
    std::vector<int> m1_h(size);
    int * m0_d = nullptr;
    int * m1_d = nullptr;
    for(std::size_t i = 0; i < size; i++){
        m0_h[i] = m1_h[i] = i;
    }
    cudaError_t err;
    err = cudaMalloc(&m0_d, m0_h.size() * sizeof(int));
    if(err != cudaSuccess){
        std::cerr << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    err = cudaMalloc(&m1_d, m1_h.size() * sizeof(int));
    // BUG FIX: this allocation's status was assigned but never inspected,
    // so a failure here went on to pass a null pointer to the kernel.
    if(err != cudaSuccess){
        std::cerr << cudaGetErrorString(err) << std::endl;
        cudaFree(m0_d);
        return 1;
    }
    cudaMemcpy(m0_d,m0_h.data(),m0_h.size() * sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(m1_d,m1_h.data(),m1_h.size() * sizeof(int),cudaMemcpyHostToDevice);
    dim3 block(32,32);
    dim3 grid((w-1)/ block.x +1, (h-1)/ block.y +1);   // ceil-div coverage
    matAdd<<<grid,block>>>(m0_d,m1_d,w,h);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if(err != cudaSuccess){
        std::cerr << cudaGetErrorString(err) << std::endl;
        cudaFree(m0_d);   // also fixed: error paths used to leak the buffers
        cudaFree(m1_d);
        return 1;
    }
    cudaMemcpy(m0_h.data(),m0_d,m0_h.size() * sizeof(int),cudaMemcpyDeviceToHost);
    for(std::size_t i = 0; i < m0_h.size(); i++){
        printf("%d\n",m0_h[i] );
    }
    cudaFree(m0_d);
    cudaFree(m1_d);
}
|
7,635 | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
// Hand-rolled float4 equivalent used by the verification test kernel below.
typedef struct {
float x,y,z,w;
} myfloat4;
// GPUVerify regression test (see the //pass header and --blockDim/--gridDim
// directives above): reads an uninitialised local struct field. Intentional —
// do not "fix"; the test asserts the tool accepts this kernel.
__global__ void k() {
myfloat4 f4;
float i0 = f4.x;
}
|
7,636 | #include "includes.h"
// q[i] = exp(-(s1[i] - s0[i]) / 2) for each i < n; `dim` is currently unused.
__global__ void returnQM ( const int dim, const int n, const float *s1, const float *s0, float *q ) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if ( i < n ) {
        // PRECISION FIX: 0.5f keeps the whole expression in single precision;
        // the old double literal 0.5 promoted the expf argument chain to
        // double before truncating back to float.
        q[i] = expf ( -0.5f * ( s1[i] - s0[i] ) );
    }
} |
7,637 | // Vector version
// Vector version: per element, runs 100 iterations of z <- z^2 + c on the
// split real/imaginary arrays, then stores exp(-|z|) into Xreal (the only
// array the host reads back).
__global__ void Iteration(double *Xreal, double *Ximag,
const double creal, const double cimag, const double N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    for (int iter = 0; iter < 100; ++iter) {
        // Imaginary part is computed from the *old* real part, so stash it
        // before overwriting Xreal.
        const double newImag = 2 * Xreal[idx] * Ximag[idx] + cimag;
        Xreal[idx] = Xreal[idx] * Xreal[idx] - Ximag[idx] * Ximag[idx] + creal;
        Ximag[idx] = newImag;
    }
    Xreal[idx] = exp(-sqrt(Xreal[idx] * Xreal[idx] + Ximag[idx] * Ximag[idx]));
}
|
7,638 | #include "RadixSort.cuh"
// Cooperative LSD radix sort over shared memory: the block's data is viewed
// as `num_list` interleaved lists (list t occupies indices t, t+num_list, ...),
// and thread `tid` sorts its own list across all 32 bit positions, using
// sort_tmp_1 as scratch space for the set-bit partition of each pass.
// NOTE(review): assumes tid < num_list and num_elements is a multiple of
// num_list — confirm at the call sites. The trailing __syncthreads()
// publishes every finished list before the merge stage reads them.
__device__ void radixSort(u32* const sort_tmp, u32* const sort_tmp_1, const u32 num_list, const u32 num_elements, const u32 tid)
{
//Sort into num_list, lists
//Apply RadixSort on 32 bits of data
for(u32 bit=0;bit<32;bit++)
{
const u32 bit_mask = (1 << bit);
u32 base_cnt_0 = 0;
u32 base_cnt_1 = 0;
// Stable partition of this thread's list: zero-bit elements are packed
// in place, one-bit elements go to the scratch buffer.
for(u32 i=0;i<num_elements;i+=num_list)
{
const u32 elem = sort_tmp[i+tid];
if((elem & bit_mask) > 0)
{
sort_tmp_1[base_cnt_1+tid] = elem;
base_cnt_1+=num_list;
}
else
{
sort_tmp[base_cnt_0+tid] = elem;
base_cnt_0+=num_list;
}
}
// Copy data to source from one list
// (append the one-bit elements after the zero-bit ones)
for(u32 i=0;i<base_cnt_1;i+=num_list)
{
sort_tmp[base_cnt_0+i+tid] = sort_tmp_1[i+tid];
}
}
__syncthreads();
}
// Stage the input in shared memory: thread `tid` copies one element out of each
// num_list-sized stripe of `data` into `sort_tmp`, then the block synchronizes
// so every thread sees the fully staged buffer.
__device__ void copyDataToShared(u32* const data, u32* const sort_tmp, const u32 num_list, const u32 num_elements, const u32 tid)
{
    for (u32 base = 0; base < num_elements; base += num_list)
    {
        sort_tmp[base + tid] = data[base + tid];
    }
    __syncthreads();
}
// Serial merge: thread 0 alone performs a num_list-way merge of the sorted,
// interleaved sub-lists in src_array into dest_array (ascending). Kept as the
// reference implementation for mergeArrays6.
__device__ void mergeArrays1(const u32 * const src_array, u32* const dest_array, const u32 num_list, const u32 num_elements, const u32 tid)
{
    // Read cursor per sub-list (how many of its elements were consumed).
    __shared__ u32 list_indexes[LISTS];
    //Multiple threads
    list_indexes[tid] = 0;
    __syncthreads();
    //Single thread
    if(tid==0)
    {
        const u32 num_elements_per_list = num_elements / num_list;
        for(u32 i=0;i<num_elements;i++)
        {
            u32 min_val = 0xFFFFFFFF;
            u32 min_idx = 0;
            //Iterate over each of the lists
            for(u32 list=0;list<num_list;list++){
                //If current list have already been emptied, then ignore it
                if(list_indexes[list] < num_elements_per_list){
                    // Sub-lists are interleaved: element k of `list` lives at
                    // list + k * num_list.
                    const u32 src_idx = list + (list_indexes[list] * num_list);
                    const u32 data = src_array[src_idx];
                    // `<=` means on ties the highest-numbered list wins;
                    // equal values are emitted either way, so the output is
                    // still sorted (the merge is just not stable).
                    if(data <= min_val){
                        min_val = data;
                        min_idx = list;
                    }
                }
            }
            // Consume the winning element and emit it.
            list_indexes[min_idx]++;
            dest_array[i] = min_val;
        }
    }
}
// Parallel num_list-way merge of the sorted, interleaved sub-lists in src_array
// into dest_array. One element is emitted per round: every thread offers the
// head of its sub-list, atomicMin elects the smallest value (lowest tid breaks
// ties), and the winning thread advances its cursor and writes the output.
__device__ void mergeArrays6(const u32 * const src_array, u32* const dest_array, const u32 num_list, const u32 num_elements, const u32 tid)
{
    const u32 num_elements_per_list = num_elements / num_list;
    // Per-block read cursors, one per sub-list.
    __shared__ u32 list_indexes[LISTS];
    // Each thread clears its own cursor.
    list_indexes[tid] = 0;
    // Wait until every cursor is cleared before the first round.
    __syncthreads();
    // Emit all elements in ascending order.
    // (was `i < N`: use the num_elements parameter instead of the global macro
    // so the helper honors its argument; callers pass N, so behavior matches)
    for (u32 i = 0; i < num_elements; i++)
    {
        // Round state shared by the whole block.
        __shared__ u32 min_val;
        __shared__ u32 min_tid;
        // This thread's candidate value.
        u32 data;
        if (list_indexes[tid] < num_elements_per_list)
        {
            // Translate the per-list cursor into the interleaved array index.
            const u32 std_idx = tid + (list_indexes[tid] * num_list);
            data = src_array[std_idx];
        }
        else
        {
            // Exhausted list: offer the sentinel so this thread never wins.
            data = 0xFFFFFFFF;
        }
        // Thread 0 resets the shared minimum before the contest starts.
        if (tid == 0)
        {
            min_val = 0xFFFFFFFF;
            min_tid = 0xFFFFFFFF;
        }
        // Make the reset visible to all threads.
        __syncthreads();
        // Every thread folds its candidate in; the smallest value survives.
        atomicMin(&min_val, data);
        __syncthreads();
        // Among threads holding the winning value, the lowest tid claims it.
        if (min_val == data)
        {
            atomicMin(&min_tid, tid);
        }
        __syncthreads();
        // The winner consumes its element and writes the output slot.
        if (tid == min_tid)
        {
            list_indexes[tid]++;
            dest_array[i] = data;
        }
        // Barrier added: without it, thread 0 could reset min_val/min_tid for
        // the next round while other threads are still comparing against this
        // round's min_tid (a data race in the original).
        __syncthreads();
    }
}
// Entry kernel: stage `data` into shared memory, radix-sort the per-thread
// sub-lists, then merge the sorted sub-lists back into global memory.
// NOTE(review): tid is a *global* thread index while the staging buffers are
// per-block shared memory, so this only works for a single-block launch
// (BLOCKS == 1) — confirm against the launch in RunKernelGPU.
__global__ void __radixSort__(u32* const data, const u32 num_list, const u32 num_elements)
{
    const u32 tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Shared staging buffers sized for the whole problem (N elements each).
    __shared__ u32 sort_tmp[N];
    __shared__ u32 sort_tmp_1[N];
    copyDataToShared(data, sort_tmp, num_list, num_elements, tid);
    radixSort(sort_tmp, sort_tmp_1, num_list, num_elements, tid);
    // mergeArrays1(sort_tmp, data, num_list, num_elements, tid); // Serial merging
    mergeArrays6(sort_tmp, data, num_list, num_elements, tid); // Parallel merging - 3x faster
}
// Launches the sort kernel on d_array and blocks until it completes.
// NOTE(review): launch/runtime errors are not checked — consider inspecting
// cudaGetLastError() / the cudaDeviceSynchronize() return value.
void RadixSort::RunKernelGPU(){
    __radixSort__ <<<BLOCKS, THREADS>>> (d_array, (int)LISTS, (int)N);
    cudaDeviceSynchronize();
}
// Fills the host array with N pseudo-random values in [0, 1023] (seeded from
// the current time) and mirrors it into freshly allocated device memory.
// NOTE(review): malloc/cudaMalloc/cudaMemcpy results are unchecked — a failed
// allocation would surface only as a crash later.
RadixSort::RadixSort(){
    h_array = (u32*)malloc(N*sizeof(u32));
    srand(time(NULL));
    for(u32 i=0;i<N;i++){
        h_array[i] = rand()%1024;
    }
    cudaMalloc((void**)&d_array, N*sizeof(u32));
    cudaMemcpy(d_array, h_array, N*sizeof(u32), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
}
// Releases the device and host copies of the array.
RadixSort::~RadixSort(){
    cudaFree(d_array);
    free(h_array);
}
// Copies the (sorted) device array back into the host array. The explicit
// synchronize makes the data safe to read immediately after this call.
void RadixSort::CopyResults(){
    cudaMemcpy(h_array, d_array, N*sizeof(u32), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
}
|
extern "C"
__global__
// In-place sigmoid derivative: activation[i] <- s * (1 - s) where
// s = sigmoid(activation[i]), computed with the fast __expf intrinsic.
// Grid-stride loop, so any launch configuration covers all `length` elements.
void sigmoidDerivative(float *activation, unsigned int length)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < length;
         idx += stride)
    {
        const float s = 1.0f / (1.0f + __expf(-activation[idx]));
        activation[idx] = s * (1.0f - s);
    }
} |
7,640 | /* Voxel sampling GPU implementation
* Author Zhaoyu SU
* All Rights Reserved. Sep., 2019.
*/
#include <stdio.h>
#include <iostream>
#include <vector>
#include <float.h>
// Maps a flat point/center id to its batch index. accu_list holds ascending
// per-batch start offsets; return the interval [accu_list[b], accu_list[b+1])
// containing id, falling back to the last batch.
__device__ inline int get_batch_id(int* accu_list, int batch_size, int id) {
    for (int b = 0; b + 1 < batch_size; ++b) {
        if (id >= accu_list[b] && id < accu_list[b + 1])
            return b;
    }
    return batch_size - 1;
}
// One thread per (center, kernel) voxel: fill all of its pooling slots with -1
// (the "empty" marker) before the sampling kernel runs.
__global__ void output_init_gpu_kernel(int batch_size, int center_num, int kernel_num,
                                       int output_pooling_size,
                                       int* output_idx) {
    const int voxel = threadIdx.x + blockIdx.x * blockDim.x;
    if (voxel >= center_num * kernel_num)
        return;
    int* slots = output_idx + voxel * output_pooling_size;
    for (int p = 0; p < output_pooling_size; ++p)
        slots[p] = -1;
}
// Bins every input point into a per-batch 3D voxel grid. For each point, its
// cell index is computed from its coordinates and the point id is appended to
// that cell's fixed-capacity buffer (capacity grid_buffer_size; overflow points
// are counted in grid_buffer_count but dropped from grid_buffer).
// NOTE(review): the cell coordinates are not clamped to [0, grid_dim_*); this
// assumes input_coors are non-negative and inside the grid extent — confirm
// with the producer of input_coors.
__global__ void grid_buffer_init_gpu_kernel(int batch_size, int input_point_num,
                                            int grid_dim_w, int grid_dim_l, int grid_dim_h,
                                            float resolution_w, float resolution_l, float resolution_h,
                                            int grid_buffer_size,
                                            const float* input_coors,
                                            int* input_accu_list,
                                            int* grid_buffer,
                                            int* grid_buffer_count) {
    const int grid_dim_size = grid_dim_w * grid_dim_h * grid_dim_l;
    int point_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (point_id < input_point_num) {
        // Voxel coordinates of this point (floor of coordinate / resolution).
        int center_grid_coor_x = (int)floor(input_coors[point_id*3 + 0] / resolution_w);
        int center_grid_coor_y = (int)floor(input_coors[point_id*3 + 1] / resolution_l);
        int center_grid_coor_z = (int)floor(input_coors[point_id*3 + 2] / resolution_h);
        int batch_id = get_batch_id(input_accu_list, batch_size, point_id);
        // Flat cell index: batch-major, then x-major, y, z.
        int grid_buffer_idx = batch_id * grid_dim_size + center_grid_coor_x * grid_dim_l * grid_dim_h + center_grid_coor_y * grid_dim_h + center_grid_coor_z;
        // Reserve a slot in this cell; atomicAdd returns the pre-increment count.
        int count = atomicAdd(&grid_buffer_count[grid_buffer_idx], 1);
        // printf("%d\n", count);
        if (count < grid_buffer_size) {
            grid_buffer[grid_buffer_idx*grid_buffer_size + count] = point_id;
        }
        // atomicExch(&grid_buffer[grid_buffer_idx], point_id);
    }
}
// For every center, gathers the indices of nearby input points into a
// kernel_size^3 voxel neighborhood around the center. One thread handles one
// (center, search-cell) pair; search cells form a (kernel_size+1)^3 window so
// the off-grid alignment of the center is covered. Candidate points come from
// the grid buffer filled by grid_buffer_init_gpu_kernel; each accepted point is
// appended (up to output_pooling_size) to its voxel's slot list in output_idx,
// and, when with_rpn is set, counted per center in valid_idx.
__global__ void voxel_sampling_idx_gpu_kernel(int batch_size, int center_num,
                                              int kernel_size,
                                              int grid_dim_w, int grid_dim_l, int grid_dim_h,
                                              float resolution_w, float resolution_l, float resolution_h,
                                              int grid_buffer_size, int output_pooling_size, bool with_rpn,
                                              const float* input_coors,
                                              const float* center_coors,
                                              int* center_accu_list,
                                              int* grid_buffer,
                                              int* grid_buffer_count,
                                              int* output_idx,
                                              int* output_idx_count,
                                              int* valid_idx) {
    const int kernel_num = kernel_size * kernel_size * kernel_size;
    const int half_kernel_size = (kernel_size - 1) / 2;
    // Flat offset of the kernel's central voxel, so signed kernel coordinates
    // can be added directly.
    const int half_kernel_num = kernel_size * kernel_size * half_kernel_size + \
                                kernel_size * half_kernel_size + \
                                half_kernel_size;
    const int search_kernel_size = kernel_size + 1;
    const int search_kernel_num = search_kernel_size * search_kernel_size * search_kernel_size;
    const int grid_dim_size = grid_dim_w * grid_dim_l * grid_dim_h;
    // Acceptance radius: 1.5 grid cells per axis (checked per-axis, not radially).
    const float radius_x = 1.5 * resolution_w;
    const float radius_y = 1.5 * resolution_l;
    const float radius_z = 1.5 * resolution_h;
    const float r_x2 = radius_x * radius_x;
    const float r_y2 = radius_y * radius_y;
    const float r_z2 = radius_z * radius_z;
    int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread_id < center_num * search_kernel_num) {
        int center_id = thread_id / search_kernel_num;
        int search_grid_id = thread_id % search_kernel_num;
        int batch_id = get_batch_id(center_accu_list, batch_size, center_id);
        float center_coor_x = center_coors[center_id*3 + 0];
        float center_coor_y = center_coors[center_id*3 + 1];
        float center_coor_z = center_coors[center_id*3 + 2];
        // Grid cell containing the center (truncation toward zero).
        int center_grid_coor_x = __float2int_rz(center_coor_x / resolution_w);
        int center_grid_coor_y = __float2int_rz(center_coor_y / resolution_l);
        int center_grid_coor_z = __float2int_rz(center_coor_z / resolution_h);
        // Decompose this thread's position within the search window.
        int search_grid_x = search_grid_id / (search_kernel_size * search_kernel_size);
        int search_grid_y = search_grid_id % (search_kernel_size * search_kernel_size) / search_kernel_size;
        int search_grid_z = search_grid_id % search_kernel_size;
        // Shift the window so it stays centered despite the center's sub-cell offset.
        int search_offset_x = -2 + round(center_coor_x / resolution_w - center_grid_coor_x) + search_grid_x;
        int search_offset_y = -2 + round(center_coor_y / resolution_l - center_grid_coor_y) + search_grid_y;
        int search_offset_z = -2 + round(center_coor_z / resolution_h - center_grid_coor_z) + search_grid_z;
        // Clamp the candidate cell to the grid bounds.
        int target_grid_x = max(0, min(center_grid_coor_x + search_offset_x, grid_dim_w - 1));
        int target_grid_y = max(0, min(center_grid_coor_y + search_offset_y, grid_dim_l - 1));
        int target_grid_z = max(0, min(center_grid_coor_z + search_offset_z, grid_dim_h - 1));
        int target_grid_id = batch_id * grid_dim_size + target_grid_x * grid_dim_l * grid_dim_h + target_grid_y * grid_dim_h + target_grid_z;
        // Scan every point stored in the candidate cell.
        for (int p=0; p<grid_buffer_size; p++) {
            int point_id = grid_buffer[target_grid_id*grid_buffer_size + p];
            if (point_id>=0) {
                float coor_x = input_coors[point_id*3 +0];
                float coor_y = input_coors[point_id*3 +1];
                float coor_z = input_coors[point_id*3 +2];
                // FLT_EPSILON keeps dx/dy/dz nonzero so fabsf(d)/d below is defined.
                float dx = coor_x - center_coor_x + FLT_EPSILON;
                float dy = coor_y - center_coor_y + FLT_EPSILON;
                float dz = coor_z - center_coor_z + FLT_EPSILON;
                float dx2 = dx * dx;
                float dy2 = dy * dy;
                float dz2 = dz * dz;
                if (dx2 < r_x2 && dy2 < r_y2 && dz2 < r_z2) {
                    // Signed kernel-voxel coordinate of the point relative to the
                    // center (+-0.5*sign(d) makes truncation behave like rounding
                    // away from the center boundary).
                    int kernel_coor_x = __float2int_rz(dx / resolution_w + 0.5 * fabsf(dx) / dx);
                    int kernel_coor_y = __float2int_rz(dy / resolution_l + 0.5 * fabsf(dy) / dy);
                    int kernel_coor_z = __float2int_rz(dz / resolution_h + 0.5 * fabsf(dz) / dz);
                    int voxel_coor = center_id * kernel_num + half_kernel_num + \
                                     kernel_size * kernel_size * kernel_coor_x + \
                                     kernel_size * kernel_coor_y + \
                                     kernel_coor_z;
                    // Reserve a pooling slot in the target voxel; drop on overflow.
                    int pooling_count = atomicAdd(&output_idx_count[voxel_coor], 1);
                    if (pooling_count < output_pooling_size) {
                        output_idx[voxel_coor*output_pooling_size + pooling_count] = point_id;
                        if (with_rpn)
                            atomicAdd(&valid_idx[center_id], 1);
                    }
                }
            }
        }
    }
}
// Host-side driver: clears the output slots, bins the input points into the
// voxel grid, then launches the sampling kernel. Launch geometry for each
// kernel comes from the CUDA occupancy calculator.
void voxel_sampling_idx_gpu_launcher(int batch_size, int input_point_num,
                                     int center_num, int kernel_size,
                                     int grid_dim_w, int grid_dim_l, int grid_dim_h, std::vector<float> resolution,
                                     int grid_buffer_size, int output_pooling_size, bool with_rpn,
                                     const float* input_coors,
                                     const int* input_num_list,
                                     const float* center_coors,
                                     const int* center_num_list,
                                     int* input_accu_list,
                                     int* center_accu_list,
                                     int* grid_buffer,
                                     int* grid_buffer_count,
                                     int* output_idx,
                                     int* output_idx_count,
                                     int* valid_idx) {
    // Check each dimension separately: the former batch_size*input_point_num
    // product could overflow int and silently pass for huge inputs.
    if (batch_size <= 0 || input_point_num <= 0 || center_num <= 0) {
        printf("VoxelSampleOp ERROR: Invalid CUDA input dimensions.\n");
        return;
    }
    int kernel_num = kernel_size * kernel_size * kernel_size;
    int search_kernel_num = (kernel_size + 1) * (kernel_size + 1) * (kernel_size + 1);
    int blockSize;    // The launch configurator returned block size
    int minGridSize;  // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize;     // The actual grid size needed, based on input size
    cudaError_t err;
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, output_init_gpu_kernel, 0, center_num * kernel_num);
    gridSize = (center_num * kernel_num + blockSize - 1) / blockSize;
    output_init_gpu_kernel<<<gridSize, blockSize>>>(batch_size, center_num, kernel_num,
                                                    output_pooling_size,
                                                    output_idx);
    // Launch errors (bad config, etc.) only surface via cudaGetLastError().
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("VoxelSampleOp ERROR: output_init launch failed: %s\n", cudaGetErrorString(err));
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, grid_buffer_init_gpu_kernel, 0, input_point_num);
    gridSize = (input_point_num + blockSize - 1) / blockSize;
    grid_buffer_init_gpu_kernel<<<gridSize, blockSize>>>(batch_size, input_point_num,
                                                         grid_dim_w, grid_dim_l, grid_dim_h,
                                                         resolution[0], resolution[1], resolution[2],
                                                         grid_buffer_size,
                                                         input_coors,
                                                         input_accu_list,
                                                         grid_buffer,
                                                         grid_buffer_count);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("VoxelSampleOp ERROR: grid_buffer_init launch failed: %s\n", cudaGetErrorString(err));
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, voxel_sampling_idx_gpu_kernel, 0, center_num * search_kernel_num);
    gridSize = (center_num * search_kernel_num + blockSize - 1) / blockSize;
    voxel_sampling_idx_gpu_kernel<<<gridSize, blockSize>>>(batch_size, center_num,
                                                           kernel_size,
                                                           grid_dim_w, grid_dim_l, grid_dim_h,
                                                           resolution[0], resolution[1], resolution[2],
                                                           grid_buffer_size, output_pooling_size, with_rpn,
                                                           input_coors,
                                                           center_coors,
                                                           center_accu_list,
                                                           grid_buffer,
                                                           grid_buffer_count,
                                                           output_idx,
                                                           output_idx_count,
                                                           valid_idx);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("VoxelSampleOp ERROR: voxel_sampling_idx launch failed: %s\n", cudaGetErrorString(err));
}
|
7,641 | #include <stdlib.h>
#include <stdio.h>
#define FILENAME "./dblp-co-authors.txt"
#define NumAuthor 317080
#define DataLen 1049866
#define BlockSize 1024
#define GridSize int(DataLen/BlockSize) + 1
int dataset[DataLen * 2];// array to store the raw dataset
void dataset_read(int * dataset);
__global__ void dataset_parse(int * dataset, int * output);
int dataset_maxCoAuthor(int * output, int lenght);
void dataset_plot(int * output, int lenght, int max);
// Reads the co-authorship edge list, counts each author's co-author links on
// the GPU, reports the timing, and writes a histogram via dataset_plot.
int main(int argc, char * argv[])
{
    // static: NumAuthor ints (~1.2 MB) would risk overflowing the stack.
    static int output[NumAuthor] = { 0 };
    int * cu_output; // device array of per-author co-author counts
    dataset_read(dataset);
    // Set device that we will use for our cuda code
    cudaSetDevice(0);
    // Time Variables
    cudaEvent_t start, stop;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    float time;
    int * cu_dataset;
    cudaEventRecord(start,0);
    cudaMalloc((void**)&cu_output, NumAuthor * sizeof(int) );
    cudaMalloc((void**)&cu_dataset, DataLen * 2 * sizeof(int));
    cudaMemcpy(cu_dataset, dataset, DataLen * 2 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(cu_output, output, NumAuthor * sizeof(int), cudaMemcpyHostToDevice);
    dataset_parse<<<GridSize, BlockSize>>>(cu_dataset, cu_output);
    cudaDeviceSynchronize();
    cudaMemcpy(output, cu_output, NumAuthor * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    // The stop event must complete before its timestamp can be read; without
    // this, cudaEventElapsedTime can fail with cudaErrorNotReady.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    int max = dataset_maxCoAuthor(output, NumAuthor);
    printf("Time elapsed: %f\n", time);
    dataset_plot(output, NumAuthor, max);
    // Release device memory and timing events (previously leaked).
    cudaFree(cu_dataset);
    cudaFree(cu_output);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
// Loads the edge list from FILENAME into `dataset`. The file header is skipped
// by scanning whitespace-separated tokens until the first literal "1"; the
// remaining 2*DataLen - 1 integers are then read sequentially.
void dataset_read( int * dataset)
{
    FILE * datafile = fopen( FILENAME, "r");
    if (datafile == NULL)
    {
        // was unchecked: a missing file caused fscanf(NULL, ...) to crash/UB
        perror("fopen " FILENAME);
        exit(1);
    }
    char line[255];
    int found = 0;
    // %254s bounds the token so a long header token cannot overflow `line`;
    // checking the return value prevents the former infinite loop on EOF.
    while (fscanf(datafile, "%254s", line) == 1)
    {
        if (atoi(line) == 1)
        {
            dataset[0] = 1;
            found = 1;
            break;
        }
    }
    if (!found)
    {
        fprintf(stderr, "No leading \"1\" token found in %s\n", FILENAME);
        fclose(datafile);
        exit(1);
    }
    for(int i = 1; i < 2 * DataLen; i++){
        if (fscanf(datafile, "%d", &dataset[i]) != 1)
        {
            fprintf(stderr, "Unexpected end of data at entry %d\n", i);
            break;
        }
    }
    fclose(datafile);
}
// One thread per edge: each edge is a pair of 1-based author ids, and both
// endpoints get their co-author count bumped atomically.
__global__ void dataset_parse(int * dataset, int * output)
{
    const int edge = threadIdx.x + blockIdx.x * blockDim.x;
    if (edge >= DataLen)
        return;
    const int author_a = dataset[2 * edge] - 1;     // convert to 0-based index
    const int author_b = dataset[2 * edge + 1] - 1;
    atomicAdd(&(output[author_a]), 1);
    atomicAdd(&(output[author_b]), 1);
}
// Scans the per-author co-author counts, prints every author holding the
// maximum count (up to 1000 ties), and returns that maximum.
// Returns 0 for an empty or invalid input.
int dataset_maxCoAuthor(int * output, int lenght)
{
    if (output == NULL || lenght <= 0)
        return 0; // was: unconditional read of max_ind[0] / output[0]
    int max = 0;
    int max_num = 0;             // index of the last recorded tie
    int max_ind[1000] = { 0 };   // author indices sharing the maximum
    for(int i = 0; i < lenght; i++)
    {
        if(max < output[i])
        {
            max = output[i];
            max_num = 0;
            // was memset(max_ind, 0, 1000): cleared 1000 bytes, not 1000 ints
            memset(max_ind, 0, sizeof(max_ind));
            max_ind[max_num] = i;
        }
        else if(max == output[i])
        {
            // cap the tie list: >1000 ties formerly wrote past max_ind[999]
            if (max_num + 1 < 1000)
            {
                max_num++;
                max_ind[max_num] = i;
            }
        }
    }
    printf("The list of authors with most co-authors:\n");
    for(int i = 0; i <= max_num; i++)
    {
        printf("Author: %6d has %6d co-authors.\n", max_ind[i] + 1, output[max_ind[i]]);
    }
    return output[max_ind[0]];
}
// Builds the histogram numCoAuthorList[k-1] = number of authors with exactly k
// co-authors (k in 1..max) and writes it to ./output.txt as raw ints.
void dataset_plot(int * output, int lenght, int max)
{
    if (max <= 0)
    {
        printf("\nNothing to plot (max co-author count is %d).\n", max);
        return;
    }
    int* numCoAuthorList = (int*)malloc(max * sizeof(int));
    if (numCoAuthorList == NULL)
    {
        printf("\nOut of memory in dataset_plot!\n");
        return;
    }
    // was memset(..., max): zeroed only `max` bytes instead of `max` ints
    memset(numCoAuthorList, 0, max * sizeof(int));
    for(int i = 0; i < lenght; i++)
    {
        if(output[i] >= 1 && output[i] <= max)
        {
            // was unguarded below 1: output[i] == 0 indexed numCoAuthorList[-1]
            numCoAuthorList[output[i] - 1]++;
        }
        else if (output[i] > max)
        {
            printf("\nError in Finding MAX!!!\n");
        }
        // authors with zero co-authors are outside the 1..max histogram
    }
    FILE *fp = fopen("./output.txt", "wb");
    if (fp != NULL)
    {
        fwrite(numCoAuthorList, sizeof(int), max, fp);
        fclose(fp);
    }
    else
    {
        printf("\nCould not open ./output.txt for writing!\n");
    }
    free(numCoAuthorList); // was leaked
}
|
7,642 | // Not a real AppKernel
// Dummy Kernel for HPDC Paper
// Dummy app kernel: square matrix multiply Out = A * B, executed by a single
// warp. Parameters are "boxed" in a flat float buffer laid out as
// [width | A (w*w) | B (w*w) | Out (w*w)]; each lane computes every
// warp_size-th row of Out.
__device__ void MatrixMultiply(void *input)
{
    // calibrate for warp size
    const int warp_size = 32;
    const int lane = threadIdx.x % warp_size;
    // unbox the host parameters (width arrives as a float and is truncated)
    float* inputParams = (float*)input;
    int matrixWidth = inputParams[0];
    float *matrixA = inputParams + 1;
    float *matrixB = matrixA + matrixWidth * matrixWidth;
    float *matrixOut = matrixA + 2 * matrixWidth * matrixWidth;
    // step by warp_size (was a hard-coded 32, inconsistent with the constant)
    for (unsigned int i = lane; i < matrixWidth; i += warp_size) {
        for (unsigned int j = 0; j < matrixWidth; j++) {
            float sum = 0;
            for (unsigned int k = 0; k < matrixWidth; k++) {
                float a = matrixA[i * matrixWidth + k];
                float b = matrixB[k * matrixWidth + j];
                sum += a * b;
            }
            matrixOut[i * matrixWidth + j] = sum;
        }
    }
}
|
// Elementwise C = A + B: one thread per element, with a guard for the
// partially filled final block.
__global__ void vectorAddKernel(float *A, float *B, float *C, int n) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    C[idx] = A[idx] + B[idx];
}
|
7,644 | #include "includes.h"
// NDT Hessian accumulation: for Hessian entry (row, col) — selected by
// blockIdx.y/blockIdx.z — each thread grid-strides over the valid points and
// sums the per-voxel contributions of that point (voxel range given by the CSR
// arrays starting_voxel_id/voxel_id) into hessians. Inputs are laid out as
// column-strided matrices with stride valid_voxel_num / valid_points_num.
__global__ void computeHessianListS1(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, double gauss_d1, double gauss_d2, double *hessians, double *e_x_cov_x, double *tmp_hessian, double *cov_dxd_pi, double *point_gradients, int valid_voxel_num)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    // (row, col) indexes the 6x6 Hessian entry this block computes.
    int row = blockIdx.y;
    int col = blockIdx.z;
    if (row < 6 && col < 6) {
        // Column `row` of the three stacked 6-wide cov_dxd_pi matrices.
        double *cov_dxd_pi_mat0 = cov_dxd_pi + row * valid_voxel_num;
        double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num;
        double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num;
        double *tmp_h = tmp_hessian + col * valid_voxel_num;
        // Output column for entry (row, col), one value per valid point.
        double *h = hessians + (row * 6 + col) * valid_points_num;
        // Column `col` of the three stacked point-gradient matrices.
        double *tmp_pg0 = point_gradients + col * valid_points_num;
        double *tmp_pg1 = tmp_pg0 + 6 * valid_points_num;
        double *tmp_pg2 = tmp_pg1 + 6 * valid_points_num;
        for (int i = id; i < valid_points_num; i += stride) {
            int pid = valid_points[i];
            double d_x = static_cast<double>(trans_x[pid]);
            double d_y = static_cast<double>(trans_y[pid]);
            double d_z = static_cast<double>(trans_z[pid]);
            double pg0 = tmp_pg0[i];
            double pg1 = tmp_pg1[i];
            double pg2 = tmp_pg2[i];
            double final_hessian = 0.0;
            // Accumulate over the voxels this point contributes to.
            for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
                //Transformed coordinates
                int vid = voxel_id[j];
                double tmp_ex = e_x_cov_x[j];
                // Skip degenerate weights: out of (0, 1) or NaN
                // (tmp_ex != tmp_ex is the NaN test).
                if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
                    double cov_dxd0 = cov_dxd_pi_mat0[j];
                    double cov_dxd1 = cov_dxd_pi_mat1[j];
                    double cov_dxd2 = cov_dxd_pi_mat2[j];
                    tmp_ex *= gauss_d1;
                    final_hessian += -gauss_d2 * ((d_x - centroid_x[vid]) * cov_dxd0 + (d_y - centroid_y[vid]) * cov_dxd1 + (d_z - centroid_z[vid]) * cov_dxd2) * tmp_h[j] * tmp_ex;
                    final_hessian += (pg0 * cov_dxd0 + pg1 * cov_dxd1 + pg2 * cov_dxd2) * tmp_ex;
                }
            }
            h[i] = final_hessian;
        }
    }
} |
7,645 | #include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
// Block-wise tree reduction: each block sums its slice of `array` in shared
// memory, then lane 0 folds the block's partial sum into *output atomically.
// Requires blockDim.x to be a power of two (the halving loop assumes it) and
// dynamic shared memory of blockDim.x * sizeof(float).
__global__ void sum_kernel(float* array, int n, float* output){
    int position = blockIdx.x * blockDim.x + threadIdx.x;
    // extern: dynamically sized shared memory, set by the kernel launch's
    // third configuration parameter.
    extern __shared__ float cache[]; // sized block_size * sizeof(float)
    int block_size = blockDim.x;
    int lane = threadIdx.x;
    // Out-of-range threads contribute 0 so the reduction stays correct.
    float value = 0;
    if(position < n)
        value = array[position];
    for(int i = block_size / 2; i > 0; i /= 2){ // classic halving reduce-sum
        cache[lane] = value;
        __syncthreads(); // wait for all threads in the block to store
        if(lane < i) value += cache[lane + i];
        __syncthreads(); // wait for all threads in the block to read
    }
    if(lane == 0){
        printf("block %d value = %f\n", blockIdx.x, value);
        // Multiple blocks may finish concurrently, so the per-block partial
        // sums must be combined with atomicAdd.
        atomicAdd(output, value);
    }
}
// Host launcher for sum_kernel: picks a block size (capped at 512, rounded up
// to a power of two as the kernel's halving loop requires) and allocates
// matching dynamic shared memory. grid_size is computed from the pre-rounding
// block size; since rounding only grows the block, the grid still covers n.
void launch_reduce_sum(float* array, int n, float* output){
    const int nthreads = 512;
    int block_size = n < nthreads ? n : nthreads;
    int grid_size = (n + block_size - 1) / block_size;
    // block_size must be a power of two for the reduction in sum_kernel
    float block_sqrt = log2(block_size);
    printf("old block_size = %d, block_sqrt = %.2f\n", block_size, block_sqrt);
    block_sqrt = ceil(block_sqrt);
    block_size = pow(2, block_sqrt);
    printf("block_size = %d, grid_size = %d\n", block_size, grid_size);
    // Third launch argument: block_size * sizeof(float) bytes of dynamic
    // shared memory for the per-block cache.
    sum_kernel<<<grid_size, block_size, block_size * sizeof(float), nullptr>>>(
        array, n, output
    );
} |
7,646 | #include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
#define DEBUG 1
#define NGRID 512
#define NBLOCK 65535
#define CUDA_CHECK(cmd) {cudaError_t error = cmd; if(error!=cudaSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}}
/* The exponent given here determines the steps taken in the adding kernel. An
* exponent of 1 results in rounding the size to 2^1 = 2, therefore, in every
* step, two input fields are added and the size shrinks to half of what it was
* before. This influences the size of the result buffer as well (the greater
* this exponent is, the smaller the result will be). */
#define BASE_EXP 4
#define BASE (1 << BASE_EXP)
/* Define this to actually use host memory instead of copying the buffer to the
* GPU (as it turns out, this may actually be worth it) */
#define USE_HOST_PTR
#ifdef USE_HOST_PTR
#define HOST_PTR_POLICY CL_MEM_USE_HOST_PTR
#else
#define HOST_PTR_POLICY CL_MEM_COPY_HOST_PTR
#endif
/**
* These two functions provide std::chrono functionality (see cpp-stuff.cpp for
* an explanation why they're extern).
CUDA nvcc don't support it.
extern void clock_start(void);
extern long clock_delta(void);
*/
// Level-0 of the sum tree: dest[id] = number of non-'-' characters in the
// BASE-wide chunk sequence[id*BASE .. id*BASE+BASE). The output is the
// sequence length rounded up to a power of BASE, divided by BASE, with the
// padding slots zeroed.
// Fixes vs. the original: the grid-stride loop was bounded by seq_length
// (one iteration per input *byte*), writing dest[] far beyond its
// res_length-element allocation; and the inner loop read up to BASE-1 bytes
// past the end of sequence. Both accesses are now bounded.
__global__ void k_iadd(unsigned *dest, char *sequence, unsigned seq_length)
{
    // Same rounding as the host's round_up_to_power_of_two(seq_length, BASE_EXP).
    unsigned padded = 1;
    while (padded < seq_length)
        padded <<= BASE_EXP;
    const unsigned out_len = padded >> BASE_EXP; // level-0 slot count
    for (unsigned id = blockIdx.x*blockDim.x+threadIdx.x;
         id < out_len;
         id += blockDim.x*gridDim.x)
    {
        unsigned result = 0;
        unsigned in_start = id << BASE_EXP;
        if (in_start < seq_length)
        {
            unsigned in_end = in_start + BASE;
            if (in_end > seq_length)
                in_end = seq_length; // clamp the final partial chunk
            for (unsigned i = in_start; i < in_end; i++)
            {
                char nucleobase = sequence[i];
                result += nucleobase != '-';
            }
        }
        // Slots entirely past the sequence stay 0 (zero padding for the tree).
        dest[id] = result;
    }
}
// One tree-reduction step: thread id sums BASE consecutive values of the
// level starting at offset soff and writes the sum into slot doff + id of the
// next level (all within the same buffer).
// NOTE(review): there is no bound check on id — the launch configuration must
// supply exactly one thread per output element of this level, otherwise the
// kernel reads and writes out of bounds. Confirm the host launch matches.
__global__ void k_cadd(unsigned *buffer, unsigned doff, unsigned soff)
{
    unsigned id = blockIdx.x*blockDim.x+threadIdx.x;
    // Input chunk for this thread: BASE values starting at soff + id*BASE.
    unsigned in_start = soff + (id << BASE_EXP);
    unsigned out_pos = doff + id;
    unsigned result = 0;
    for (unsigned i = in_start; i < in_start + BASE; i++)
    {
        unsigned value = buffer[i];
        result += value;
    }
    buffer[out_pos] = result;
}
/**
* Rounds a value x up to the next power of 2^exp.
*/
/**
 * Rounds a positive value x up to the next power of 2^exp
 * (e.g. exp=4 rounds up to the next power of 16).
 */
static long round_up_to_power_of_two(long x, int exp)
{
    assert(x > 0);
    // Count how many base-(2^exp) digits x-1 occupies, then rebuild that power.
    long remainder = x - 1;
    int digits = 0;
    while (remainder)
    {
        remainder >>= exp;
        digits++;
    }
    long result = 1;
    while (digits--)
        result <<= exp;
    return result;
}
/**
* Loads a text file and returns a buffer with the contents.
*/
/**
 * Loads a text file and returns a calloc'd, NUL-terminated buffer with its
 * contents, or NULL on any failure (message on stderr). When length_ptr is
 * non-NULL it receives the buffer size (file length + 1 for the final 0 byte).
 * The caller owns the returned buffer.
 */
static char *load_text(const char *filename, long *length_ptr)
{
    FILE *fp = fopen(filename, "r");
    if (!fp)
    {
        fprintf(stderr, "Could not load file \"%s\": %s\n", filename, strerror(errno));
        return NULL;
    }
    fseek(fp, 0, SEEK_END);
    long length = ftell(fp);
    if (length < 0)
    {
        fprintf(stderr, "Could not determine size of \"%s\": %s\n", filename, strerror(errno));
        fclose(fp);
        return NULL;
    }
    rewind(fp);
    long mem_len = length + 1; // +1 for the terminating 0 byte from calloc
    if (length_ptr)
        *length_ptr = mem_len;
    char *content = (char *)calloc(mem_len, 1);
    if (!content)
    {
        // was unchecked: fread(NULL, ...) on allocation failure
        fprintf(stderr, "Out of memory loading \"%s\"\n", filename);
        fclose(fp);
        return NULL;
    }
    if (fread(content, 1, length, fp) != (size_t)length)
    {
        // was unchecked: a short read silently produced truncated data
        fprintf(stderr, "Short read on \"%s\"\n", filename);
        free(content);
        fclose(fp);
        return NULL;
    }
    fclose(fp);
    return content;
}
// Counts the nucleobases (non-'-' characters) of an alignment file on the GPU
// by building a tree of partial sums, then walks the tree on the CPU to map a
// logical (gap-free) index back to a physical sequence index.
int main(int argc, char *argv[])
{
    // Launch geometry: NGRID (512) threads per block, NBLOCK blocks.
    // (was grid1d(NGRID)/block1d(NBLOCK): a 65535-thread block exceeds the
    // 1024-thread-per-block hardware limit, so every launch failed to start)
    dim3 grid1d(NBLOCK,1,1);
    dim3 block1d(NGRID,1,1);
    unsigned clstsi, clstc, clsto, local_index;
    long delta_time;
    struct timeval start_time, end_time;
    if (argc < 2)
    {
        fprintf(stderr, "Usage: transalign_killer [--cldev=x.y] <input file>\n");
        fprintf(stderr, " --cldev=x.y: x specifies the platform index, y the device index.\n");
        return 1;
    }
    long seq_length;
    //CUDA kernel input
    char *sequence = load_text(argv[argc - 1], &seq_length);
    if (!sequence)
        return 1;
    //DEBUG
    for (int i=0; i<seq_length; ++i)
    {
        printf("%c", sequence[i]);
    }
    printf("\n");
    seq_length--; // Cut final 0 byte
    // FIXME: All the following code relies on seq_length being a multiple of BASE.
    long round_seq_length = round_up_to_power_of_two(seq_length, BASE_EXP);
    long res_length = 0;
    for (long len = round_seq_length / BASE; len; len /= BASE)
        res_length += len;
    printf("res_length: %ld\n", res_length); // was %d with a long argument
    // Use some random index to be searched for here
    unsigned letter_index = seq_length / 2;
    // Create the result buffer
    // CUDA kernel output
    unsigned *result = (unsigned *)malloc(res_length * sizeof(unsigned));
    unsigned *result_gpu;
    char *seq_gpu;
    // Device-side slack: k_iadd's grid-stride loop iterates once per input byte
    // and the rounded-up k_cadd launches overshoot each level by up to one
    // block, so pad the device result buffer to absorb those accesses.
    size_t result_alloc = (size_t)res_length + (size_t)NGRID * BASE;
    if (result_alloc < (size_t)seq_length + NGRID)
        result_alloc = (size_t)seq_length + NGRID;
    //replace clock_start(); with gettimeofday()
    gettimeofday(&start_time, NULL);
#if DEBUG
    printf("GPU part started\n");
#endif
    /*** START OF ROCKET SCIENCE LEVEL RUNTIME-TIME INTENSIVE STUFF ***/
    // Bandwidth intensive stuff goes here
    // Copy the sequence to the video memory
    CUDA_CHECK(cudaMalloc((void**)&result_gpu, result_alloc * sizeof(unsigned)));//result_gpu
    // Pad the device sequence with '-' so any read past seq_length counts as a gap.
    CUDA_CHECK(cudaMalloc((void**)&seq_gpu, (seq_length + BASE)*sizeof(char)));//seq_gpu
    CUDA_CHECK(cudaMemset(seq_gpu, '-', (seq_length + BASE)*sizeof(char)));
    // was res_length chars: copy the whole sequence, not the (smaller) result size
    CUDA_CHECK(cudaMemcpy(seq_gpu, sequence, seq_length * sizeof(char), cudaMemcpyHostToDevice));
#if DEBUG
    printf("GPU malloc and cpy finised\n");
#endif
    //replace clock_delta(); with gettimeofday()
    gettimeofday(&end_time, NULL);
    long bw1_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
    // GPU intensive stuff goes here
    /**
     * First, transform every - and \0 into a 0 and every other character into a
     * 1. Then, add consecutive fields (BASE fields) together and store them at
     * the beginning of the result buffer.
     */
#if DEBUG
    printf("k_iadd launching\n");
#endif
    k_iadd<<<grid1d,block1d>>>(result_gpu, seq_gpu, seq_length);
    CUDA_CHECK(cudaGetLastError()); // surface launch-configuration errors
#if DEBUG
    printf("k_iadd finished\n");
#endif
    CUDA_CHECK(cudaMemcpy(result, result_gpu, res_length * sizeof(unsigned), cudaMemcpyDeviceToHost));
#if DEBUG
    printf("result back\n");
    for (int i = 0; i < res_length; i++)
    {
        printf("%d ", result[i]);
    }
    printf("\n");
#endif
#if DEBUG
    printf("k_iadd result back\n");
#endif
    unsigned input_offset = 0, output_offset = round_seq_length / BASE;
    CUDA_CHECK(cudaMemcpy(result_gpu, result, res_length * sizeof(unsigned), cudaMemcpyHostToDevice));
#if DEBUG
    printf("k_cadd loop start\n");
#endif
    for (unsigned kernels = round_seq_length / (BASE * BASE); kernels > 0; kernels /= BASE)
    {
        /**
         * Then, do this addition recursively until there is only one kernel
         * remaining which calculates the total number of non-'-' and non-'\0'
         * characters.
         */
#if DEBUG
        printf("k_cadd loop %d\n", kernels);
#endif
        // Launch (roughly) one thread per output element of this level; the
        // round-up overshoot of up to NGRID-1 threads lands in the slack
        // allocated above and is overwritten by later levels.
        dim3 cadd_grid((kernels + NGRID - 1) / NGRID, 1, 1);
        k_cadd<<<cadd_grid, block1d>>>(result_gpu, output_offset, input_offset);
        CUDA_CHECK(cudaGetLastError());
        input_offset = output_offset;
        output_offset += kernels;
    }
#if DEBUG
    printf("k_cadd loop end\n");
#endif
    // Retrieve the result buffer
    CUDA_CHECK(cudaMemcpy(result, result_gpu, res_length * sizeof(unsigned), cudaMemcpyDeviceToHost));
#if DEBUG
    printf("result tree back\n");
#endif
    gettimeofday(&end_time, NULL);
    long gpu_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
    // Reverse bandwidth intensive stuff goes here
    gettimeofday(&end_time, NULL);
    long bw2_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
    // CPU intensive stuff goes here
#if DEBUG
    printf("cpu part start\n");
    for (int i=0; i<res_length; ++i)
    {
        printf("%d ", result[i]);
    }
    printf("\n");
#endif
    if (letter_index > result[res_length - 1])
    {
        fprintf(stderr, "Logical index out of bounds (last index: %u).\n", result[res_length - 1]);
        CUDA_CHECK(cudaFree(result_gpu));
        CUDA_CHECK(cudaFree(seq_gpu));
        exit(-1);
    }
    if (!letter_index)
    {
        fprintf(stderr, "Please used 1-based indexing (for whatever reason).\n");
        CUDA_CHECK(cudaFree(result_gpu));
        CUDA_CHECK(cudaFree(seq_gpu));
        exit(-1);
    }
    /**
     * Okay, now we have a buffer which contains a tree of sums, looking
     * something like this:
     * _
     * 4 |
     * / \ |
     * 3 1 |- result buffer
     * / \ / \ |
     * 2 1 1 0 _|
     * / \ / \ / \ / \
     * A G - T C - - - --- sequence buffer
     *
     * (actually, it looks more like 2 1 1 2 3 3 6)
     *
     * Now, we walk through it from the top. Let's assume we're looking for the
     * logical index 2. We'll compare it to 4: Of course, it's smaller (that was
     * the assertition right before this comment), else, we'd be out of bounds.
     * No we're comparing it with the left 3 in the next level. It's smaller,
     * therefore, this subtree is correct and we move on to the next level.
     * There, we compare it to the left 2. 2 is greater/equal to 2, therefore,
     * this is _not_ the right subtree, we have to go to the other one (the one
     * to the right, below the 1). We subtract the 2 from the left subtree,
     * therefore our new "local" index is 0 (we're looking for the nucleobase at
     * index 0 in the subtree below the 1). Now, at the sequence level, there
     * are always just two possibilities. Either, the local index is 0 or it is
     * 1. If it's 1, this will always mean the right nucleobase, since 1 means
     * to skip one. The only one to skip is the left one, therefore, the right
     * one is the one we're looking for. If the local index is 0, this refers to
     * the first nucleobase, which may be either the left or the right,
     * depending on whether the left one is actually a nucleobase.
     *
     * In this case, the local index is 0. Since the left nucleobase is not
     * really one (it is '-'), the right one is the one we're looking for; its
     * index in the sequence buffer is 3.
     *
     * The reference implementation seems to go total hazels, since it
     * apparently uses 1-based indexing. Logical index 2 would refer to G for
     * it, therefore it returns 2 (which is the 1-based index of G in the
     * sequence buffer). I can't see it from the code, but that is what the
     * result is.
     *
     *
     * For another BASE than 2, it looks like this (BASE 4):
     *
     * 9
     * // \\
     * 3 1 3 2
     * // \\ // \\ // \\ // \\
     * A G - T C - - - C - T T A G - -
     *
     * Let's assume, we're looking for index 5. Compare it to 9, it's smaller,
     * so this is the tree we're looking for. Then compare it to all subtrees:
     * 5 is greater than 3, so go right and subtract 3 from 5. 2 is greater than
     * 1, so go right and subtract 1 from 2. 1 then is smaller than 3, so the
     * third subtree from the left is the one we want to enter now. The index 1
     * here refers to the first T, therefore, it is globally the second T in the
     * sequence.
     */
    // "Current level subtree starting index"; index of the first subtree sum in
    // the current level (we skip level 0, i.e., the complete tree)
    clstsi = res_length - 1 - BASE;
    // "Current level subtree count"; number of subtrees in the current level
    clstc = BASE;
    // "Current level subtree offset"; index difference of the actual set of
    // subtrees we're using from the first one in the current level
    clsto = 0;
    // Turn 1-based index into 0-based
    local_index = letter_index - 1;
    for (;;)
    {
        int subtree;
        // "First subtree index", index of the first subtree we're supposed to
        // examine
        unsigned fsti = clstsi + clsto * BASE;
        // We could add a condition (subtree < BASE) to this loop, but this loop
        // has to be left before this condition is false anyway (otherwise,
        // something is very wrong).
        for (subtree = 0; local_index >= result[fsti + subtree]; subtree++)
            local_index -= result[fsti + subtree];
        // And we'll check it here anyway (#ifdef NDEBUG).
        assert(subtree < BASE);
        clsto = clsto * BASE + subtree;
        // If clstsi is 0, we were at the beginning of the result buffer and are
        // therefore finished
        if (!clstsi)
            break;
        clstc *= BASE;
        clstsi -= clstc;
    }
    // Now we need to go to the sequence level which requires an extra step.
    unsigned index;
    for (index = clsto * BASE; local_index; index++)
        if (sequence[index] != '-')
            local_index--;
    /*** END OF ROCKET SCIENCE LEVEL RUNTIME-TIME INTENSIVE STUFF ***/
    //replace with gettimeofday for CUDA
    gettimeofday(&end_time, NULL);
    delta_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
    printf("%li us elapsed total\n", delta_time);
    printf(" - %li us on bandwidth forth\n", bw1_time);
    printf(" - %li us on GPU\n", gpu_time - bw1_time);
    printf(" - %li us on bandwidth back\n", bw2_time - gpu_time);
    printf(" - %li us on CPU\n", delta_time - bw2_time);
    printf("Index for %u: %u\n", letter_index, index);
    printf("cnt = %u (index + 1)\n", index + 1);
    //free resource (host buffers were previously leaked)
    CUDA_CHECK(cudaFree(result_gpu));
    CUDA_CHECK(cudaFree(seq_gpu));
    free(result);
    free(sequence);
    return 0;
}
|
7,647 | #include <iostream>
// Element-wise vector add: c[i] = a[i] + b[i] for every i < n.
// The 100-pass loop rewrites the same value each time — it appears to exist
// only to add artificial per-thread work (e.g. for stream-overlap timing).
__global__ void vectorAdd(int *a, int *b, int *c, int n){
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;                       // guard the grid tail
    for (int pass = 0; pass < 100; ++pass)
        c[idx] = a[idx] + b[idx];     // same result every pass
}
// Streams demo: three vectorAdd launches plus host-side prefix sums.
// Fixes vs. the original:
//  * `new int[n*sizeof(int)]` allocated n*4 ELEMENTS (4x too much) — now n;
//  * arrays from new[] were released with `delete` — now `delete []`;
//  * the stream-2 kernel consumed d_a/d_b uploaded in streams 0/1 with no
//    dependency (a race) — events now order it after both copies;
//  * streams were never destroyed.
int main(void){
    int * a, * b;
    int * d_a, * d_b;
    int * d_r1, * d_r2, *d_r3;
    int * temp, * temp2;
    const int n = 1<<24;
    const int n_s = 3;
    cudaStream_t streams[n_s];
    for(int i=0;i<n_s;i++)
        cudaStreamCreate(&streams[i]);
    a = new int[n];
    b = new int[n];
    temp = new int[n];
    temp2 = new int[n];
    cudaMalloc(&d_a, n*sizeof(int));
    cudaMalloc(&d_b, n*sizeof(int));
    cudaMalloc(&d_r1, n*sizeof(int));
    cudaMalloc(&d_r2, n*sizeof(int));
    cudaMalloc(&d_r3, n*sizeof(int));
    for(int i=0;i<n;i++){
        a[i] = 3;
        b[i] = 5;
    }
    int blockSize = 256;
    int numBlocks = n/256;            // n is a multiple of 256
    // Events let stream 2 wait for the d_a and d_b uploads.
    cudaEvent_t aReady, bReady;
    cudaEventCreate(&aReady);
    cudaEventCreate(&bReady);
    cudaMemcpyAsync(d_a, a, n*sizeof(int), cudaMemcpyHostToDevice, streams[0]);
    cudaEventRecord(aReady, streams[0]);
    vectorAdd<<<numBlocks,blockSize,0,streams[0]>>>(d_a,d_a,d_r1,n);
    cudaMemcpyAsync(d_b, b, n*sizeof(int), cudaMemcpyHostToDevice, streams[1]);
    cudaEventRecord(bReady, streams[1]);
    vectorAdd<<<numBlocks, blockSize,0,streams[1]>>>(d_b,d_b,d_r2,n);
    cudaStreamWaitEvent(streams[2], aReady, 0);
    cudaStreamWaitEvent(streams[2], bReady, 0);
    vectorAdd<<<numBlocks, blockSize,0,streams[2]>>>(d_a,d_b,d_r3,n);
    // Drain each result and build an inclusive prefix sum on the host;
    // temp2 is overwritten per result, exactly as in the original.
    cudaMemcpyAsync(temp, d_r1, n*sizeof(int), cudaMemcpyDeviceToHost, streams[0]);
    cudaStreamSynchronize(streams[0]);
    temp2[0] = temp[0];
    for(int i=1;i<n;i++)
        temp2[i] = temp2[i-1] + temp[i];
    cudaMemcpyAsync(temp, d_r2, n*sizeof(int), cudaMemcpyDeviceToHost, streams[1]);
    cudaStreamSynchronize(streams[1]);
    temp2[0] = temp[0];
    for(int i=1;i<n;i++)
        temp2[i] = temp2[i-1] + temp[i];
    cudaMemcpyAsync(temp, d_r3, n*sizeof(int), cudaMemcpyDeviceToHost, streams[2]);
    cudaStreamSynchronize(streams[2]);
    temp2[0] = temp[0];
    for(int i=1;i<n;i++)
        temp2[i] = temp2[i-1] + temp[i];
    cudaEventDestroy(aReady);
    cudaEventDestroy(bReady);
    for(int i=0;i<n_s;i++)
        cudaStreamDestroy(streams[i]);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_r1);
    cudaFree(d_r2);
    cudaFree(d_r3);
    delete [] a;
    delete [] b;
    delete [] temp;
    delete [] temp2;
    return 0;
}
|
7,648 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Reports a few capability figures of the current CUDA device.
int main(){
    int dev;
    cudaGetDevice(&dev);

    struct cudaDeviceProp props;
    cudaGetDeviceProperties(&props, dev);

    const int mpc = props.multiProcessorCount;
    const int mtpb = props.maxThreadsPerBlock;
    const int shm = props.sharedMemPerBlock;   // bytes of shared memory per block
    printf("Device %d: number of mulitprocessors %d, max number of threads per block %d, shared memory per block %d \n", dev, mpc, mtpb, shm);
    return 0;
} |
7,649 | #pragma once
#include "Vector3.cuh.cu"
#include "Ray.cuh.cu"
namespace RayTracing
{
// Pinhole camera: stores a look-from/look-at pair plus the derived viewport
// frame, and produces primary rays for fractional viewport coordinates.
class Camera
{
private:
Point3 m_lookAt;              // point the camera aims at
Point3 m_lookFrom;            // camera position (ray origin)
Vector3 m_viewportHorizontal; // viewport basis vector spanning the image width
Vector3 m_viewportVertical;   // viewport basis vector spanning the image height
float m_viewportHeight;       // viewport extent (world units)
float m_viewportWidth;
Point3 m_lowerLeftCorner;     // world-space lower-left corner of the viewport
public:
// width/height: image size in pixels (fixes the aspect ratio);
// horizontalViewDegrees: horizontal field of view in degrees;
// lookAt/lookFrom default to the origin.
Camera(
const int width,
const int height,
const float horizontalViewDegrees,
const Point3 &lookAt=Point3(),
const Point3 &lookFrom=Point3()
);
// Re-aims the camera at a new target from a new position.
void LookAt(
const Point3 &lookAt,
const Point3 &lookFrom
);
// w, h: fractional viewport coordinates — presumably in [0,1]; returns the
// primary ray through that point (TODO confirm against the .cu definition).
Ray GetRay(const float w, const float h) const;
};
} // namespace RayTracing
|
7,650 | #include<stdio.h>
#define N 1237
#define M 2311
enum modo{FILA, COLUMNA, ELEMENTO};
void CheckCudaError(char sms[], int line);
// CPU reference: mA[i][j] = mA[i][j]*vC[i] - mB[i][j]*vD[j] + mA[i][0]*mB[7][j].
// NOTE: column 0 of mA is itself overwritten at j==0, so every j>0 term uses
// the *updated* mA[i*M]. The GPU kernels below replicate exactly this order.
void Examen21(float *mA, float *mB, float *vC, float *vD) {
int i, j;
for (i=0; i<N; i++)
for (j=0; j<M; j++)
mA[i*M + j] = mA[i*M + j]*vC[i] - mB[i*M + j]*vD[j] + mA[i*M]*mB[7*M + j];
}
// One thread per column j. Thread j==0 rewrites column 0 of mA first and then
// publishes completion through *lock; every other thread spins until the flag
// is set, because its own update reads the NEW mA[i*M].
// NOTE(review): this handshake is fragile —
//  * *lock is read in a plain (non-volatile, non-atomic) spin, so the compiler
//    may cache the load, and blocks not co-resident with block 0 can spin
//    forever;
//  * __syncthreads() is executed inside divergent control flow (threads with
//    j==0 or j>=M returned early), which is undefined behaviour.
// A two-kernel launch (column 0 first, remaining columns second) is the safe
// design.
__global__ void kernel_columna(float mA[N*M], float mB[N*M], float vC[N], float vD[M], int *lock) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= M) return;
if (j == 0) {
for (int i=0; i<N; i++)
mA[i*M] = mA[i*M]*vC[i] - mB[i*M]*vD[0] + mA[i*M]*mB[7*M];
*lock = 1;
return;
}
while(!*lock) __syncthreads();
for (int i=0; i<N; i++)
mA[i*M + j] = mA[i*M + j]*vC[i] - mB[i*M + j]*vD[j] + mA[i*M]*mB[7*M + j];
}
// One thread per row. Each thread sweeps its row left to right; column 0 is
// updated first, so later columns read the NEW mA[row*M] — this matches the
// CPU reference (Examen21), with no inter-thread ordering needed.
__global__ void kernel_fila(float mA[N*M], float mB[N*M], float vC[N], float vD[M]) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < N) {
        const float c = vC[row];          // invariant over the row
        float *rowA = &mA[row * M];
        const float *rowB = &mB[row * M];
        for (int col = 0; col < M; ++col)
            rowA[col] = rowA[col]*c - rowB[col]*vD[col] + rowA[0]*mB[7*M + col];
    }
}
// One thread per element (i,j). Threads with j==0 rewrite their row's column 0
// and set the per-row flag lock[i]; all other threads of that row spin on the
// flag because their update reads the NEW mA[i*M].
// NOTE(review): same hazards as kernel_columna — the spin reads lock[i]
// without volatile/atomics, and __syncthreads() sits inside divergent control
// flow (early returns above), which is undefined behaviour. Splitting into
// two kernel launches would be the safe design.
__global__ void kernel_elemento(float mA[N*M], float mB[N*M], float vC[N], float vD[M], int *lock) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= N || j >= M) return;
if (j == 0) {
mA[i*M] = mA[i*M]*vC[i] - mB[i*M]*vD[0] + mA[i*M]*mB[7*M];
lock[i] = 1;
return;
}
while(!lock[i]) __syncthreads();
mA[i*M + j] = mA[i*M + j]*vC[i] - mB[i*M + j]*vD[j] + mA[i*M]*mB[7*M + j];
}
// Returns 1 when every pair of the two N*M buffers differs by at most `tol`
// in absolute value, 0 otherwise.
int verify(float *mA_ref, float *mA, float tol) {
    int ok = 1;
    for (int k = 0; k < N*M && ok; k++)
        ok = (fabs(mA_ref[k] - mA[k]) <= tol);
    return ok;
}
// Runs the exam computation on the GPU — variant chosen by argv[1]:
// "fila" one-thread-per-row (default), "col" one-thread-per-column,
// "ele" one-thread-per-element — then verifies against the CPU reference.
// Fixes vs. the original: FILA/COLUMNA grids were sized for N*M threads
// instead of N resp. M; host buffers and the device lock were leaked; the
// success path had no return statement.
int main(int argc, char **argv) {
    enum modo modo = FILA;
    if (argc > 1) {
        if ((strcmp("fila", argv[1])) == 0) modo = FILA;
        else if ((strcmp("col", argv[1])) == 0) modo = COLUMNA;
        else if ((strcmp("ele", argv[1])) == 0) modo = ELEMENTO;
        else {
            fprintf(stderr, "Parámetro inválido\n");
            return 1;
        }
    }
    float *mA, *mB, *vC, *vD;
    mA = (float*)malloc(sizeof(float)*N*M);
    mB = (float*)malloc(sizeof(float)*N*M);
    vC = (float*)malloc(sizeof(float)*N);
    vD = (float*)malloc(sizeof(float)*M);
    // Rellenamos las matrices con valores de prueba
    for (int i=0; i<N; i++) {
        for (int j=0; j<M; j++) {
            mA[i*M + j] = 1.0 + (i*3)%7;
            mB[i*M + j] = 2.0 + j%11;
        }
        vC[i] = i*0.3;
    }
    for (int j=0; j<M; j++) vD[j] = j*0.75;
    float *mA_dev, *mB_dev, *vC_dev, *vD_dev;
    cudaMalloc((float**)&mA_dev, sizeof(float)*N*M);
    cudaMalloc((float**)&mB_dev, sizeof(float)*N*M);
    cudaMalloc((float**)&vC_dev, sizeof(float)*N);
    cudaMalloc((float**)&vD_dev, sizeof(float)*M);
    CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
    cudaMemcpy(mA_dev, mA, sizeof(float)*N*M, cudaMemcpyHostToDevice);
    cudaMemcpy(mB_dev, mB, sizeof(float)*N*M, cudaMemcpyHostToDevice);
    cudaMemcpy(vC_dev, vC, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(vD_dev, vD, sizeof(float)*M, cudaMemcpyHostToDevice);
    CheckCudaError((char *) "Memcpy H -> D", __LINE__);
    int *lock = NULL;
    dim3 dimGrid, dimBlock;
    switch (modo) {
    case FILA:
        // One thread per ROW: N work items (the original sized the grid for
        // N*M threads — ~M times too many blocks).
        dimBlock = dim3(1024, 1, 1);
        dimGrid = dim3((N + dimBlock.x - 1)/dimBlock.x, 1, 1);
        kernel_fila<<<dimGrid, dimBlock>>>(mA_dev, mB_dev, vC_dev, vD_dev);
        break;
    case COLUMNA:
        cudaMalloc((int**)&lock, sizeof(int));
        cudaMemset(lock, 0, sizeof(int));
        CheckCudaError((char *) "Crear lock", __LINE__);
        // One thread per COLUMN: M work items.
        dimBlock = dim3(1024, 1, 1);
        dimGrid = dim3((M + dimBlock.x - 1)/dimBlock.x, 1, 1);
        kernel_columna<<<dimGrid, dimBlock>>>(mA_dev, mB_dev, vC_dev, vD_dev, lock);
        break;
    case ELEMENTO:
        cudaMalloc((int**)&lock, N*sizeof(int));
        cudaMemset(lock, 0, N*sizeof(int));
        CheckCudaError((char *) "Crear lock", __LINE__);
        dimBlock = dim3(32, 32, 1);
        dimGrid = dim3((N + dimBlock.x - 1)/dimBlock.x, (M + dimBlock.y - 1)/dimBlock.y , 1);
        kernel_elemento<<<dimGrid, dimBlock>>>(mA_dev, mB_dev, vC_dev, vD_dev, lock);
        break;
    default:
        fprintf(stderr, "ERROR\n");
    }
    CheckCudaError((char *) "Kernel", __LINE__);
    float *mA_cuda = (float*)malloc(sizeof(float)*N*M);
    // cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(mA_cuda, mA_dev, sizeof(float)*N*M, cudaMemcpyDeviceToHost);
    CheckCudaError((char *) "Memcpy D -> H", __LINE__);
    cudaFree(mA_dev);
    cudaFree(mB_dev);
    cudaFree(vC_dev);
    cudaFree(vD_dev);
    if (lock) cudaFree(lock);
    // CPU reference run for comparison.
    Examen21(mA, mB, vC, vD);
    // Comprobación con tolerancia alta debido a errores de float
    int ok = verify(mA, mA_cuda, 1e-2);
    free(mA); free(mB); free(vC); free(vD); free(mA_cuda);
    if (!ok) {
        fprintf(stderr, "FAIL\n");
        return 1;
    }
    fprintf(stderr, "OK\n");
    return 0;
}
void CheckCudaError(char sms[], int line) {
cudaError_t error;
error = cudaGetLastError();
if (error) {
fprintf(stderr, "(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(error), __FILE__, line);
exit(EXIT_FAILURE);
} // else fprintf(stderr, "(OK) %s \n", sms);
}
|
7,651 | #include "includes.h"
// Gathers filter i out of an interleaved weight matrix: element w of the
// filter lives at weightPtr[i + w*thisLayerSize]. The 2D grid is flattened to
// a linear thread id; one thread per filter element.
__global__ void RBMCopyFilterKernel( float *weightPtr, float *filterPtr, int weightCount, int i, int thisLayerSize )
{
    const int rowOffset = blockDim.x * gridDim.x * blockIdx.y; // threads in grid rows above
    const int tid = rowOffset + blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= weightCount)
        return;
    filterPtr[tid] = weightPtr[i + tid * thisLayerSize];
} |
7,652 | // tatami.cu
#include <cuda.h>
#include <iostream>
const unsigned nMax(100000000);
const unsigned nMaxSqrt(sqrt(nMax));
// Tatami counting, odd widths: each thread owns one odd i = 2*(tid+base)+7
// and atomically bumps v[] for every room size it can tile (v appears to be
// indexed by half the room area — TODO confirm against Tatami()'s `i + i`).
// NOTE(review): the __syncthreads() calls sit inside a while loop whose trip
// count differs per thread, so threads that leave the loop early skip
// barriers that others still execute — undefined behaviour by the programming
// model. They look like an attempt to keep the warp's atomics batched; a safe
// version would drop them.
__global__ void odd(unsigned* v, unsigned base)
{
unsigned i = (blockIdx.x * blockDim.x + threadIdx.x + base) * 2 + 7;
unsigned k2 = i + 3;
unsigned k3 = i + i - 4;
while ((k2 <= k3) && ((i * k2) < nMax))
{
unsigned k4 = (nMax - 1) / i;
if (k3 < k4)
k4 = k3;
__syncthreads();
for (unsigned j = k2 / 2; j <= k4 / 2; j++)
atomicAdd(&v[i * j], 1);
__syncthreads();
k2 += i + 1;
k3 += i - 1;
}
__syncthreads();
}
// Tatami counting, even widths: each thread owns one even i = 2*(tid+base)+8;
// the counter slot is v[i*j/2], i.e. indexed by half the room area.
// NOTE(review): same caveat as odd() — __syncthreads() inside a loop with a
// per-thread trip count is undefined behaviour; see comment there.
__global__ void even(unsigned* v, unsigned base)
{
unsigned i = (blockIdx.x * blockDim.x + threadIdx.x + base) * 2 + 8;
unsigned k2 = i + 3;
unsigned k3 = i + i - 4;
while ((k2 <= k3) && ((i * k2) < nMax))
{
unsigned k4 = (nMax - 1) / i;
if (k3 < k4)
k4 = k3;
__syncthreads();
for (unsigned j = k2; j <= k4; ++j)
atomicAdd(&v[i * j / 2], 1);
__syncthreads();
k2 += i + 1;
k3 += i - 1;
}
__syncthreads();
}
// Counts tatami-free configurations per room size on the GPU, then returns
// the smallest size whose count equals s (v is indexed by size/2, hence the
// `i + i`). Returns 0 when no size matches.
// Fixes vs. the original: both the device buffer `v` and the host buffer `vh`
// were leaked (the early `return` inside the scan loop made freeing them
// unreachable); they are now released on every path.
int Tatami(int s)
{
    unsigned* v;
    cudaMalloc(&v, sizeof(unsigned) * nMax);
    cudaMemset(v, 0, sizeof(unsigned) * nMax);
    const unsigned group_size = 1024;
    {
        // for (int i = 8; i < nMaxSqrt; i += 2)
        const unsigned iterations = (nMaxSqrt - 8) / 2;
        const unsigned groups = iterations / group_size;
        const unsigned trailing_group_size = iterations - group_size * groups;
        even<<<groups, group_size>>>(v, 0);
        if (trailing_group_size)
            even<<<1, trailing_group_size>>>(v, groups * group_size);
    }
    {
        // for (int i = 7; i < nMaxSqrt; i += 2)
        const unsigned iterations = (nMaxSqrt - 7) / 2;
        const unsigned groups = iterations / group_size;
        const unsigned trailing_group_size = iterations - group_size * groups;
        odd<<<groups, group_size>>>(v, 0);
        if (trailing_group_size)
            odd<<<1, trailing_group_size>>>(v, groups * group_size);
    }
    unsigned* vh = (unsigned*)malloc(sizeof(unsigned) * nMax);
    // cudaMemcpy synchronizes with the preceding kernels.
    cudaMemcpy(vh, v, sizeof(unsigned) * nMax, cudaMemcpyDeviceToHost);
    cudaFree(v);
    int result = 0; // 0 means "not found" (shouldn't happen)
    for (unsigned i = 0; i < nMax; ++i)
        if (vh[i] == s) { result = i + i; break; }
    free(vh);
    return result;
}
// Looks up and prints the smallest room size with exactly s tatami-free
// layouts.
int main()
{
    const int s = 200;
    std::cout << "T(" << Tatami(s) << ")=" << s << std::endl;
}
|
7,653 | #include "includes.h"
// Element-wise sum of two symbol vectors stored row-wise in symbolVectors:
// result[t] = symbolVectors[symbolOneId][t] + symbolVectors[symbolTwoId][t].
// 2D grid flattened to one linear thread id per vector element.
__global__ void SumBasicSymbolsKernel( float *symbolVectors, int symbolOneId, int symbolTwoId, float *result, int symbolSize )
{
    const int tid = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (tid >= symbolSize)
        return;
    const float *one = symbolVectors + symbolOneId * symbolSize;
    const float *two = symbolVectors + symbolTwoId * symbolSize;
    result[tid] = one[tid] + two[tid];
} |
7,654 | #include<stdio.h>
#include<iostream>
#include<cuda.h>
// Parses and sanity-checks the <NumBlocks> <BlockDim> command-line arguments.
int main(int argc, char* argv[]){
    if(argc != 3){
        std::cout << "Usage: " << argv[0] << " Numblocks BlockDim\n";
        return 0;
    }
    const int nBlocks = atoi(argv[1]); // parsed but not used further here
    const int bDim = atoi(argv[2]);
    (void)nBlocks;
    if(bDim > 1024){
        std::cout << "BlockDim should be less than or equal to 1024\n";
        return 0;
    }
    std::cout << "Lets do this!!!\n";
}
|
7,655 | #include <iostream>
#include <vector>
#include <random>
#include <time.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
using std::vector;
#define SIZE 10
// Thrust demo: fill V1 with 1..SIZE and V2 with 75s, compute V3 = V1 - V2 on
// the device, and print all three vectors.
int main()
{
    thrust::host_vector<float> h_V1(SIZE);
    thrust::host_vector<float> h_V2(SIZE);
    thrust::host_vector<float> h_V3(SIZE);

    thrust::sequence(h_V1.begin(), h_V1.end(), 1); // 1, 2, 3, ...
    thrust::fill(h_V2.begin(), h_V2.end(), 75);    // constant 75

    std::cout << "----- V1 -----" << std::endl;
    for(int i = 0; i < SIZE; ++i) std::cout << h_V1[i] << " ";
    std::cout << std::endl;
    std::cout << "----- V2 -----" << std::endl;
    for(int i = 0; i < SIZE; ++i) std::cout << h_V2[i] << " ";
    std::cout << std::endl;

    // Push inputs to the device, subtract element-wise, pull the result back.
    thrust::device_vector<float> d_V1 = h_V1;
    thrust::device_vector<float> d_V2 = h_V2;
    thrust::device_vector<float> d_V3(SIZE);
    thrust::transform(d_V1.begin(), d_V1.end(), d_V2.begin(), d_V3.begin(), thrust::minus<float>());
    thrust::copy(d_V3.begin(), d_V3.end(), h_V3.begin());

    std::cout << "----- V3 -----" << std::endl;
    for(int i = 0; i < SIZE; ++i) std::cout << h_V3[i] << " ";
    std::cout << std::endl;
    return 0;
}
|
7,656 | #define GROUPSIZE 64
#include <cstdio>
#define ASSERT_NO_CUDA_ERROR() { \
cudaThreadSynchronize(); \
cudaError_t err = cudaGetLastError(); \
if (err != cudaSuccess) { \
printf("Cuda error (%s/%d) in file '%s' in line %i\n", \
cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(1); \
} \
} while(0);
// Deliberately executes bar.sync (the barrier behind __syncthreads) inside
// divergent control flow: only threads whose dobarrier[i] is non-zero reach
// it. The inline asm hides the barrier from nvcc so the compiler cannot
// reject or reorder it. This is undefined behaviour by the programming model;
// the kernel exists to probe hardware/driver reaction, not for production use.
__global__ void k(int *dobarrier) {
int i = threadIdx.x;
if (dobarrier[i]) {
asm("bar.sync 0;"); // use asm to foil nvcc
}
}
// Builds a mask where every even-indexed thread participates in the barrier,
// uploads it, and launches the divergent-barrier probe on a single block.
int main() {
    const size_t bytes = sizeof(int) * GROUPSIZE;

    int *hostMask = new int[GROUPSIZE];
    for (int t = 0; t < GROUPSIZE; ++t)
        hostMask[t] = (t % 2 == 0) ? 1 : 0;  // even lanes hit the barrier

    int *devMask;
    cudaMalloc((void **)&devMask, bytes);
    cudaMemcpy(devMask, hostMask, bytes, cudaMemcpyHostToDevice);
    ASSERT_NO_CUDA_ERROR();

    k<<<1, GROUPSIZE>>>(devMask);
    ASSERT_NO_CUDA_ERROR();

    cudaFree(devMask);
    delete[] hostMask;
    return 0;
}
|
7,657 | #include <vector>
#include <typeinfo>
#include <thrust/device_vector.h>
#include <thrust/random.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
// 32-bit integer mix (Bob Jenkins style): six add/xor/shift rounds that
// decorrelate consecutive inputs. Used below to turn a thread index into a
// well-spread per-thread RNG seed offset.
__device__
unsigned int hash(unsigned int a)
{
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
// Maps an index (e.g. from a counting_iterator) to a uniform random real in
// [m_min, m_max). A fresh engine is seeded per call with seed + hash(index),
// so results are deterministic for a given (seed, index) pair.
template<typename T>
class RealRandomNumberFunctor : public thrust::unary_function<unsigned int, T>
{
public:
RealRandomNumberFunctor(unsigned int seed, T min, T max)
: m_seed(seed)
, m_min(min)
, m_max(max)
{}
// Draw one value for this index.
__device__
T operator()(unsigned int thread_id)
{
thrust::default_random_engine rng(m_seed + hash(thread_id));
thrust::uniform_real_distribution<T> distribution(m_min, m_max);
return distribution(rng);
}
private:
unsigned int m_seed; // base seed shared by all indices
T m_min, m_max;      // distribution bounds
};
// Integer counterpart of RealRandomNumberFunctor: maps an index to a uniform
// random integer in [m_min, m_max], deterministically per (seed, index).
template<typename T>
class IntRandomNumberFunctor : public thrust::unary_function<unsigned int, T>
{
public:
IntRandomNumberFunctor(unsigned int seed, T min, T max)
: m_seed(seed)
, m_min(min)
, m_max(max)
{}
// Draw one value for this index.
__device__
T operator()(unsigned int thread_id)
{
thrust::default_random_engine rng(m_seed + hash(thread_id));
thrust::uniform_int_distribution<T> distribution(m_min, m_max);
return distribution(rng);
}
private:
unsigned int m_seed; // base seed shared by all indices
T m_min, m_max;      // inclusive distribution bounds
};
template<typename T>
void thrust_generate_random_ex(typename std::vector<T>::iterator,
size_t, unsigned int, T, T);
/*
{
std::string message("thrust random generator do not support ");
throw std::logic_error(message + typeid(T).name());
// and then I realised that it will be never used.
}
*/
// Fills [begin, begin+size) with uniform random floats in [min, max),
// generated on the device and seeded deterministically by `seed`.
template<>
void thrust_generate_random_ex<float>(std::vector<float>::iterator begin,
                                      size_t size,
                                      unsigned int seed,
                                      float min, float max)
{
    thrust::device_vector<float> buffer(size);
    thrust::counting_iterator<int> first(0);
    thrust::counting_iterator<int> last(size);
    thrust::transform(first, last, buffer.begin(),
                      RealRandomNumberFunctor<float>(seed, min, max));
    thrust::copy(buffer.begin(), buffer.end(), begin);
}
// Fills [begin, begin+size) with uniform random ints in [min, max],
// generated on the device and seeded deterministically by `seed`.
template<>
void thrust_generate_random_ex<int>(std::vector<int>::iterator begin,
                                    size_t size,
                                    unsigned int seed,
                                    int min, int max)
{
    thrust::device_vector<int> buffer(size);
    thrust::counting_iterator<int> first(0);
    thrust::counting_iterator<int> last(size);
    thrust::transform(first, last, buffer.begin(),
                      IntRandomNumberFunctor<int>(seed, min, max));
    thrust::copy(buffer.begin(), buffer.end(), begin);
}
// Fills [begin, begin+size) with uniform random chars in [min, max], using
// the integer functor; generated on the device, seeded by `seed`.
template<>
void thrust_generate_random_ex<char>(std::vector<char>::iterator begin,
                                     size_t size,
                                     unsigned int seed,
                                     char min, char max)
{
    thrust::device_vector<char> buffer(size);
    thrust::counting_iterator<int> first(0);
    thrust::counting_iterator<int> last(size);
    thrust::transform(first, last, buffer.begin(),
                      IntRandomNumberFunctor<char>(seed, min, max));
    thrust::copy(buffer.begin(), buffer.end(), begin);
}
// Returns the number of free bytes of device memory on the current device.
size_t cuda_get_free_mem()
{
    size_t total_bytes = 0;
    size_t free_bytes = 0;
    cudaMemGetInfo(&free_bytes, &total_bytes); // total is queried but unused
    return free_bytes;
}
|
7,658 | #include<stdio.h>
#include<stdlib.h>
#include<malloc.h>
#include<time.h>
#include<cuda.h>
#include<string.h>
__global__
// Naive matrix multiply, one output element per thread: m3 = m1 * m2.
// NOTE(review): the indexing uses rowsM1 both as every matrix's row stride
// AND as the inner-product length (m1[Row*rowsM1+i], m2[i*rowsM1+Col]).
// That is only correct when all matrices are square with
// rowsM1 == colsM1 == rowsM2 == colsM2; for general shapes the inner loop
// must run colsM1 times and each matrix must be strided by its own column
// count. Verify that callers only ever pass square inputs.
void multiplicationKernell(float* m1, float* m2, float* m3, int rowsM1, int colsM2)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if((Row < rowsM1) && (Col < colsM2))
{
float resul = 0.0;
for(int i = 0; i < rowsM1; i++)
{
resul = resul + m1[Row*rowsM1+i] * m2[i*rowsM1+Col];
}
m3[Row*rowsM1+Col] = resul;
}
}
__host__
// Reads rows*cols whitespace-separated floats from `content` into the
// row-major buffer M, then closes the stream (ownership is taken here).
void toMatrix(float *M, FILE *content, int rows, int cols)
{
    const int total = rows * cols;
    for (int k = 0; k < total; k++)
    {
        fscanf(content, "%f", &M[k]);
    }
    fclose(content);
}
__host__
// Dumps a rows x cols row-major matrix to stdout, one bracketed value per
// cell, one matrix row per line.
void print(float *M, int rows, int cols)
{
    printf("----------MATRIX----------\n");
    for(int r = 0; r < rows; r++)
    {
        const float *row = M + r * cols;
        for(int c = 0; c < cols; c++)
            printf("[%f]", row[c]);
        printf("\n");
    }
}
// Loads two matrices from text files (argv[1], argv[2]), multiplies them on
// the GPU and prints operands and result.
// Fixes vs. the original:
//  * rowsM3/colsM3 were used UNINITIALIZED to size d_m3 — now set to the
//    result shape rowsM1 x colsM2;
//  * the shape test compared rowsM1 with colsM2; the inner dimensions
//    colsM1 and rowsM2 are what must agree;
//  * fopen failures are now detected;
//  * the grid x-dimension covers the OUTPUT width colsM3, not colsM1.
// NOTE(review): multiplicationKernell itself strides everything by rowsM1,
// so results are only meaningful for square matrices — confirm the inputs.
int main(int argc, char** argv)
{
    if(argc != 3)
    {
        printf("Error, no se encontraron todos los parametros necesarios.");
        return 1;
    }
    FILE *inputMatrix1 = fopen(argv[1],"r");
    FILE *inputMatrix2 = fopen(argv[2],"r");
    if(inputMatrix1 == NULL || inputMatrix2 == NULL)
    {
        printf("Error, no se pudieron abrir los archivos de entrada.");
        return 1;
    }
    float *m1, *m2, *m3;
    int rowsM1, rowsM2, colsM1, colsM2, rowsM3, colsM3;
    fscanf(inputMatrix1,"%d",&rowsM1);
    fscanf(inputMatrix1,"%d",&colsM1);
    fscanf(inputMatrix2,"%d",&rowsM2);
    fscanf(inputMatrix2,"%d",&colsM2);
    rowsM3 = rowsM1;   // result shape: rowsM1 x colsM2
    colsM3 = colsM2;
    m1 = (float*) malloc(rowsM1*colsM1*sizeof(float));
    m2 = (float*) malloc(rowsM2*colsM2*sizeof(float));
    m3 = (float*) malloc(rowsM3*colsM3*sizeof(float));
    toMatrix(m1, inputMatrix1, rowsM1, colsM1); // toMatrix closes the FILE
    toMatrix(m2, inputMatrix2, rowsM2, colsM2);
    print(m1, rowsM1, colsM1);
    print(m2, rowsM2, colsM2);
    if(colsM1 != rowsM2) // inner dimensions must agree
    {
        printf("Error los tamaños de las matrices no son compatibles.");
        return 1;
    }
    //Para el Devince
    cudaError_t error = cudaSuccess;
    float *d_m1, *d_m2, *d_m3;
    int blockSize = 32;
    dim3 dimBlockSize(blockSize,blockSize,1);
    dim3 dimGridSize(ceil(colsM3 / float(blockSize)), ceil(rowsM3 / float(blockSize)), 1);
    error = cudaMalloc((void**)&d_m1, rowsM1 * colsM1 * sizeof(float));
    if(error != cudaSuccess)
    {
        printf("Imposible asignar memoria para d_m1");
        return 1;
    }
    error = cudaMalloc((void**)&d_m2, rowsM2 * colsM2 * sizeof(float));
    if(error != cudaSuccess)
    {
        printf("Imposible asignar memoria para d_m2");
        return 1;
    }
    error = cudaMalloc((void**)&d_m3, rowsM3 * colsM3 * sizeof(float));
    if(error != cudaSuccess)
    {
        printf("Imposible asignar memoria para d_m3");
        return 1;
    }
    cudaMemcpy(d_m1, m1, rowsM1 * colsM1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_m2, m2, rowsM2 * colsM2 * sizeof(float), cudaMemcpyHostToDevice);
    multiplicationKernell<<<dimGridSize, dimBlockSize>>>(d_m1, d_m2, d_m3, rowsM1, colsM2);
    cudaMemcpy(m3, d_m3, rowsM3 * colsM3 * sizeof(float), cudaMemcpyDeviceToHost);
    print(m3, rowsM3, colsM3);
    free(m1);
    free(m2);
    free(m3);
    cudaFree(d_m1);
    cudaFree(d_m2);
    cudaFree(d_m3);
    return 0;
} |
7,659 | #include <stdio.h>
#include <string.h>
#define tpb 32
// out[k] = 1 iff bit (*bit) of nums[k] equals *value, for k < *len.
// The predicate of the LAST element is also saved to *last; the
// exclusive->inclusive shift (exToIn) needs it to recover the final slot.
// NOTE(review): `>>` plus `%2` assumes non-negative inputs — confirm.
__global__ void bitMask(int* nums,int*len, int* out, int* last, int*bit, int*value){
int index=threadIdx.x + blockIdx.x*tpb;
if (index<*len) out[index]=(((nums[index]>>(*bit))%2)==*value);
if (index==((*len)-1)) *last=(((nums[index]>>(*bit))%2)==*value);
}
// Turns the exclusive scan `inp` into an inclusive one in `out` by shifting
// everything left one slot; the last slot becomes inp[len-1] + *last (the
// saved final predicate), and *last is overwritten with the grand total so
// the caller can read the match count from it.
__global__ void exToIn(int* inp, int* out, int*len, int*last){
int index = threadIdx.x + blockIdx.x*tpb;
if((index>0)&&(index<*len)){
out[index-1]=inp[index];
}
if(index==((*len)-1)) { out[index]=inp[index]+*last;
*last=out[index];
}
}
// One round of the Blelloch scan up-sweep (reduce) phase at stride `step`:
// each (2*step)-aligned slot accumulates its neighbour `step` to the left.
// *len is the padded power-of-two length; *tLen appears to be the true
// (unpadded) length — note the `>` rather than `>=` in its guard, TODO
// confirm the intended bound.
__global__ void upSweep(int* arr, int* len, int* tLen, int step){
int index=threadIdx.x + blockIdx.x*tpb;
if(index>*tLen) return;
if((((index+1)%(step*2))!=0) || index==0 || ((*len)<=index)) return;
arr[index]=arr[index]+arr[index-step];
}
// One round of the Blelloch down-sweep phase at stride `step`. The first
// invocation (when 2*step == *len) zeroes the root slot; each aligned pair
// then swap-and-adds, turning the reduction tree into an exclusive prefix sum.
__global__ void downSweep(int* arr, int* len, int* tLen, int step){
int index=threadIdx.x + blockIdx.x*tpb;
if(2*step==*len) arr[(*len)-1]=0;
if((((index+1)%(step*2))!=0) || (index==0) || ((*len)<=index)) return;
int tmp=arr[index-step];
arr[index-step]=arr[index];
arr[index]+=tmp;
}
// Scatter-compact: when the inclusive prefix sum increased at i (element i
// matched the bit predicate), inp[i] is written to its compacted slot
// out[prefix[i-1]]. Slot 0 is special-cased as in the original.
// BUG FIX: the original guarded on (blockIdx.x + threadIdx.x) instead of the
// linear thread index threadIdx.x + blockIdx.x*tpb, so trailing threads whose
// real index was >= *inpLen could read and write out of bounds.
__global__ void copyOddsP(int*inp, int*prefix, int*inpLen,int*out){
    int i = threadIdx.x + blockIdx.x*tpb;
    if(i == 0){ out[0] = inp[0]; }
    else if(i < *inpLen){
        if(prefix[i] != prefix[i-1]){
            out[prefix[i-1]] = inp[i];
        }
    }
}
// Stable-partitions the device array cudNum by the predicate
// "bit `bit` == value": pads the length to a power of two, runs a Blelloch
// scan over the predicate flags, then compacts matching elements into a new
// device buffer, which is returned. *zeroLen receives a DEVICE pointer whose
// value is the number of matches (written by exToIn).
// NOTE(review): cBit, cVal, cudLen, out and shifted are device allocations
// leaked on every call (the frees are commented out below) — with 10 calls
// per pass this grows; re-enable them once the printed-debug path is gone.
int* filter(int* cudNum, int numLen, int bit, int value, int** zeroLen){
int* cBit;
int* cVal;
cudaMalloc(&cBit,sizeof(int));
cudaMalloc(&cVal,sizeof(int));
cudaMemcpy(cBit,&bit,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(cVal,&value,sizeof(int),cudaMemcpyHostToDevice);
// Pad the scan length up to the next power of two.
int falseLen=1;
while(falseLen<numLen) falseLen*=2;
int Len=falseLen;
int* cudLen;
cudaMalloc(&cudLen,sizeof(int));
cudaMemcpy(cudLen,&Len,sizeof(int),cudaMemcpyHostToDevice);
int* trueLen;
cudaMalloc(&trueLen,sizeof(int));
cudaMemcpy(trueLen,&numLen,sizeof(int),cudaMemcpyHostToDevice);
int* out;
cudaMalloc(&out,(Len+1)*sizeof(int));
int* last;
cudaMalloc(&last,sizeof(int));
// 1) predicate flags (and save the last element's flag in *last)
bitMask<<<(Len+tpb)/tpb,tpb>>>(cudNum,trueLen,out,last,cBit,cVal);
//(value==0) {printInt<<<1,1>>>(last);printArr<<<1,1>>>(out,trueLen);}
// 2) in-place Blelloch exclusive scan over the padded flags
for(int step=1; step<Len; step*=2){ upSweep<<<(Len+tpb)/tpb,tpb>>>(out,cudLen,trueLen,step); }
for(int step=Len/2; step>0; step/=2){ downSweep<<<(Len+tpb)/tpb,tpb>>>(out,cudLen,trueLen,step); }
// Switch back to the true (unpadded) length for the remaining steps.
Len=numLen;
cudLen=trueLen;
int* shifted;
cudaMalloc(&shifted,Len*sizeof(int));
// 3) exclusive -> inclusive; *last becomes the total match count
exToIn<<<(Len+tpb)/tpb,tpb>>>(out,shifted,cudLen,last);
int* cudOut;
cudaMalloc((void**) &cudOut, Len*sizeof(int));
// 4) compact matching elements into cudOut
copyOddsP<<<(Len+tpb)/tpb,tpb>>>(cudNum, shifted, cudLen,cudOut);
*zeroLen = last;
//cudaFree(cudLen);
//cudaFree(cudNum);
//cudaFree(out);
//cudaFree(last);
//cudaFree(shifted);
return cudOut;
}
// Concatenates b (length *lenB) followed by c (length *lenC) into a.
__global__ void copyArr(int*a, int*b, int*c, int* lenB, int*lenC){
    const int i = threadIdx.x + blockIdx.x*tpb;
    const int nb = *lenB;
    if (i >= nb + *lenC) return;
    a[i] = (i < nb) ? b[i] : c[i - nb];
}
// Radix-sorts the comma-separated integers in inp.txt (10 LSB-first bit
// passes of stable partition) on the GPU and writes the result to q4.txt.
// Fixes vs. the original:
//  * fgets(buff+1, 50000, ...) could write buff[50000] — one past the end;
//  * fopen results are now checked;
//  * the output file was never closed and main had no return value;
//  * the unused locals cudLen and maxBit were removed.
int main(int argc,char **argv){
    char buff[50000];
    int inp[15000];
    buff[0]=' ';
    char* token;
    FILE* fp = fopen("inp.txt", "r" );
    if (fp == NULL) {
        fprintf(stderr, "cannot open inp.txt\n");
        return 1;
    }
    fgets(buff+1, (int)sizeof(buff)-1, fp); // -1: we already used buff[0]
    fclose(fp);
    // Parse comma-separated tokens; each token starts one char before digits.
    token=strtok(buff,",");
    int numLen=0;
    while(token!=NULL){
        inp[numLen]=atoi(token+1);
        numLen++;
        token=strtok(NULL,",");
    }
    int* zerLen;
    int* oneLen;
    int* start;
    int* end;
    int* cudNum;
    cudaMalloc(&cudNum,(numLen*sizeof(int)));
    cudaMemcpy(cudNum,inp,(numLen)*sizeof(int),cudaMemcpyHostToDevice);
    // One stable partition per bit, least-significant first.
    for(int i=0;i<10; i++){
        start=filter(cudNum,numLen,i,0, &zerLen);
        end=filter(cudNum,numLen,i,1,&oneLen);
        copyArr<<<(numLen+tpb)/tpb,tpb>>>(cudNum,start,end,zerLen,oneLen);
    }
    cudaMemcpy(inp,cudNum,numLen*sizeof(int),cudaMemcpyDeviceToHost);
    FILE* fp_end = fopen("q4.txt", "w");
    if (fp_end == NULL) {
        fprintf(stderr, "cannot open q4.txt\n");
        return 1;
    }
    for (int i = 0; i < numLen; i++) {
        fprintf(fp_end, "%d", inp[i]);
        if (i != numLen-1) {
            fprintf(fp_end, ", ");
        }
    }
    fclose(fp_end);
    return 0;
}
|
7,660 | /*Single Author info:
mreddy2 Muppidi Harshavardhan Reddy */
/* Program to compute Pi using Monte Carlo methods */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include<cuda_runtime.h>
#include<curand_kernel.h>
#define SEED 35791246
#define THREADS 512
// Monte-Carlo pi: each thread with idx < nitter draws one (x,y) in the unit
// square and records a hit when it lands inside the quarter circle; a
// power-of-two block reduction then writes one partial sum per block.
// Fixes vs. the original:
//  * threads with idx >= nitter never initialized their shared slot, so the
//    last block's reduction summed garbage — they now write 0;
//  * curand_init wrote state[idx] unconditionally, past the end of the
//    nitter-element state array for tail threads — it is now guarded.
__global__ void integrate(double *x_d,double *y_d, int nitter, curandState *state,double *gsum)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int i;
    __shared__ double sum[THREADS];
    if(idx<nitter)
    {
        curand_init(SEED, idx, 0, &state[idx]); // per-thread RNG stream
        curandState localState = state[idx];
        x_d[idx] = curand_uniform(&localState);
        y_d[idx] = curand_uniform(&localState);
        if((x_d[idx]*x_d[idx] + y_d[idx]*y_d[idx])<=1){
            sum[threadIdx.x] = 1;}
        else
        {
            sum[threadIdx.x] =0;}
    }
    else
    {
        sum[threadIdx.x] = 0; // inactive tail thread contributes nothing
    }
    // Block reduction (assumes blockDim.x is a power of two; THREADS == 512).
    __syncthreads();
    for (i = blockDim.x / 2; i > 0; i >>= 1) { /* per block */
        if (threadIdx.x < i)
            sum[threadIdx.x] += sum[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0){
        gsum[blockIdx.x] = sum[threadIdx.x]; // Getting Each Block Total Points
    }
}
// Launches the Monte-Carlo pi estimator, sums the per-block partials on the
// host and prints the estimate.
// Fixes vs. the original:
//  * `count` was accumulated without ever being initialized;
//  * argv[1] was dereferenced without an argc check;
//  * the curand state array is sized for the FULL grid (blocks*THREADS) —
//    the kernel is launched with more threads than niter;
//  * host buffer z is sized to (and copied as) one partial per block, and is
//    freed; the unused blocks_d device allocation was removed.
int main(int argc, char** argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <niter>\n", argv[0]);
        return 1;
    }
    int niter = 0;                        // Total Number of Points
    double *x_d, *y_d, *z, *result_d;     // Device Copies + host partials
    double count = 0.0, pi;               // Host accumulators
    int i, blocks;
    curandState *states_d;                // For Currand State Device Copy
    niter = atoi(argv[1]);
    blocks = (niter/THREADS) + 1;         // Number of blocks for all points
    z = (double *)malloc(blocks * sizeof(double)); // one partial per block
    // GPU Memory Allocation for device copies
    cudaMalloc((void **)&states_d, (size_t)blocks * THREADS * sizeof(curandState));
    cudaMalloc((void **)&x_d, niter * sizeof(double));
    cudaMalloc((void **)&y_d, niter * sizeof(double));
    cudaMalloc((void **)&result_d, sizeof(double) * blocks);
    integrate<<<blocks,THREADS>>>(x_d, y_d, niter, states_d, result_d);
    // copy back from GPU to CPU (synchronizes with the kernel)
    cudaMemcpy( z, result_d, blocks*sizeof(double), cudaMemcpyDeviceToHost);
    for(i=0;i<blocks;i++) // Summing up total points at all Blocks
    {
        count += z[i];
    }
    cudaFree(x_d); // Deallocation of the Memory
    cudaFree(y_d);
    cudaFree(result_d);
    cudaFree(states_d);
    free(z);
    pi=(double)count/niter*4;
    printf(" # of trials= %d , estimate of pi is %.16f \n",niter,pi);
    return 0;
}
|
7,661 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#include <curand.h>
//#include <sys/time.h>
#define SEED 921
#define NUM_ITER 1000000 //Iterations per thread
#define NUM_BLOCKS 1024
#define TPB 128
// Monte-Carlo pi sampler: every thread draws NUM_ITER points in the unit
// square and tallies the hits inside the quarter circle; thread 0 of each
// block then writes the block total to totals[blockIdx.x].
// Fixes vs. the original:
//  * the shared tally array was sized NUM_BLOCKS (1024) instead of one slot
//    per thread in the block (TPB);
//  * thread 0 summed the block WITHOUT a barrier, racing the other threads'
//    final writes — __syncthreads() now precedes the reduction.
__global__ void calculatePi(curandState *dev_random, unsigned long long *totals) {
    __shared__ unsigned long long count[TPB];
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(SEED, i, 0, &dev_random[i]);
    double x, y, z;
    count[threadIdx.x] = 0;
    for (int iter = 0; iter < NUM_ITER; ++iter) {
        x = curand_uniform(&dev_random[i]);
        y = curand_uniform(&dev_random[i]);
        z = (x*x) + (y*y);
        if (z <= 1.0)
        {
            count[threadIdx.x] += 1;
        }
    }
    __syncthreads(); // all per-thread tallies must be visible before summing
    if (threadIdx.x == 0) {
        totals[blockIdx.x] = 0;
        for (int t = 0; t < TPB; ++t) {
            totals[blockIdx.x] += count[t];
        }
    }
}
// Launches the pi sampler, sums the per-block tallies on the host and prints
// the estimate.
// BUG FIX: `totals` is host memory from malloc and was released with
// cudaFree — it is now free()d.
int main(int argc, char* argv[]) {
    curandState *dev_random;
    unsigned long long *totals, *d_totals;
    unsigned long long NumThreads = (unsigned long long) (NUM_BLOCKS * TPB);
    unsigned long long NumIter = NUM_ITER; // samples drawn per thread
    cudaMalloc((void**)&dev_random, NumThreads * sizeof(curandState));
    cudaMalloc(&d_totals, NUM_BLOCKS * sizeof(unsigned long long));
    totals = (unsigned long long*)malloc(NUM_BLOCKS * sizeof(unsigned long long));
    calculatePi<<<NUM_BLOCKS, TPB>>>(dev_random, d_totals);
    // cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(totals, d_totals, NUM_BLOCKS * sizeof(unsigned long long), cudaMemcpyDeviceToHost);
    unsigned long long count = 0;
    for (int i = 0; i < NUM_BLOCKS; ++i) {
        count += totals[i];
    }
    double pi = ((double) count / (double)(NumThreads * NumIter)) * 4.0;
    printf("%f \n", pi);
    cudaFree(dev_random);
    cudaFree(d_totals);
    free(totals); // host allocation — cudaFree here was a bug
    return 0;
} |
7,662 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include "math.h"
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* CUDA Error Function */
/*******************************************************/
// Pass-through error check: in debug builds (DEBUG/_DEBUG defined) any
// non-success result aborts with a message; in release builds it compiles
// down to a plain return of the code, so call sites cost nothing.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
// 3x3 Sobel-style edge magnitude (normalized by 6) over an 8-bit image of
// size width x height; one-pixel border is left untouched.
// BUG FIXES vs. the original (duplicated taps in the two 3x3 masks):
//  * i6 read input[loc+width-1] — the same pixel as i4; the bottom-right tap
//    of the vertical mask is loc+width+1;
//  * d3 read input[loc-1] — the same pixel as d2; the bottom-left tap of the
//    horizontal mask is loc+width-1.
// Also uses sqrtf (the float overload) instead of promoting to double.
__global__ void kernel(unsigned char *input,
                       unsigned char *output,
                       unsigned int height,
                       unsigned int width){
    int x = blockIdx.x*TILE_SIZE+threadIdx.x;
    int y = blockIdx.y*TILE_SIZE+threadIdx.y;
    if (x > 0 && x < width-1 && y > 0 && y < height-1 ){
        int loc = y*width+x;
        // Vertical gradient: -1 -2 -1 across the row above, +1 +2 +1 below.
        int i1 = ((int)input[loc-width-1]) * -1;
        int i2 = ((int)input[loc-width]) * -2;
        int i3 = ((int)input[loc-width+1]) * -1;
        int i4 = ((int)input[loc+width-1]) * 1;
        int i5 = ((int)input[loc+width]) * 2;
        int i6 = ((int)input[loc+width+1]) * 1;
        int it = (i1 + i2 + i3 + i4 + i5 + i6)/6;
        // Horizontal gradient: +1 +2 +1 down the left column, -1 -2 -1 right.
        int d1 = ((int)input[loc-width-1]) * 1;
        int d2 = ((int)input[loc-1]) * 2;
        int d3 = ((int)input[loc+width-1]) * 1;
        int d4 = ((int)input[loc-width+1]) * -1;
        int d5 = ((int)input[loc+1]) * -2;
        int d6 = ((int)input[loc+width+1]) * -1;
        int dt = (d1 + d2 + d3 + d4 + d5 + d6)/6;
        // Gradient magnitude; max possible is < 255, so the cast is safe.
        int total = (int)(sqrtf((float)it*(float)it + (float)dt*(float)dt));
        output[loc] = (unsigned char)total;
    }
}
// Host wrapper: uploads in_mat, runs the edge-detection kernel over it, and
// copies the result into out_mat. (Despite its name it does NOT transpose —
// presumably a leftover name from an earlier version.)
// The device buffers are padded up to TILE_SIZE multiples; only the
// height*width prefix is copied each way, and the kernel's own bounds checks
// keep it inside that region. Uses the file-scope input_gpu/output_gpu
// pointers, so this function is not reentrant.
void transpose_img(unsigned char *in_mat,
unsigned char *out_mat,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU (sizeof(char) == sizeof(unsigned char) == 1)
checkCuda(cudaMemcpy(input_gpu,
in_mat,
height*width*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
kernel<<<dimGrid, dimBlock>>>(input_gpu, output_gpu, height, width);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(out_mat,
output_gpu,
height*width*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
} |
7,663 | #include "includes.h"
// For each of *size elements: a = base - deviation, b = base + deviation.
// base/a are double precision; deviation/b are single precision (the double
// arithmetic result is narrowed on the store into b).
__global__ void plusMinus(int *size, const double *base, const float *deviation, double *a, float *b) {
    const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x;
    if (idx >= *size)
        return;
    const double center = base[idx];
    const double delta = deviation[idx]; // widened once, used twice
    a[idx] = center - delta;
    b[idx] = center + delta;
} |
7,664 | // In this assignment you will expand your "Hello world" kernel to see how
// are threads, warps and blocks scheduled.
//
// Follow instructions for TASK 1 which consists from writing a kernel,
// configuring it and then running the code. After running the code few
// times you should see that blocks are executed in no particular order
//
// After you finish TASK 1 continue with TASK 2 and TASK 3 following same
// workflow. Write the kernel, configure it properly and then run code
// multiple times to see how threads from one warp are schedules and how
// warps from one block are scheduled.
// NOTE: You should finish your basic "Hello world" assignment first, before
// doing this one.
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
//----------------------------------------------------------------------
// TASK 1.0: Write a new "Hello world" kernel, called for example
// 'helloworld_blocks', which in addition to "Hello world" writes out which
// block is writing out the string.
// For example "Hello world from block 2!"
//
// In order to print which block is saying "Hello world" you can use syntax
// like this:
// printf("integer=%d; float=%f or %e;\n",1, 0.0001, 0.0001);
// Also remember that every thread can access pre-set variable which
// refer to its coordinates and coordinates of the block which it resides in.
// These are dim3 data types called: threadIdx, blockIdx, blockDim
// and gridDim
// dim3 data type has three components: x, y, z
// write your kernel here
// TASK 1 kernel: each block announces itself on the console; run repeatedly
// to observe that block scheduling order is arbitrary.
__global__ void helloworld_blocks(void) {
    const int block = blockIdx.x;
    printf("Hello from block %d!\n", block);
}
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 2.0: Write a "Hello world" kernel which output "Hello world" but
// in addition to that also outputs which block and thread it
// comes from. For example: "Hello world from block 1, thread 3"
//
// As in task one use printf() function to print to console and utilise
// pre-set variables threadIdx, blockIdx, blockDim and gridDim.
// write your kernel here
// TASK 2 kernel: every thread prints its block and thread coordinates so the
// per-warp in-order execution is visible against the arbitrary block order.
__global__ void helloworld_bt(void) {
    const int block  = blockIdx.x;
    const int thread = threadIdx.x;
    printf("Hello from block %d and thread %d!\n", block, thread);
}
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 3.0: Write a "Hello world" kernel where only first thread from each
// warp writes out to console. So for example:
// "Hello world from block 2, warp 3"
//
// A warp is group of 32 threads. First warp is consists from threads 0--31,
// second warp consists from threads 32--63 and so on. To select first thread
// from each warp we have to use modulo "%" operation. Modulo operation returns
// remainder after division. So 3%2=1 while 4&2=0;
// To select first thread from each warp we need to use a branch like this:
// if(threadIdx.x%32==0) {
// this block will be executed only by first thread from each warp
// }
// To identify which warp thread resides in you should remember that warp consist
// from 32 threads.
// write your kernel here
// TASK 3 kernel: only lane 0 of each warp (threadIdx.x % 32 == 0) prints.
// Fix: the task statement (see TASK 3.0 above) asks for output identifying
// BOTH the block and the warp ("Hello world from block 2, warp 3"); the
// previous version printed only the warp number.
__global__ void helloworld_first(void) {
    if (threadIdx.x%32 == 0) {
        printf("Hello from block %d, warp %d!\n", blockIdx.x, (threadIdx.x / 32));
    }
}
//----------------------------------------------------------------------
// Driver: selects a device, launches the three scheduling-demo kernels, and
// waits for them to finish before tearing the context down.
int main(void) {
    // initiate GPU: use device 0 if it exists
    int deviceid = 0;
    int devCount;
    cudaGetDeviceCount(&devCount);
    if(deviceid<devCount){
        cudaSetDevice(deviceid);
    }
    else {
        printf("ERROR! Selected device is not available\n");
        return(1);
    }
    // TASK 1.1: 10 blocks of 1 thread — block scheduling order is arbitrary.
    dim3 Gd(10,1,1);
    dim3 Bd(1,1,1);
    helloworld_blocks<<<Gd, Bd>>>();
    // TASK 2.1: 5 blocks of 10 threads — threads within one warp print in order.
    dim3 Gd_2(5,1,1);
    dim3 Bd_2(10,1,1);
    helloworld_bt<<<Gd_2, Bd_2>>>();
    // TASK 3.1: 5 blocks of 320 threads (10 warps per block).
    dim3 Gd_3(5,1,1);
    dim3 Bd_3(320,1,1);
    helloworld_first<<<Gd_3, Bd_3>>>();
    // Fix: kernel launches are asynchronous — wait for them so the
    // device-side printf output is flushed before the context is destroyed.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return (0);
}
|
7,665 | extern int f();
extern float* g();
extern void h();
// Adds the integer b to every element, one element per thread, flat 1-D
// indexing. No bounds guard: callers must launch exactly as many threads
// as there are elements in a.
__global__
void k1(float *a, int b) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    a[idx] = a[idx] + b;
}
// Exercises several execution-configuration forms for k1.
// NOTE(review): the evaluation order of multiple f()/g() calls inside one
// <<<...>>> configuration and argument list is unspecified; if f() has side
// effects the resulting grid/block/argument values may vary by compiler.
void launches_k1(float *a) {
    k1<<<8, 8>>>(a, 4);                    // constant grid and block
    k1<<<8, f()>>>(a, 4);                  // runtime block size
    k1<<<f(), f()>>>(a, 4);                // runtime grid and block
    k1<<<f(), f()>>>(g(), f());            // runtime pointer argument too
    k1<<<f()+f(), f()*2>>>(g(), f()+f());  // expressions in the configuration
}
|
7,666 | /****
File: findRedsDriver.cu
Date: 5/3/2018
By: Bill Hsu
Compile: nvcc findRedsDriver.cu -o frgpu
Run: ./frgpu
****/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <cuda.h>
#define NUMPARTICLES 32768
#define NEIGHBORHOOD .05
#define THREADSPERBLOCK 64
void initPos(float *);
float findDistance(float *, int, int);
__device__ float findDistanceGPU(float *, int, int);
void dumpResults(int index[]);
__global__ void findRedsGPU(float *p, int *numI);
// Driver: generates NUMPARTICLES random particles, counts red neighbours on
// the GPU (one thread per particle), reports the kernel+copy time, and dumps
// the per-particle counts to ./dump.out.
int main() {
    cudaEvent_t start, stop;
    float time;
    // host-side buffers: particle records (x,y,z,color) and per-particle counts
    float *pos;
    int *numReds;
    // device-side mirrors
    float *device_Pos;
    int *device_Reds;
    pos = (float *) malloc(NUMPARTICLES * 4 * sizeof(float));
    numReds = (int *) malloc(NUMPARTICLES * sizeof(int));
    initPos(pos);
    cudaMalloc((void **)&device_Pos,NUMPARTICLES * 4 * sizeof(float));
    cudaMalloc((void **)&device_Reds,NUMPARTICLES * sizeof(int));
    cudaMemcpy(device_Pos,pos,NUMPARTICLES * 4 * sizeof(float),cudaMemcpyHostToDevice);
    // timing events around kernel + result copy
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // one thread per particle
    findRedsGPU<<<NUMPARTICLES/THREADSPERBLOCK,THREADSPERBLOCK>>>(device_Pos,device_Reds);
    // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    cudaMemcpy(numReds,device_Reds,NUMPARTICLES * sizeof(int),cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Elapsed time = %f\n", time);
    dumpResults(numReds);
    // Fix: release events and all host/device memory (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(device_Pos);
    cudaFree(device_Reds);
    free(pos);
    free(numReds);
    return 0;
}
// Fills each 4-float particle record (x, y, z, color) with random positions
// in [0,1] and one of three colors (red/green/blue) chosen uniformly.
void initPos(float *p) {
    for (int idx = 0; idx < NUMPARTICLES; idx++) {
        float *rec = &p[idx * 4];
        rec[0] = rand() / (float) RAND_MAX;
        rec[1] = rand() / (float) RAND_MAX;
        rec[2] = rand() / (float) RAND_MAX;
        switch (rand() % 3) {
            case 0:  rec[3] = 0xff0000; break;  // red
            case 1:  rec[3] = 0x00ff00; break;  // green
            default: rec[3] = 0x0000ff; break;  // blue
        }
    }
}
// Euclidean distance between particles i and j; records are 4 floats
// (x, y, z, color) so coordinates live at p[i*4 .. i*4+2].
__device__ float findDistanceGPU(float *p, int i, int j) {
    float dx = p[i*4]   - p[j*4];
    float dy = p[i*4+1] - p[j*4+1];
    float dz = p[i*4+2] - p[j*4+2];
    // Fix: the original called sqrt() (double), forcing a float->double->float
    // round trip in device code; sqrtf keeps the math in single precision.
    return sqrtf(dx*dx + dy*dy + dz*dz);
}
// One thread per particle i: counts the red particles (color slot 0xff0000)
// that lie within NEIGHBORHOOD of particle i, writing the count to numI[i].
__global__ void findRedsGPU(float *p, int *numI) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Fix: bounds guard. Today NUMPARTICLES divides evenly by the block
    // size, but a changed configuration would otherwise write out of bounds.
    if (i >= NUMPARTICLES) {
        return;
    }
    int count = 0;  // accumulate in a register instead of global memory
    for (int j = 0; j < NUMPARTICLES; j++) {
        if (i != j) {
            // distance between particles i and j
            float distance = findDistanceGPU(p, i, j);
            // increment when within range and particle j is red
            if (distance < NEIGHBORHOOD && p[j*4+3] == 0xff0000) {
                count++;
            }
        }
    }
    numI[i] = count;
}
// Writes "particle_index count" pairs to ./dump.out, one line per particle.
void dumpResults(int index[]) {
    FILE *fp = fopen("./dump.out", "w");
    // Fix: a failed fopen previously led straight to fprintf(NULL, ...).
    if (fp == NULL) {
        fprintf(stderr, "dumpResults: cannot open ./dump.out for writing\n");
        return;
    }
    for (int i = 0; i < NUMPARTICLES; i++) {
        fprintf(fp, "%d %d\n", i, index[i]);
    }
    fclose(fp);
}
|
7,667 | #include "includes.h"
// Clamps every element of a up to at least EPS (floor at EPS).
__global__ void vecEps(float* a,const int N){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // Fix: the bounds check must come FIRST. The original evaluated
    // a[i] < EPS before testing i < N, so tail threads read out of bounds.
    if(i < N && a[i] < EPS)
        a[i] = EPS;
}
7,668 | /*
* a simple test
*/
// Per-block shared-memory buffers used by the doit/doit1 kernels below.
// NOTE(review): __shared__ at file scope is non-standard CUDA (shared
// variables normally live inside device code); this file looks like a
// compiler/analyser test case — confirm the toolchain accepts it.
__shared__ float data1[32];
__shared__ float data2[32];
__shared__ float data3[32];
// Adds data2 and data3 into data1, indexing each at i-start.
// NOTE(review): the loop runs i = 0..end-1, so any start > 0 yields a
// negative index into the 32-element shared arrays, and end-start > 32
// overruns them. This appears to be deliberate test input for an analyser
// rather than production code — confirm before reuse.
__global__ void doit(int start, int end) {
    int i;
    for (i = 0; i < end; i++) {
        data1[i-start] = data2[i-start] + data3[i-start];
    }
}
// Same computation as doit, but reads data2 through an offset pointer:
// p1 = &data2[2], so p1[i-start-2] refers to the same element as
// data2[i-start]. The same out-of-range caveats as doit apply.
__global__ void doit1(int start, int end) {
    int i;
    float * p1 = &data2[2];
    for (i = 0; i < end; i++) {
        data1[i-start] = p1[i-start-2] + data3[i-start];
    }
}
|
7,669 | #include <stdio.h>
#include <math.h>
#include <stdint.h> //uint32_tは符号なしintで4バイトに指定
#include <stdlib.h> //記憶域管理を使うため
#include <cuda.h>
//記号定数として横幅と縦幅を定義
#define width 1024
#define heigth 1024
#define pixel width*heigth
//画像生成用の配列
float lumi_intensity[pixel]; //光強度用の配列
unsigned char img[pixel]; //bmp用の配列
/*--------------------bmpの構造体--------------------*/
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ //構造体BITMAPFILEHEADERはファイルの先頭に来るもので,サイズは14 byte
unsigned short bfType; //bfTypeは,bmp形式であることを示すため,"BM"が入る
uint32_t bfSize; //bfsizeは,ファイル全体のバイト数
unsigned short bfReserved1; //bfReserved1と2は予約領域で,0になる
unsigned short bfReserved2;
uint32_t bf0ffBits; //bf0ffBitsは先頭から画素データまでのバイト数
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ //BITMAPINFOHEADERはbmpファイルの画像の情報の構造体で,サイズは40 byte
uint32_t biSize; //画像のサイズ
uint32_t biWidth; //横の画素数
uint32_t biHeight; //縦の画素数
unsigned short biPlanes; //1
unsigned short biBitCount; //一画素あたりの色の数のbit数.今回は8
uint32_t biCompression; //圧縮タイプを表す.bmpは非圧縮なので0
uint32_t biSizeImage; //bmp配列のサイズを表す.biCompression=0なら基本的に0
uint32_t biXPelsPerMeter; //biXPelsPerMeterとbiYPelsPerMeterは基本的に0
uint32_t biYPelsPerMeter;
uint32_t biCirUsed; //0
uint32_t biCirImportant; //0
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
/*--------------------------------------------------*/
/*--------------------フレネル近似のカーネル関数--------------------*/
/*-------------------- Fresnel-approximation CGH kernel --------------------*/
// One thread per hologram pixel: thread (j, i) accumulates the cosine
// contribution of all 284 object points into lumi_intensity_d[i*width+j].
// The output buffer must be zeroed before launch because the kernel
// accumulates into it.
// NOTE(review): the point count 284 is hard-coded here even though the host
// reads a variable `points` from the input file — confirm they match.
__global__ void fresnel_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d){
    int j = blockDim.x*blockIdx.x+threadIdx.x;  // column (replaces the width loop)
    int i = blockDim.y*blockIdx.y+threadIdx.y;  // row (replaces the heigth loop)
    // Fix: bounds guard so grids that overshoot the image do not write OOB.
    if (j >= width || i >= heigth) {
        return;
    }
    int adr = i*width+j;
    float wave_len=0.633F;           // light wavelength
    float wave_num=M_PI/wave_len;    // half of the wavenumber (pi/lambda)
    // Accumulate in a register: one global store instead of a global
    // read-modify-write per object point.
    float acc = lumi_intensity_d[adr];
    for (int k=0; k<284; k++) {
        float xx = ((float)j-x_d[k])*((float)j-x_d[k]);
        float yy = ((float)i-y_d[k])*((float)i-y_d[k]);
        acc += __cosf(wave_num*(xx+yy)*z_d[k]);
    }
    lumi_intensity_d[adr] = acc;
}
/*--------------------------------------------------*/
/*--------------------main関数--------------------*/
// Driver: reads the 3D object points, computes a Fresnel hologram on the
// GPU, binarises it around the midpoint intensity, and writes an 8-bit
// grayscale BMP ("fresnel-gpu.bmp").
int main(){
    BITMAPFILEHEADER bmpFh;
    BITMAPINFOHEADER bmpIh;
    RGBQUAD rgbQ[256];
    // host-side variables
    int i;
    int points;                          // number of object points
    float min = 0.0F, max = 0.0F, mid;   // used for binarisation
    FILE *fp;
    // load the 3D object file (binary)
    fp = fopen("cube284.3d","rb");
    if (!fp) {
        printf("3D file not found!\n");
        exit(1);
    }
    fread(&points, sizeof(int), 1, fp);  // first int is the point count
    printf("the number of points is %d\n", points);
    // object-point arrays (VLAs: can only be sized after reading `points`)
    int x[points];
    int y[points];
    float z[points];
    int x_buf, y_buf, z_buf;             // temporaries for one record
    // scale the points and place them relative to the hologram plane
    for (i=0; i<points; i++) {
        fread(&x_buf, sizeof(int), 1, fp);
        fread(&y_buf, sizeof(int), 1, fp);
        fread(&z_buf, sizeof(int), 1, fp);
        x[i] = x_buf*40+width*0.5;       // spread by 40 and centre on the plate
        y[i] = y_buf*40+heigth*0.5;
        z[i] = 1.0F/(((float)z_buf)*40+10000.0F);
    }
    fclose(fp);
    /*-------------------- CGH computation on the GPU --------------------*/
    int *x_d, *y_d;
    float *z_d;
    float *lumi_intensity_d;
    dim3 block(32,32,1);                                    // threads per block
    dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1);  // blocks per grid
    // device-side allocations
    cudaMalloc((void**)&x_d, points*sizeof(int));
    cudaMalloc((void**)&y_d, points*sizeof(int));
    cudaMalloc((void**)&z_d, points*sizeof(float));
    cudaMalloc((void**)&lumi_intensity_d, pixel*sizeof(float));
    // BUG FIX: fresnel_gpu accumulates into lumi_intensity_d, but cudaMalloc
    // does not initialise memory — zero the buffer before the kernel runs.
    cudaMemset(lumi_intensity_d, 0, pixel*sizeof(float));
    // host -> device transfers
    cudaMemcpy(x_d, x, points*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(y_d, y, points*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(z_d, z, points*sizeof(float), cudaMemcpyHostToDevice);
    // launch the kernel
    fresnel_gpu<<< grid, block >>>(x_d, y_d, z_d, lumi_intensity_d);
    // device -> host transfer of the computed intensity (blocking copy)
    cudaMemcpy(lumi_intensity, lumi_intensity_d, pixel*sizeof(float), cudaMemcpyDeviceToHost);
    // release device memory
    cudaFree(x_d);
    cudaFree(y_d);
    cudaFree(z_d);
    cudaFree(lumi_intensity_d);
    /*--------------------------------------------------*/
    // find the minimum and maximum intensity
    for (i=0; i<pixel; i++) {
        if (min>lumi_intensity[i]) {
            min = lumi_intensity[i];
        }
        if (max<lumi_intensity[i]) {
            max = lumi_intensity[i];
        }
    }
    mid = (min+max)/2;  // threshold = midpoint of the range
    printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
    // binarise each pixel against the threshold
    for (i=0; i<pixel; i++) {
        if (lumi_intensity[i]<mid) {
            img[i] = 0;
        }
        else{
            img[i] = 255;
        }
    }
    /*-------------------- BMP output --------------------*/
    // BITMAPFILEHEADER
    bmpFh.bfType = 19778;               // 'B'=0x42,'M'=0x4d -> 0x4d42 = 19778
    bmpFh.bfSize = 14+40+1024+(pixel);  // 1024 = palette (256 entries * 4 bytes)
    bmpFh.bfReserved1 = 0;
    bmpFh.bfReserved2 = 0;
    bmpFh.bf0ffBits = 14+40+1024;       // offset from file start to pixel data
    // BITMAPINFOHEADER
    bmpIh.biSize = 40;
    bmpIh.biWidth = width;
    bmpIh.biHeight = heigth;
    bmpIh.biPlanes = 1;
    bmpIh.biBitCount = 8;               // 8-bit grayscale
    bmpIh.biCompression = 0;            // uncompressed
    bmpIh.biSizeImage = 0;
    bmpIh.biXPelsPerMeter = 0;
    bmpIh.biYPelsPerMeter = 0;
    bmpIh.biCirUsed = 0;
    bmpIh.biCirImportant = 0;
    // grayscale palette
    for (i=0; i<256; i++) {
        rgbQ[i].rgbBlue = i;
        rgbQ[i].rgbGreen = i;
        rgbQ[i].rgbRed = i;
        rgbQ[i].rgbReserved = 0;
    }
    /*--------------------------------------------------*/
    fp = fopen("fresnel-gpu.bmp","wb");  // binary write
    fwrite(&bmpFh, sizeof(bmpFh), 1, fp);
    fwrite(&bmpIh, sizeof(bmpIh), 1, fp);
    fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp);
    fwrite(img, sizeof(unsigned char), pixel, fp);
    printf("'fresnel-gpu.bmp' was saved.\n\n");
    fclose(fp);
    return 0;
}
|
7,670 | #include "../ginkgo/loaddata.hpp"
#include "../ginkgo/GOrder.h"
#include "../ginkgo/GOrderList.h"
#include "../ginkgo/GOrderHandler.h"
#include "../include/lglist.h"
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define gpu_seq(x) thrust::sequence((x).begin(), (x).end())
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int level_lim = 90;
const int order_lim = 100;
// Runs the entire order-book simulation sequentially on the device and
// writes (total_pnl, pos, total_qty) into ans[0..2].
// NOTE(review): main launches this with <<<1, 2>>>, so BOTH threads execute
// the whole loop independently and both write ans — duplicated work and
// unsynchronised (if identical) writes. A <<<1, 1>>> launch looks intended;
// confirm.
__global__ void simKernel(int N_tstamp, int base_p, int *booksize, int *ask, int *bid,
                          int *tprice, int *tsize, int *tside, float *t_stamp, float *ltcy, int *ans){
    int max_position = 15;       // strategy position limit
    int level_order_lim = 5;     // per-level open-order limit
    gpu_ginkgo::OrderHandler<order_lim, level_lim> ohandler(base_p, level_order_lim);
    ohandler.loadStrategy(max_position, 0., 0.);
    // replay every timestamp: tside == 0 means a book update, else a trade
    for(int t=0;t<N_tstamp;++t){
        ohandler.getTimeInfo(t_stamp[t], ltcy[t]);
        if(!tside[t]){
            ohandler.bookUpdateSim(booksize+t*level_lim, ask[t], bid[t], 0.5*(ask[t] + bid[t]));
            ohandler.cancelAndSendNewOrders();
        }
        else{
            bool sell = (tside[t] == -1);  // -1 = sell side, otherwise buy
            ohandler.processTrade(sell, tprice[t], tsize[t]);
            ohandler.cancelAndSendNewOrders();
        }
        if(t%100 == 0){
            ohandler.showBasicInfo();  // periodic progress output
        }
    }
    // publish the final results for the host
    ans[0] = ohandler.total_pnl;
    ans[1] = ohandler.pos;
    ans[2] = ohandler.total_qty;
}
// Driver: loads market data from the file given as argv[1], mirrors it into
// thrust device vectors, runs simKernel once, and prints timing plus the
// simulation results.
// NOTE(review): simKernel is launched with <<<1, 2>>> although it contains
// no thread indexing — both threads redundantly run the full simulation;
// see the note on simKernel.
int main(int argc, char* argv[]){
    assert(argc > 1);
    // load and pre-process the input data set
    LoadData ld(argv[1], 0.1);
    int Level_lim = ld.preProcess();
    auto bzs = ld.getBookSize();
    auto ask = ld.getAsk();
    auto bid = ld.getBid();
    auto tsz = ld.getTradeSize();
    auto tsd = ld.getTradeSide();
    auto tp = ld.getTradePrice();
    auto tstamps = ld.getTimeStamp();
    auto base_p = ld.getBasePrice();
    int Ns = (int)ask.size();  // number of timestamps
    // echo the first record as a sanity check
    cout<<endl;
    cout<<ask[0]<<' '<<bid[0]<<endl;
    cout<<tsz[0]<<' '<<tsd[0]<<' '<<tp[0]<<endl;
    cout<<tstamps[0]<<endl;
    cout<<Level_lim <<' '<<base_p<<endl;
    cout<<"====================== Start simulation ======================"<<endl<<endl;
    // device-side copies of all input series (macros defined at file top)
    def_dvec(int) d_bz(Ns * level_lim, 0), d_ap(Ns), d_bp(Ns), d_tsz(Ns), d_tp(Ns), d_tsd(Ns);
    def_dvec(float) d_t(Ns), d_ltcy(Ns, 0.);
    gpu_copy(ask, d_ap);
    gpu_copy(bid, d_bp);
    gpu_copy(tsz, d_tsz);
    gpu_copy(tsd, d_tsd);
    gpu_copy(tp, d_tp);
    gpu_copy(tstamps, d_t);
    // book sizes are copied level-block by level-block
    for(int i=0;i<Ns;++i){
        gpu_copy_to(bzs[i], d_bz, i*level_lim);
    }
    def_dvec(int) ans(3, 0);  // (pnl, position, trade count) result slots
    cudaEvent_t start, stop;
    float cuda_time;
    cudaEventCreate(&start); // creating the event 1
    cudaEventCreate(&stop); // creating the event 2
    cudaEventRecord(start, 0);
    //Running the kernel
    simKernel<<<1, 2>>>(Ns, base_p, to_ptr(d_bz), to_ptr(d_ap), to_ptr(d_bp),
        to_ptr(d_tp), to_ptr(d_tsz), to_ptr(d_tsd), to_ptr(d_t), to_ptr(d_ltcy), to_ptr(ans));
    cudaEventRecord(stop, 0); // Stop time measuring
    cudaEventSynchronize(stop);  // also guarantees the kernel has finished
    cudaEventElapsedTime(&cuda_time, start, stop); // Saving the time measured
    cout<<"Time Usage for sim is: "<<cuda_time/1000<<"s"<<endl;
    cout<<"Total pnl = "<<ans[0]<<endl;
    cout<<"Current Position = "<<ans[1]<<endl;
    cout<<"Total trades = "<<ans[2]<<endl;
    return 0;
}
7,671 | #include <sys/time.h>
#include <stdio.h>
#include<math.h>
//TODO for writing to file, will be deleted
//#include <stdlib.h>
#include <cuda_runtime.h>
#define NUM_STREAMS 16
// time stamp function in ms
// Wall-clock timestamp in milliseconds, built from gettimeofday().
double getTimeStamp() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000 + (double) now.tv_usec / 1000;
}
// Fills an n*n*n volume with the deterministic pattern (i+j+k)*1.1f.
void initData(float* data, int n){
    for(int i = 0; i < n; i++)
        for(int j = 0; j < n; j++)
            for(int k = 0; k < n; k++)
                data[(i*n + j)*n + k] = (float) (i+j+k)*(float)1.1;
}
// Debug helper: prints the first THREE z-layers of an n*n*n volume
// (fixed at 3 regardless of n; callers must ensure n >= 3).
// %lf with a float argument is fine: printf promotes floats to double.
void debugPrint(float* data, int n){
    int i,j,k;
    for(i = 0; i < 3; i++){
        printf("--------layer %d--------\n",i);
        for(j = 0; j < n; j++){
            for(k = 0; k < n; k++){
                printf("%lf ",data[i*n*n + j*n + k]);
            }
            printf("\n");
        }
        printf("\n");
    }
    printf("\n");
}
// host side matrix addition
// CPU reference 6-point 3-D stencil: for every interior cell,
// a = 0.8 * (sum of the six face neighbours of b). Boundary cells of a are
// left untouched. Neighbour-addition order matches the GPU kernel.
void h_stencil(float *a, float *b, int n){
    for(int i = 1; i < n-1; i++){
        for(int j = 1; j < n-1; j++){
            for(int k = 1; k < n-1; k++){
                const int c = i*n*n + j*n + k;
                a[c] = ((float)0.8)*(b[c - n*n] + b[c + n*n]   // z- / z+
                                   + b[c - n]   + b[c + n]     // y- / y+
                                   + b[c - 1]   + b[c + 1]);   // x- / x+
            }
        }
    }
}
// host side validation
// Exact element-wise comparison of two n*n*n volumes.
// Returns true iff every element is bit-equal (no tolerance).
bool val(float *a, float *b, int n){
    const int total = n*n*n;
    for(int idx = 0; idx < total; idx++){
        if(a[idx] != b[idx]){
            return false;
        }
    }
    return true;
}
// Alternating-sign checksum over the interior cells: each value is added
// when (i+j+k) is odd and subtracted when even.
double h_sum(float *data, int n){
    double acc = 0;
    for(int i = 1; i < n-1; i++)
        for(int j = 1; j < n-1; j++)
            for(int k = 1; k < n-1; k++){
                const int sign = ((i+j+k)%2) ? 1 : -1;
                acc += data[i*n*n + j*n + k]*sign;
            }
    return acc;
}
// Same alternating-sign checksum as h_sum, but each value is first rounded
// to two decimal places (roundf(x*100)/100) to damp float noise.
double h_rsum(float *data, int n){
    double acc = 0;
    for(int i = 1; i < n-1; i++)
        for(int j = 1; j < n-1; j++)
            for(int k = 1; k < n-1; k++){
                const float rounded = roundf(data[i*n*n + j*n + k]*100)/100;
                acc += rounded*(((i+j+k)%2) ? 1 : -1);
            }
    return acc;
}
// Loads one xy-slice (layer l) of the volume b into the shared tile sm,
// including a one-cell halo on each side. (smx, smy) are the caller's
// 1-based coordinates inside the tile; (ix, iy) are the matching global
// coordinates. Edge threads additionally fetch the halo cells.
// NOTE(review): no __syncthreads() here — callers must synchronise after
// this before reading sm (kernal does).
__device__ void globalToShared(float *sm, float *b, int l, int n, int smx, int smy, int ix, int iy){
    // interior cell owned by this thread
    sm[smx+smy*(blockDim.x+2)] = b[ix + iy*n + l*n*n];
    if(smx==1){
        // left halo column
        sm[0+smy*(blockDim.x+2)] = b[ix-1 + iy*n + l*n*n];
    }
    if(smx==blockDim.x || ix==n-2){
        // right halo column (also covers a partial tile at the domain edge)
        sm[smx+1+smy*(blockDim.x+2)] = b[ix+1 + iy*n + l*n*n];
    }
    if(smy==1){
        // top halo row (row 0 of the tile)
        sm[smx] = b[ix + (iy-1)*n + l*n*n];
    }
    if(smy==blockDim.y || iy==n-2){
        // bottom halo row
        sm[smx+(smy+1)*(blockDim.x+2)] = b[ix + (iy+1)*n + l*n*n];
    }
}
// 3-D 6-point stencil over `height` z-layers, staging one xy-slice at a
// time in dynamic shared memory. Each thread owns one (gx, gy) column and
// marches upward keeping below/self/above in registers; the four lateral
// neighbours come from the shared tile. Requires dynamic shared memory of
// at least (blockDim.x+2)*(blockDim.y+2) floats (launch sites pass
// (1024+33*4)*sizeof(float) for 32x32 blocks).
// NOTE(review): the __syncthreads() calls sit inside the
// if(gx<n-1&&gy<n-1) guard — for partial edge tiles some threads skip the
// barrier, which is undefined behaviour; confirm launches keep all threads
// of a block on the same side of the guard.
__global__ void kernal( float *a, float *b, int n, int height){
    extern __shared__ float sm[];
    int ix = threadIdx.x + 1;  // 1-based coordinates inside the shared tile
    int iy = threadIdx.y + 1;
    int gx = threadIdx.x + 1 + blockIdx.x*blockDim.x;  // global coords (skip boundary)
    int gy = threadIdx.y + 1 + blockIdx.y*blockDim.y;
    float down,up,self;
    float l1,l2,l3,l4;  // lateral neighbours (x-1, x+1, y-1, y+1) of `self`
    if(gx<n-1&&gy<n-1){
        down = b[gx+gy*n];  // layer 0 value for this column, straight from global
        globalToShared(sm, b, 1, n, ix, iy, gx, gy);
        __syncthreads();  // the tile for layer 1 is now complete
        self = sm[ix + iy*(blockDim.x+2)];
        l1 = sm[ix-1 + iy*(blockDim.x+2)];
        l2 = sm[ix+1 + iy*(blockDim.x+2)];
        l3 = sm[ix + (iy-1)*(blockDim.x+2)];
        l4 = sm[ix + (iy+1)*(blockDim.x+2)];
        __syncthreads();  // everyone done reading before the tile is reused
        int layer;
        #pragma unroll
        for(layer = 2; layer < height; layer++){
            globalToShared(sm, b, layer, n, ix, iy, gx, gy);
            __syncthreads();
            up = sm[ix + iy*(blockDim.x+2)];
            // emit the result for layer-1 from registers + cached laterals
            a[gx + gy*n + (layer-1)*n*n] = ((float)0.8)*(down+up+l1+l2+l3+l4);
            // shift the register pipeline up one layer
            down = self;
            self = up;
            l1 = sm[ix-1 + iy*(blockDim.x+2)];
            l2 = sm[ix+1 + iy*(blockDim.x+2)];
            l3 = sm[ix + (iy-1)*(blockDim.x+2)];
            l4 = sm[ix + (iy+1)*(blockDim.x+2)];
            __syncthreads();
        }
    }
}
// Driver: runs the 3-D stencil on the GPU — a streamed slab pipeline for
// n >= 250, a single-shot path otherwise — validates against the CPU
// reference, and prints a checksum plus the elapsed GPU time in ms.
// NOTE(review): in the streamed path the kernels are launched WITHOUT a
// stream argument, i.e. on the legacy default stream, so they do not
// overlap with the per-stream async copies (the default stream synchronises
// with all others — results stay correct, but the intended pipelining is
// lost). The created streams are also never destroyed, and stream[0] is
// never created (the loops start at 1).
int main( int argc, char *argv[] ) {
    // get program arguments
    if( argc != 2) {
        printf("Error: wrong number of args\n") ;
        exit(1) ;
    }
    int n = atoi( argv[1] );  // cube edge length
    //int pad_n = n + 32 - (n-2)%32;
    int noElems = n*n*n ;
    int bytes = noElems * sizeof(float) ;
    // alloc memory host-side
    float *h_A = (float *) malloc( bytes ) ;
    //float *h_B = (float *) malloc( bytes ) ;
    //float *h_dA = (float *) malloc( bytes ) ;
    float *h_B;
    float *h_dA;
    // pinned host memory so the async copies below can actually be async
    cudaMallocHost((void**)&h_B,bytes);
    cudaMallocHost((void**)&h_dA,bytes);
    // init matrices with random data
    initData(h_B, n);
    memset(h_A, 0.0, bytes);
    memset(h_dA, 0.0, bytes);
    // alloc memory dev-side
    float *d_A, *d_B ;
    cudaMalloc( (void **) &d_A, bytes ) ;
    cudaMalloc( (void **) &d_B, bytes ) ;
    //debugPrint(h_B, n);
    // invoke Kernel
    dim3 block(32, 32);
    dim3 grid((n-2+block.x-1)/block.x,(n-2+block.y-1)/block.y);
    double timeStampA = getTimeStamp() ;
    double timeStampD = getTimeStamp() ;
    if(n>=250){
        // streamed path: split the volume into NUM_STREAMS z-slabs with a
        // 2-layer overlap so each slab can compute its interior layers
        //transfer data to dev
        //stream creation
        int batch_h = (n+NUM_STREAMS-1)/NUM_STREAMS;  // layers per slab
        int batch_size = n*n*batch_h;
        int last_batch = noElems-(NUM_STREAMS-1)*batch_size;
        int b_size[NUM_STREAMS];  // layer count handed to each kernel
        b_size[0] = batch_h;
        b_size[NUM_STREAMS-1] = n-(NUM_STREAMS-1)*batch_h + 2;
        for(int k = 1; k < NUM_STREAMS-1; k++){
            b_size[k] = batch_h+2;
        }
        int offset[NUM_STREAMS];  // starting layer of each slab (2-layer overlap)
        offset[0] = 0;
        for(int k = 1; k < NUM_STREAMS; k++){
            offset[k] = k*batch_h-2;
        }
        //for(int k = 0; k < NUM_STREAMS; k++){
        // printf("b_size %d is %d\n",k,b_size[k]);
        // printf("off %d is %d\n",k,offset[k]);
        //}
        timeStampA = getTimeStamp() ;
        cudaStream_t stream[NUM_STREAMS+1];
        for (int i = 1; i < NUM_STREAMS; i++){
            cudaStreamCreate(&(stream[i]));
            // async H2D copy of slab i-1, kernel over it, async D2H of its interior
            cudaMemcpyAsync(&d_B[(i-1)*batch_size],&h_B[(i-1)*batch_size],batch_size*sizeof(float),cudaMemcpyHostToDevice,stream[i]);
            kernal<<<grid,block,(1024+33*4)*sizeof(float)>>>(d_A+n*n*offset[i-1],d_B+n*n*offset[i-1],n,b_size[i-1]);
            cudaMemcpyAsync(&h_dA[n*n*(1+offset[i-1])],&d_A[n*n*(1+offset[i-1])],(b_size[i-1]-2)*n*n*sizeof(float),cudaMemcpyDeviceToHost,stream[i]);
        }
        // final (possibly shorter) slab
        cudaStreamCreate(&(stream[NUM_STREAMS]));
        cudaMemcpyAsync(&d_B[(NUM_STREAMS-1)*batch_size],&h_B[(NUM_STREAMS-1)*batch_size],last_batch*sizeof(float),cudaMemcpyHostToDevice,stream[NUM_STREAMS]);
        kernal<<<grid,block,(1024+33*4)*sizeof(float)>>>(d_A+n*n*offset[NUM_STREAMS-1],d_B+n*n*offset[NUM_STREAMS-1],n,b_size[NUM_STREAMS-1]);
        cudaMemcpyAsync(&h_dA[n*n*(1+offset[NUM_STREAMS-1])],&d_A[n*n*(1+offset[NUM_STREAMS-1])],(b_size[NUM_STREAMS-1]-2)*n*n*sizeof(float),cudaMemcpyDeviceToHost,stream[NUM_STREAMS]);
        //sync all streams and done
        for(int i = 1; i < NUM_STREAMS+1; i++){
            cudaStreamSynchronize(stream[i]);
        }
        timeStampD = getTimeStamp() ;
    }else{
        timeStampA = getTimeStamp() ;
        //transfer data to dev
        cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice ) ;
        //debugPrint(h_B, n);
        // invoke Kernel (single launch over the whole volume)
        dim3 block(32, 32);
        dim3 grid((n-2+block.x-1)/block.x,(n-2+block.y-1)/block.y);
        kernal<<<grid,block,(1024+33*4)*sizeof(float)>>>(d_A,d_B,n,n);
        cudaDeviceSynchronize() ;
        //cudaDeviceProp GPUprop;
        //cudaGetDeviceProperties(&GPUprop,0);
        //printf("maxgridsize x is %d\n",GPUprop.maxGridSize[0]);
        //copy data back
        cudaMemcpy( h_dA, d_A, bytes, cudaMemcpyDeviceToHost ) ;
        timeStampD = getTimeStamp() ;
    }
    // CPU reference and exact comparison against the GPU result
    h_stencil(h_A,h_B,n);
    //h_dA = h_A;
    bool match = val(h_A,h_dA,n);
    //float h_Result = h_rsum(h_A,n);
    float h_dResult = h_sum(h_dA,n);
    // print out results
    //if(!memcmp(h_A,h_dA,n*n*n*sizeof(float))){
    if(match){
        //debugPrint(h_A, n);
        //debugPrint(h_dC, nx, ny);
        //FILE* fptr;
        //fptr = fopen("time.log","a");
        //fprintf(fptr,"%d: %lf %.6f\n", n, h_dResult, timeStampD-timeStampA);
        //fclose(fptr);
        //printf("%lf %lf %d\n", h_dResult, h_Result, (int)round(timeStampD-timeStampA));
        printf("%lf %d\n", h_dResult, (int)round(timeStampD-timeStampA));
    }else{
        //debugPrint(h_A, n);
        //debugPrint(h_dA, n);
        //FILE* fptr;
        //fptr = fopen("time.log","a");
        //fprintf(fptr,"%d Error: function failed.\n", n);
        //fclose(fptr);
        printf("Error: function failed.\n");
    }
    // free GPU resources
    cudaFree(d_A);
    cudaFree(d_B);
    cudaDeviceReset();
}
|
7,672 |
#include "Particle_cuda.cuh"
#include "kernels.cuh"
// Fixed launch configuration shared by every wrapper below.
// NOTE(review): a constant 512x256 launch regardless of n assumes the
// kernels in kernels.cuh use grid-stride loops or internal bounds checks —
// confirm there.
dim3 gridSize = 512;
dim3 blockSize = 256;

// Copies particle x/y positions into the draw (e.g. VBO) buffer ptr.
void SetDrawArray(float *ptr, float *x, float *y, int n)
{
    set_draw_array_kernel<<< gridSize, blockSize>>>(ptr, x, y, n);
}

// Resets all per-node quadtree arrays and the bounding-box scalars.
void ResetArrays(int *mutex, float *x, float *y, float *mass, int *count, int *start, int *sorted, int *child, int *index, float *left, float *right, float *bottom, float *top, int n, int m)
{
    reset_arrays_kernel<<< gridSize, blockSize >>>(mutex, x, y, mass, count, start, sorted, child, index, left, right, bottom, top, n, m);
}

// Computes the global bounding box (left/right/bottom/top) of all particles.
void ComputeBoundingBox(int *mutex, float *x, float *y, float *left, float *right, float *bottom, float *top, int n)
{
    compute_bounding_box_kernel<<< gridSize, blockSize >>>(mutex, x, y, left, right, bottom, top, n);
}

// Inserts the particles into the Barnes-Hut quadtree.
void BuildQuadTree(float *x, float *y, float *mass, int *count, int *start, int *child, int *index, float *left, float *right, float *bottom, float *top, int n, int m)
{
    build_tree_kernel<<< gridSize, blockSize >>>(x, y, mass, count, start, child, index, left, right, bottom, top, n, m);
}

// Computes centre-of-mass / total mass for the tree cells.
void ComputeCentreOfMass(float *x, float *y, float *mass, int *index, int n)
{
    centre_of_mass_kernel<<<gridSize, blockSize>>>(x, y, mass, index, n);
}

// Produces the sorted particle order used for coherent force traversal.
void SortParticles(int *count, int *start, int *sorted, int *child, int *index, int n)
{
    sort_kernel<<< gridSize, blockSize >>>(count, start, sorted, child, index, n);
}

// Accumulates accelerations (and velocity updates) from tree traversal.
void CalculateForces(float* x, float *y, float *vx, float *vy, float *ax, float *ay, float *mass, int *sorted, int *child, float *left, float *right, int n, float g)
{
    compute_forces_kernel<<< gridSize, blockSize >>>(x, y, vx, vy, ax, ay, mass, sorted, child, left, right, n, g);
}

// Advances positions/velocities by one time step dt (d: damping-like factor
// passed through to the kernel — semantics defined in kernels.cuh).
void IntegrateParticles(float *x, float *y, float *vx, float *vy, float *ax, float *ay, int n, float dt, float d)
{
    update_kernel<<<gridSize, blockSize >>>(x, y, vx, vy, ax, ay, n, dt, d);
}

// Packs x/y into the interleaved output array out.
void FillOutputArray(float *x, float *y, float *out, int n)
{
    copy_kernel<<<gridSize, blockSize >>>(x, y, out, n);
}
|
7,673 | #include <cuda.h>
#include <stdio.h>
#define SIZE 4
int *d_arr;
int *h_arr;
// Device-side helper: prints the raw pointer value it was handed
// (debugging aid to inspect the device address).
__device__ void device(int *d_arr) {
    printf("%p\n", d_arr);
}
// Kernel entry point: forwards the device pointer to the printing helper.
__global__ void kernel(int *d_arr) {
    device(d_arr);
}
// Demo driver: allocates SIZE bytes (room for one int) on host and device,
// copies host->device, and has the kernel print the device pointer.
__host__ int main () {
    h_arr = (int *) malloc(SIZE);
    // Fix: malloc memory is uninitialized — define it before copying it to
    // the device (reading uninitialized bytes is undefined behaviour).
    h_arr[0] = 0;
    cudaMalloc((void **) &d_arr, SIZE);
    cudaMemcpy(d_arr, h_arr, SIZE, cudaMemcpyHostToDevice);
    kernel<<<1, 1>>>(d_arr);
    // Fix: the launch is asynchronous — wait so the device printf output is
    // flushed before the program frees resources and exits.
    cudaDeviceSynchronize();
    free(h_arr);
    cudaFree(d_arr);
    return 0;
}
7,674 | #include "includes.h"
// One thread per "block" entry: records the current coordinate of the
// selected degree of freedom into blockpos, then perturbs that coordinate
// by delta in initpos. blocks[] maps a block number to its first atom
// index; setnum offsets the DOF within that block's span.
__global__ void perturbPositions( float *blockpos, float4 *initpos, float delta, int *blocks, int numblocks, int setnum, int N ) {
    const int blockNum = blockIdx.x * blockDim.x + threadIdx.x;
    if( blockNum >= numblocks ) {
        return;   // no block assigned to this thread
    }
    const int dof  = 3 * blocks[blockNum] + setnum;
    const int atom = dof / 3;
    // Skip atoms past the end of the system or spilling into the next block.
    const bool pastEnd      = ( atom >= N );
    const bool inNextBlock  = ( blockNum != ( numblocks - 1 ) ) && ( atom >= blocks[blockNum + 1] );
    if( pastEnd || inNextBlock ) {
        return; // Out of bounds
    }
    switch( dof % 3 ) {
    case 0:
        blockpos[dof] = initpos[atom].x;
        initpos[atom].x += delta;
        break;
    case 1:
        blockpos[dof] = initpos[atom].y;
        initpos[atom].y += delta;
        break;
    default:
        blockpos[dof] = initpos[atom].z;
        initpos[atom].z += delta;
        break;
    }
}
7,675 | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
// Machine-generated (TVM-style) fused convolution kernel: stages padded
// input tiles and the three factorised kernels (K0, K1, KC) through shared
// memory and accumulates a 2x2 register tile per thread into Output.
// Left byte-for-byte as generated — regenerate from the schedule rather
// than hand-editing. Expected launch: grid (4,16,64), block (4,2,1); see
// Conv2dRcpFusedNchwKernelLauncher below.
__global__ void default_function_kernel0(const float* __restrict__ Data,
    const float* __restrict__ K0,
    const float* __restrict__ K1,
    const float* __restrict__ KC,
    float* __restrict__ Output) {
  float Output_local[4];
  __shared__ float pad_temp_shared[64];
  __shared__ float K0_shared[4];
  __shared__ float K1_shared[1];
  __shared__ float KC_shared[3];
  for (int nn_inner_outer = 0; nn_inner_outer < 4; ++nn_inner_outer) {
    for (int ocb_inner_outer = 0; ocb_inner_outer < 2; ++ocb_inner_outer) {
      for (int oca_c_init = 0; oca_c_init < 2; ++oca_c_init) {
        for (int ww_c_init = 0; ww_c_init < 2; ++ww_c_init) {
          Output_local[((oca_c_init * 2) + ww_c_init)] = 0.000000e+00f;
        }
      }
      for (int rr_outer = 0; rr_outer < 11; ++rr_outer) {
        for (int rca_outer = 0; rca_outer < 2; ++rca_outer) {
          for (int rcb_outer = 0; rcb_outer < 4; ++rcb_outer) {
            for (int rw_outer = 0; rw_outer < 3; ++rw_outer) {
              __syncthreads();
              for (int ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner < 8; ++ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) {
                pad_temp_shared[(((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 8)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner)] = ((((((1 - (((((int)threadIdx.x) * 8) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) / 8)) <= (((int)blockIdx.y) * 2)) && ((((int)blockIdx.y) * 2) < (33 - (((((int)threadIdx.x) * 8) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) / 8)))) && (((1 - ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) - rw_outer) <= (((int)blockIdx.x) * 8))) && ((((int)blockIdx.x) * 8) < ((33 - ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) - rw_outer))) ? Data[(((((((((((((((int)blockIdx.z) / 4) * 65536) + (((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 8)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) / 64) * 16384)) + (nn_inner_outer * 16384)) + (rca_outer * 8192)) + ((((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 8)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 64) / 32) * 4096)) + (rcb_outer * 1024)) + (((int)blockIdx.y) * 64)) + ((((((int)threadIdx.x) * 8) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) / 8) * 32)) + (((int)blockIdx.x) * 8)) + rw_outer) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) - 33)] : 0.000000e+00f);
              }
              if ((((int)threadIdx.y) * 2) < (4 - ((int)threadIdx.x))) {
                if (((int)threadIdx.x) < 2) {
                  if ((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 4) < (4 - rr_outer)) {
                    K0_shared[((((int)threadIdx.y) * 2) + ((int)threadIdx.x))] = K0[(((((((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) / 4) * 44) + (rr_outer * 44)) + (rca_outer * 22)) + (((((((int)threadIdx.y) * 2) + ((int)threadIdx.x)) % 4) / 2) * 11)) + (((((int)blockIdx.z) % 4) / 2) * 2)) + (((int)threadIdx.x) % 2))];
                  }
                }
              }
              if (((int)threadIdx.x) < (1 - ((int)threadIdx.y))) {
                if (((int)threadIdx.x) < 1) {
                  if (((int)threadIdx.x) < ((4 - rr_outer) - ((int)threadIdx.y))) {
                    K1_shared[(((int)threadIdx.x) + ((int)threadIdx.y))] = K1[((((((((int)threadIdx.x) * 44) + (((int)threadIdx.y) * 44)) + (rr_outer * 44)) + (rcb_outer * 11)) + ((((int)blockIdx.z) % 2) * 2)) + ocb_inner_outer)];
                  }
                }
              }
              if ((((int)threadIdx.y) * 2) < (3 - ((int)threadIdx.x))) {
                if (((int)threadIdx.x) < 2) {
                  KC_shared[((((int)threadIdx.y) * 2) + ((int)threadIdx.x))] = KC[((((((int)threadIdx.y) * 66) + (((int)threadIdx.x) * 33)) + (rw_outer * 11)) + rr_outer)];
                }
              }
              __syncthreads();
              for (int rca_inner = 0; rca_inner < 2; ++rca_inner) {
                for (int rh_inner = 0; rh_inner < 3; ++rh_inner) {
                  for (int oca_c = 0; oca_c < 2; ++oca_c) {
                    for (int ww_c = 0; ww_c < 2; ++ww_c) {
                      Output_local[((oca_c * 2) + ww_c)] = (Output_local[((oca_c * 2) + ww_c)] + (((pad_temp_shared[(((((rca_inner * 32) + (((int)threadIdx.y) * 8)) + (rh_inner * 8)) + (((int)threadIdx.x) * 2)) + ww_c)] * K0_shared[((rca_inner * 2) + oca_c)]) * K1_shared[0]) * KC_shared[rh_inner]));
                    }
                  }
                }
              }
            }
          }
        }
      }
      for (int oca_inner_inner_inner = 0; oca_inner_inner_inner < 2; ++oca_inner_inner_inner) {
        for (int ww_inner_inner_inner = 0; ww_inner_inner_inner < 2; ++ww_inner_inner_inner) {
          Output[((((((((((((((int)blockIdx.z) / 4) * 65536) + (nn_inner_outer * 16384)) + (((((int)blockIdx.z) % 4) / 2) * 8192)) + (oca_inner_inner_inner * 4096)) + ((((int)blockIdx.z) % 2) * 2048)) + (ocb_inner_outer * 1024)) + (((int)blockIdx.y) * 64)) + (((int)threadIdx.y) * 32)) + (((int)blockIdx.x) * 8)) + (((int)threadIdx.x) * 2)) + ww_inner_inner_inner)] = Output_local[((oca_inner_inner_inner * 2) + ww_inner_inner_inner)];
        }
      }
    }
  }
}
// Host-side launcher for the generated fused NCHW convolution kernel above.
// The grid (4,16,64) and block (4,2,1) shapes are baked into the generated
// kernel's hard-coded index arithmetic -- do not change them independently.
void Conv2dRcpFusedNchwKernelLauncher(const float* U, const float* K0,
const float* K1, const float* KC, float* V){
dim3 gridDim0(4, 16, 64);
dim3 blockDim0(4, 2, 1);
default_function_kernel0<<<gridDim0, blockDim0>>>(U, K0, K1, KC, V);
// Block until the kernel finishes so V is complete when we return.
// NOTE(review): no error check after the launch -- a failed launch is silent.
cudaDeviceSynchronize();
}
#endif
|
7,676 |
extern "C"
__global__ void
TransformRigid2D(const double *input, double *output, int n, double cos_r, double sin_r, double tx, double ty) {
    // One thread per 2-D point: apply a rigid transform -- rotation by the
    // angle whose cosine/sine are (cos_r, sin_r), then translation by (tx, ty).
    // Points are stored interleaved as (x, y) pairs in both arrays.
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        const double px = input[2 * idx + 0];
        const double py = input[2 * idx + 1];
        output[2 * idx + 0] = px * cos_r - py * sin_r + tx;
        output[2 * idx + 1] = px * sin_r + py * cos_r + ty;
    }
} |
7,677 | #include<stdio.h>
#include<cuda_runtime.h>
#include<math.h>
#define CHANNELS 3
#define SIZE 256
#define PI 3.14
float h_rgb[SIZE*SIZE*CHANNELS],h_grayscale[SIZE*SIZE];
// Convert one RGB pixel to grayscale. One thread per pixel (px, py) of the
// SIZE x SIZE image; d_rgb is interleaved RGB, d_grayscale is a single plane.
__global__ void grayscale_kernel(float *d_rgb,float *d_grayscale)
{
    int px = blockIdx.x*blockDim.x + threadIdx.x;
    int py = blockIdx.y*blockDim.y + threadIdx.y;
    // Guard against out-of-range threads if the grid overshoots the image.
    if (px >= SIZE || py >= SIZE)
        return;
    int base = py*SIZE*CHANNELS + px*CHANNELS;
    float r = d_rgb[base + 0];
    float g = d_rgb[base + 1];
    float b = d_rgb[base + 2];
    // Luminance weights (approximately the ITU-R BT.709 coefficients).
    // Float literals avoid a silent promotion to double arithmetic.
    d_grayscale[py*SIZE + px] = 0.21f*r + 0.71f*g + 0.07f*b;
}
// Launch the grayscale conversion: a 32x32 thread tile per block, with the
// grid tiling the SIZE x SIZE image exactly (SIZE is a multiple of 32).
void convert_rgb_grayscale(float *d_rgb,float *d_grayscale)
{
    const dim3 block(32, 32, 1);
    const dim3 grid(SIZE / block.x, SIZE / block.y);
    grayscale_kernel<<<grid, block>>>(d_rgb, d_grayscale);
}
// Fill the global host RGB buffer with pseudo-random byte values (0..255).
void initialise_image()
{
    for (int row = 0; row < SIZE; row++)
    {
        for (int col = 0; col < SIZE; col++)
        {
            for (int c = 0; c < CHANNELS; c++)
                h_rgb[row*SIZE*CHANNELS + col*CHANNELS + c] = rand() % 256;
        }
    }
}
// Driver: build a random RGB image on the host, convert it to grayscale on
// the GPU, and report the kernel time measured with CUDA events.
int main()
{
    // Require at least one CUDA-capable device.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if(!deviceCount){
        fprintf(stderr,"No devices supporting cuda\n");
        exit(EXIT_FAILURE);
    }
    int deviceId = 0;
    cudaSetDevice(deviceId);
    initialise_image();
    float *d_rgb,*d_grayscale;
    const int RGB_BYTES = SIZE*SIZE*CHANNELS*sizeof(float);
    const int GRAYSCALE_BYTES = SIZE*SIZE*sizeof(float);
    cudaMalloc((void**)&d_rgb,RGB_BYTES);
    cudaMalloc((void**)&d_grayscale,GRAYSCALE_BYTES);
    cudaMemcpy(d_rgb,h_rgb,RGB_BYTES,cudaMemcpyHostToDevice);
    // Time the conversion with CUDA events on the default stream.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    convert_rgb_grayscale(d_rgb,d_grayscale);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    // Surface any launch/execution error before trusting the results.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr,"CUDA error: %s\n",cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cudaMemcpy(h_grayscale,d_grayscale,GRAYSCALE_BYTES,cudaMemcpyDeviceToHost);
    printf("Elapsed time is %f\n",elapsedTime);
    // Release the timing events (previously leaked) and the device buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_grayscale);
    cudaFree(d_rgb);
    return 0;
}
|
7,678 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with a contextual message if any prior CUDA call left an error behind.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// 8th-order central-difference flux accumulation in the x and y directions
// for flux components 0..3. One thread per interior grid point; all arrays
// are reinterpreted as [..][308][308] cubes.
// NOTE(review): the interior guard compares i, j AND k against N (L and M are
// unused), and the host passes -dxinv0 as the x-direction scale -- confirm
// both are intentional before reuse.
__global__ void hypterm_0 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
// Determine this thread's (i, j, k) grid-point indices.
int blockdim_i= (int)(blockDim.x);
i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Reinterpret the flat device pointers as 3-D arrays with a 308^2 plane.
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
// Interior test (4-cell halo). Bitwise '&' on boolean operands -- works for
// 0/1 values but '&&' was presumably intended.
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
#pragma begin stencil0 unroll k=1,j=1,i=1
flux_0[k][j][i] = (0.8*(cons_1[k][j][i+1] - cons_1[k][j][i-1]) - 0.2*(cons_1[k][j][i+2] - cons_1[k][j][i-2]) + 0.038*(cons_1[k][j][i+3] - cons_1[k][j][i-3]) - 0.0035*(cons_1[k][j][i+4] - cons_1[k][j][i-4]))*dxinv0;
flux_0[k][j][i] -= ((0.8*(cons_2[k][j+1][i] - cons_2[k][j-1][i]) - 0.2*(cons_2[k][j+2][i] - cons_2[k][j-2][i]) + 0.038*(cons_2[k][j+3][i] - cons_2[k][j-3][i]) - 0.0035*(cons_2[k][j+4][i] - cons_2[k][j-4][i]))*dxinv1);
flux_1[k][j][i] = ((0.8*(cons_1[k][j][i+1]*q_1[k][j][i+1]-cons_1[k][j][i-1]*q_1[k][j][i-1]+(q_4[k][j][i+1]-q_4[k][j][i-1]))-0.2*(cons_1[k][j][i+2]*q_1[k][j][i+2]-cons_1[k][j][i-2]*q_1[k][j][i-2]+(q_4[k][j][i+2]-q_4[k][j][i-2]))+0.038*(cons_1[k][j][i+3]*q_1[k][j][i+3]-cons_1[k][j][i-3]*q_1[k][j][i-3]+(q_4[k][j][i+3]-q_4[k][j][i-3]))-0.0035*(cons_1[k][j][i+4]*q_1[k][j][i+4]-cons_1[k][j][i-4]*q_1[k][j][i-4]+(q_4[k][j][i+4]-q_4[k][j][i-4])))*dxinv0);
flux_1[k][j][i] -= (0.8*(cons_1[k][j+1][i]*q_2[k][j+1][i]-cons_1[k][j-1][i]*q_2[k][j-1][i])-0.2*(cons_1[k][j+2][i]*q_2[k][j+2][i]-cons_1[k][j-2][i]*q_2[k][j-2][i])+0.038*(cons_1[k][j+3][i]*q_2[k][j+3][i]-cons_1[k][j-3][i]*q_2[k][j-3][i])-0.0035*(cons_1[k][j+4][i]*q_2[k][j+4][i]-cons_1[k][j-4][i]*q_2[k][j-4][i]))*dxinv1;
flux_2[k][j][i] = ((0.8*(cons_2[k][j][i+1]*q_1[k][j][i+1]-cons_2[k][j][i-1]*q_1[k][j][i-1])-0.2*(cons_2[k][j][i+2]*q_1[k][j][i+2]-cons_2[k][j][i-2]*q_1[k][j][i-2])+0.038*(cons_2[k][j][i+3]*q_1[k][j][i+3]-cons_2[k][j][i-3]*q_1[k][j][i-3])-0.0035*(cons_2[k][j][i+4]*q_1[k][j][i+4]-cons_2[k][j][i-4]*q_1[k][j][i-4]))*dxinv0);
flux_2[k][j][i] -= (0.8*(cons_2[k][j+1][i]*q_2[k][j+1][i]-cons_2[k][j-1][i]*q_2[k][j-1][i]+(q_4[k][j+1][i]-q_4[k][j-1][i]))-0.2*(cons_2[k][j+2][i]*q_2[k][j+2][i]-cons_2[k][j-2][i]*q_2[k][j-2][i]+(q_4[k][j+2][i]-q_4[k][j-2][i]))+0.038*(cons_2[k][j+3][i]*q_2[k][j+3][i]-cons_2[k][j-3][i]*q_2[k][j-3][i]+(q_4[k][j+3][i]-q_4[k][j-3][i]))-0.0035*(cons_2[k][j+4][i]*q_2[k][j+4][i]-cons_2[k][j-4][i]*q_2[k][j-4][i]+(q_4[k][j+4][i]-q_4[k][j-4][i])))*dxinv1;
flux_3[k][j][i] = ((0.8*(cons_3[k][j][i+1]*q_1[k][j][i+1]-cons_3[k][j][i-1]*q_1[k][j][i-1])-0.2*(cons_3[k][j][i+2]*q_1[k][j][i+2]-cons_3[k][j][i-2]*q_1[k][j][i-2])+0.038*(cons_3[k][j][i+3]*q_1[k][j][i+3]-cons_3[k][j][i-3]*q_1[k][j][i-3])-0.0035*(cons_3[k][j][i+4]*q_1[k][j][i+4]-cons_3[k][j][i-4]*q_1[k][j][i-4]))*dxinv0);
flux_3[k][j][i] -= (0.8*(cons_3[k][j+1][i]*q_2[k][j+1][i]-cons_3[k][j-1][i]*q_2[k][j-1][i])-0.2*(cons_3[k][j+2][i]*q_2[k][j+2][i]-cons_3[k][j-2][i]*q_2[k][j-2][i])+0.038*(cons_3[k][j+3][i]*q_2[k][j+3][i]-cons_3[k][j-3][i]*q_2[k][j-3][i])-0.0035*(cons_3[k][j+4][i]*q_2[k][j+4][i]-cons_3[k][j-4][i]*q_2[k][j-4][i]))*dxinv1;
#pragma end stencil0
}
}
// z-direction 8th-order central-difference update of flux components 0..3.
// Each thread processes FOUR consecutive k-planes (note the 4* factors in the
// k index computation), reading the previous flux value and subtracting the
// z-derivative term.
// NOTE(review): as in hypterm_0, the interior guard uses N for all three
// extents and bitwise '&'; with 4 planes per thread, k+3 can reach N-2 when
// k == N-5 -- confirm the halo/extents make those writes valid.
__global__ void hypterm_1 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
// Determine this thread's (i, j, k) grid-point indices; k advances in
// strides of 4 because each thread covers 4 planes.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + (int)(4*threadIdx.z);
// Reinterpret the flat device pointers as 3-D arrays with a 308^2 plane.
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
// Per-plane scratch: old flux values for the 4 planes handled by this thread.
double flux0_a, flux1_a, flux2_a, flux3_a;
double flux0_b, flux1_b, flux2_b, flux3_b;
double flux0_c, flux1_c, flux2_c, flux3_c;
double flux0_d, flux1_d, flux2_d, flux3_d;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
#pragma begin stencil1 unroll k=1,j=1,i=1
flux0_a = flux_0[k][j][i];
flux_0[k][j][i] = flux0_a - ((0.8*(cons_3[k+1][j][i] - cons_3[k-1][j][i]) - 0.2*(cons_3[k+2][j][i] - cons_3[k-2][j][i]) + 0.038*(cons_3[k+3][j][i] - cons_3[k-3][j][i]) - 0.0035*(cons_3[k+4][j][i] - cons_3[k-4][j][i]))*dxinv2);
flux0_b = flux_0[k+1][j][i];
flux_0[k+1][j][i] = flux0_b - ((0.8*(cons_3[k+1+1][j][i] - cons_3[k+1-1][j][i]) - 0.2*(cons_3[k+1+2][j][i] - cons_3[k+1-2][j][i]) + 0.038*(cons_3[k+1+3][j][i] - cons_3[k+1-3][j][i]) - 0.0035*(cons_3[k+1+4][j][i] - cons_3[k+1-4][j][i]))*dxinv2);
flux0_c = flux_0[k+2][j][i];
flux_0[k+2][j][i] = flux0_c - ((0.8*(cons_3[k+2+1][j][i] - cons_3[k+2-1][j][i]) - 0.2*(cons_3[k+2+2][j][i] - cons_3[k+2-2][j][i]) + 0.038*(cons_3[k+2+3][j][i] - cons_3[k+2-3][j][i]) - 0.0035*(cons_3[k+2+4][j][i] - cons_3[k+2-4][j][i]))*dxinv2);
flux0_d = flux_0[k+3][j][i];
flux_0[k+3][j][i] = flux0_d - ((0.8*(cons_3[k+3+1][j][i] - cons_3[k+3-1][j][i]) - 0.2*(cons_3[k+3+2][j][i] - cons_3[k+3-2][j][i]) + 0.038*(cons_3[k+3+3][j][i] - cons_3[k+3-3][j][i]) - 0.0035*(cons_3[k+3+4][j][i] - cons_3[k+3-4][j][i]))*dxinv2);
flux1_a = flux_1[k][j][i];
flux_1[k][j][i] = flux1_a - (0.8*(cons_1[k+1][j][i]*q_3[k+1][j][i]-cons_1[k-1][j][i]*q_3[k-1][j][i])-0.2*(cons_1[k+2][j][i]*q_3[k+2][j][i]-cons_1[k-2][j][i]*q_3[k-2][j][i])+0.038*(cons_1[k+3][j][i]*q_3[k+3][j][i]-cons_1[k-3][j][i]*q_3[k-3][j][i])-0.0035*(cons_1[k+4][j][i]*q_3[k+4][j][i]-cons_1[k-4][j][i]*q_3[k-4][j][i]))*dxinv2;
flux1_b = flux_1[k+1][j][i];
flux_1[k+1][j][i] = flux1_b - (0.8*(cons_1[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_1[k+1-1][j][i]*q_3[k+1-1][j][i])-0.2*(cons_1[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_1[k+1-2][j][i]*q_3[k+1-2][j][i])+0.038*(cons_1[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_1[k+1-3][j][i]*q_3[k+1-3][j][i])-0.0035*(cons_1[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_1[k+1-4][j][i]*q_3[k+1-4][j][i]))*dxinv2;
flux1_c = flux_1[k+2][j][i];
flux_1[k+2][j][i] = flux1_c - (0.8*(cons_1[k+2+1][j][i]*q_3[k+2+1][j][i]-cons_1[k+2-1][j][i]*q_3[k+2-1][j][i])-0.2*(cons_1[k+2+2][j][i]*q_3[k+2+2][j][i]-cons_1[k+2-2][j][i]*q_3[k+2-2][j][i])+0.038*(cons_1[k+2+3][j][i]*q_3[k+2+3][j][i]-cons_1[k+2-3][j][i]*q_3[k+2-3][j][i])-0.0035*(cons_1[k+2+4][j][i]*q_3[k+2+4][j][i]-cons_1[k+2-4][j][i]*q_3[k+2-4][j][i]))*dxinv2;
flux1_d = flux_1[k+3][j][i];
flux_1[k+3][j][i] = flux1_d - (0.8*(cons_1[k+3+1][j][i]*q_3[k+3+1][j][i]-cons_1[k+3-1][j][i]*q_3[k+3-1][j][i])-0.2*(cons_1[k+3+2][j][i]*q_3[k+3+2][j][i]-cons_1[k+3-2][j][i]*q_3[k+3-2][j][i])+0.038*(cons_1[k+3+3][j][i]*q_3[k+3+3][j][i]-cons_1[k+3-3][j][i]*q_3[k+3-3][j][i])-0.0035*(cons_1[k+3+4][j][i]*q_3[k+3+4][j][i]-cons_1[k+3-4][j][i]*q_3[k+3-4][j][i]))*dxinv2;
flux2_a = flux_2[k][j][i];
flux_2[k][j][i] = flux2_a - (0.8*(cons_2[k+1][j][i]*q_3[k+1][j][i]-cons_2[k-1][j][i]*q_3[k-1][j][i])-0.2*(cons_2[k+2][j][i]*q_3[k+2][j][i]-cons_2[k-2][j][i]*q_3[k-2][j][i])+0.038*(cons_2[k+3][j][i]*q_3[k+3][j][i]-cons_2[k-3][j][i]*q_3[k-3][j][i])-0.0035*(cons_2[k+4][j][i]*q_3[k+4][j][i]-cons_2[k-4][j][i]*q_3[k-4][j][i]))*dxinv2;
flux2_b = flux_2[k+1][j][i];
flux_2[k+1][j][i] = flux2_b - (0.8*(cons_2[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_2[k+1-1][j][i]*q_3[k+1-1][j][i])-0.2*(cons_2[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_2[k+1-2][j][i]*q_3[k+1-2][j][i])+0.038*(cons_2[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_2[k+1-3][j][i]*q_3[k+1-3][j][i])-0.0035*(cons_2[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_2[k+1-4][j][i]*q_3[k+1-4][j][i]))*dxinv2;
flux2_c = flux_2[k+2][j][i];
flux_2[k+2][j][i] = flux2_c - (0.8*(cons_2[k+2+1][j][i]*q_3[k+2+1][j][i]-cons_2[k+2-1][j][i]*q_3[k+2-1][j][i])-0.2*(cons_2[k+2+2][j][i]*q_3[k+2+2][j][i]-cons_2[k+2-2][j][i]*q_3[k+2-2][j][i])+0.038*(cons_2[k+2+3][j][i]*q_3[k+2+3][j][i]-cons_2[k+2-3][j][i]*q_3[k+2-3][j][i])-0.0035*(cons_2[k+2+4][j][i]*q_3[k+2+4][j][i]-cons_2[k+2-4][j][i]*q_3[k+2-4][j][i]))*dxinv2;
flux2_d = flux_2[k+3][j][i];
flux_2[k+3][j][i] = flux2_d - (0.8*(cons_2[k+3+1][j][i]*q_3[k+3+1][j][i]-cons_2[k+3-1][j][i]*q_3[k+3-1][j][i])-0.2*(cons_2[k+3+2][j][i]*q_3[k+3+2][j][i]-cons_2[k+3-2][j][i]*q_3[k+3-2][j][i])+0.038*(cons_2[k+3+3][j][i]*q_3[k+3+3][j][i]-cons_2[k+3-3][j][i]*q_3[k+3-3][j][i])-0.0035*(cons_2[k+3+4][j][i]*q_3[k+3+4][j][i]-cons_2[k+3-4][j][i]*q_3[k+3-4][j][i]))*dxinv2;
flux3_a = flux_3[k][j][i];
flux_3[k][j][i] = flux3_a - (0.8*(cons_3[k+1][j][i]*q_3[k+1][j][i]-cons_3[k-1][j][i]*q_3[k-1][j][i]+(q_4[k+1][j][i]-q_4[k-1][j][i]))-0.2*(cons_3[k+2][j][i]*q_3[k+2][j][i]-cons_3[k-2][j][i]*q_3[k-2][j][i]+(q_4[k+2][j][i]-q_4[k-2][j][i]))+0.038*(cons_3[k+3][j][i]*q_3[k+3][j][i]-cons_3[k-3][j][i]*q_3[k-3][j][i]+(q_4[k+3][j][i]-q_4[k-3][j][i]))-0.0035*(cons_3[k+4][j][i]*q_3[k+4][j][i]-cons_3[k-4][j][i]*q_3[k-4][j][i]+(q_4[k+4][j][i]-q_4[k-4][j][i])))*dxinv2;
flux3_b = flux_3[k+1][j][i];
flux_3[k+1][j][i] = flux3_b - (0.8*(cons_3[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_3[k+1-1][j][i]*q_3[k+1-1][j][i]+(q_4[k+1+1][j][i]-q_4[k+1-1][j][i]))-0.2*(cons_3[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_3[k+1-2][j][i]*q_3[k+1-2][j][i]+(q_4[k+1+2][j][i]-q_4[k+1-2][j][i]))+0.038*(cons_3[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_3[k+1-3][j][i]*q_3[k+1-3][j][i]+(q_4[k+1+3][j][i]-q_4[k+1-3][j][i]))-0.0035*(cons_3[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_3[k+1-4][j][i]*q_3[k+1-4][j][i]+(q_4[k+1+4][j][i]-q_4[k+1-4][j][i])))*dxinv2;
flux3_c = flux_3[k+2][j][i];
flux_3[k+2][j][i] = flux3_c - (0.8*(cons_3[k+2+1][j][i]*q_3[k+2+1][j][i]-cons_3[k+2-1][j][i]*q_3[k+2-1][j][i]+(q_4[k+2+1][j][i]-q_4[k+2-1][j][i]))-0.2*(cons_3[k+2+2][j][i]*q_3[k+2+2][j][i]-cons_3[k+2-2][j][i]*q_3[k+2-2][j][i]+(q_4[k+2+2][j][i]-q_4[k+2-2][j][i]))+0.038*(cons_3[k+2+3][j][i]*q_3[k+2+3][j][i]-cons_3[k+2-3][j][i]*q_3[k+2-3][j][i]+(q_4[k+2+3][j][i]-q_4[k+2-3][j][i]))-0.0035*(cons_3[k+2+4][j][i]*q_3[k+2+4][j][i]-cons_3[k+2-4][j][i]*q_3[k+2-4][j][i]+(q_4[k+2+4][j][i]-q_4[k+2-4][j][i])))*dxinv2;
flux3_d = flux_3[k+3][j][i];
flux_3[k+3][j][i] = flux3_d - (0.8*(cons_3[k+3+1][j][i]*q_3[k+3+1][j][i]-cons_3[k+3-1][j][i]*q_3[k+3-1][j][i]+(q_4[k+3+1][j][i]-q_4[k+3-1][j][i]))-0.2*(cons_3[k+3+2][j][i]*q_3[k+3+2][j][i]-cons_3[k+3-2][j][i]*q_3[k+3-2][j][i]+(q_4[k+3+2][j][i]-q_4[k+3-2][j][i]))+0.038*(cons_3[k+3+3][j][i]*q_3[k+3+3][j][i]-cons_3[k+3-3][j][i]*q_3[k+3-3][j][i]+(q_4[k+3+3][j][i]-q_4[k+3-3][j][i]))-0.0035*(cons_3[k+3+4][j][i]*q_3[k+3+4][j][i]-cons_3[k+3-4][j][i]*q_3[k+3-4][j][i]+(q_4[k+3+4][j][i]-q_4[k+3-4][j][i])))*dxinv2;
#pragma end stencil1
}
}
// Full x/y/z 8th-order central-difference flux update for flux component 4
// (the energy-like component, using cons_4 and q_4). Each thread processes
// TWO consecutive k-planes (note the 2* factors in the k index computation).
// NOTE(review): same caveats as hypterm_0/1 -- guard uses N for all extents,
// bitwise '&', and the host passes -dxinv0 for the x-direction scale.
__global__ void hypterm_2 (double * __restrict__ flux_in_4, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
// Determine this thread's (i, j, k) grid-point indices; k advances in
// strides of 2 because each thread covers 2 planes.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + (int)(2*threadIdx.z);
// Reinterpret the flat device pointers as 3-D arrays with a 308^2 plane.
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_4[k][j][i] = ((0.8*(cons_4[k][j][i+1]*q_1[k][j][i+1]-cons_4[k][j][i-1]*q_1[k][j][i-1]+(q_4[k][j][i+1]*q_1[k][j][i+1]-q_4[k][j][i-1]*q_1[k][j][i-1]))-0.2*(cons_4[k][j][i+2]*q_1[k][j][i+2]-cons_4[k][j][i-2]*q_1[k][j][i-2]+(q_4[k][j][i+2]*q_1[k][j][i+2]-q_4[k][j][i-2]*q_1[k][j][i-2]))+0.038*(cons_4[k][j][i+3]*q_1[k][j][i+3]-cons_4[k][j][i-3]*q_1[k][j][i-3]+(q_4[k][j][i+3]*q_1[k][j][i+3]-q_4[k][j][i-3]*q_1[k][j][i-3]))-0.0035*(cons_4[k][j][i+4]*q_1[k][j][i+4]-cons_4[k][j][i-4]*q_1[k][j][i-4]+(q_4[k][j][i+4]*q_1[k][j][i+4]-q_4[k][j][i-4]*q_1[k][j][i-4])))*dxinv0);
flux_4[k+1][j][i] = ((0.8*(cons_4[k+1][j][i+1]*q_1[k+1][j][i+1]-cons_4[k+1][j][i-1]*q_1[k+1][j][i-1]+(q_4[k+1][j][i+1]*q_1[k+1][j][i+1]-q_4[k+1][j][i-1]*q_1[k+1][j][i-1]))-0.2*(cons_4[k+1][j][i+2]*q_1[k+1][j][i+2]-cons_4[k+1][j][i-2]*q_1[k+1][j][i-2]+(q_4[k+1][j][i+2]*q_1[k+1][j][i+2]-q_4[k+1][j][i-2]*q_1[k+1][j][i-2]))+0.038*(cons_4[k+1][j][i+3]*q_1[k+1][j][i+3]-cons_4[k+1][j][i-3]*q_1[k+1][j][i-3]+(q_4[k+1][j][i+3]*q_1[k+1][j][i+3]-q_4[k+1][j][i-3]*q_1[k+1][j][i-3]))-0.0035*(cons_4[k+1][j][i+4]*q_1[k+1][j][i+4]-cons_4[k+1][j][i-4]*q_1[k+1][j][i-4]+(q_4[k+1][j][i+4]*q_1[k+1][j][i+4]-q_4[k+1][j][i-4]*q_1[k+1][j][i-4])))*dxinv0);
flux_4[k][j][i] -= (0.8*(cons_4[k][j+1][i]*q_2[k][j+1][i]-cons_4[k][j-1][i]*q_2[k][j-1][i]+(q_4[k][j+1][i]*q_2[k][j+1][i]-q_4[k][j-1][i]*q_2[k][j-1][i]))-0.2*(cons_4[k][j+2][i]*q_2[k][j+2][i]-cons_4[k][j-2][i]*q_2[k][j-2][i]+(q_4[k][j+2][i]*q_2[k][j+2][i]-q_4[k][j-2][i]*q_2[k][j-2][i]))+0.038*(cons_4[k][j+3][i]*q_2[k][j+3][i]-cons_4[k][j-3][i]*q_2[k][j-3][i]+(q_4[k][j+3][i]*q_2[k][j+3][i]-q_4[k][j-3][i]*q_2[k][j-3][i]))-0.0035*(cons_4[k][j+4][i]*q_2[k][j+4][i]-cons_4[k][j-4][i]*q_2[k][j-4][i]+(q_4[k][j+4][i]*q_2[k][j+4][i]-q_4[k][j-4][i]*q_2[k][j-4][i])))*dxinv1;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1][j+1][i]*q_2[k+1][j+1][i]-cons_4[k+1][j-1][i]*q_2[k+1][j-1][i]+(q_4[k+1][j+1][i]*q_2[k+1][j+1][i]-q_4[k+1][j-1][i]*q_2[k+1][j-1][i]))-0.2*(cons_4[k+1][j+2][i]*q_2[k+1][j+2][i]-cons_4[k+1][j-2][i]*q_2[k+1][j-2][i]+(q_4[k+1][j+2][i]*q_2[k+1][j+2][i]-q_4[k+1][j-2][i]*q_2[k+1][j-2][i]))+0.038*(cons_4[k+1][j+3][i]*q_2[k+1][j+3][i]-cons_4[k+1][j-3][i]*q_2[k+1][j-3][i]+(q_4[k+1][j+3][i]*q_2[k+1][j+3][i]-q_4[k+1][j-3][i]*q_2[k+1][j-3][i]))-0.0035*(cons_4[k+1][j+4][i]*q_2[k+1][j+4][i]-cons_4[k+1][j-4][i]*q_2[k+1][j-4][i]+(q_4[k+1][j+4][i]*q_2[k+1][j+4][i]-q_4[k+1][j-4][i]*q_2[k+1][j-4][i])))*dxinv1;
flux_4[k][j][i] -= (0.8*(cons_4[k+1][j][i]*q_3[k+1][j][i]-cons_4[k-1][j][i]*q_3[k-1][j][i]+(q_4[k+1][j][i]*q_3[k+1][j][i]-q_4[k-1][j][i]*q_3[k-1][j][i]))-0.2*(cons_4[k+2][j][i]*q_3[k+2][j][i]-cons_4[k-2][j][i]*q_3[k-2][j][i]+(q_4[k+2][j][i]*q_3[k+2][j][i]-q_4[k-2][j][i]*q_3[k-2][j][i]))+0.038*(cons_4[k+3][j][i]*q_3[k+3][j][i]-cons_4[k-3][j][i]*q_3[k-3][j][i]+(q_4[k+3][j][i]*q_3[k+3][j][i]-q_4[k-3][j][i]*q_3[k-3][j][i]))-0.0035*(cons_4[k+4][j][i]*q_3[k+4][j][i]-cons_4[k-4][j][i]*q_3[k-4][j][i]+(q_4[k+4][j][i]*q_3[k+4][j][i]-q_4[k-4][j][i]*q_3[k-4][j][i])))*dxinv2;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_4[k+1-1][j][i]*q_3[k+1-1][j][i]+(q_4[k+1+1][j][i]*q_3[k+1+1][j][i]-q_4[k+1-1][j][i]*q_3[k+1-1][j][i]))-0.2*(cons_4[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_4[k+1-2][j][i]*q_3[k+1-2][j][i]+(q_4[k+1+2][j][i]*q_3[k+1+2][j][i]-q_4[k+1-2][j][i]*q_3[k+1-2][j][i]))+0.038*(cons_4[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_4[k+1-3][j][i]*q_3[k+1-3][j][i]+(q_4[k+1+3][j][i]*q_3[k+1+3][j][i]-q_4[k+1-3][j][i]*q_3[k+1-3][j][i]))-0.0035*(cons_4[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_4[k+1-4][j][i]*q_3[k+1-4][j][i]+(q_4[k+1+4][j][i]*q_3[k+1+4][j][i]-q_4[k+1-4][j][i]*q_3[k+1-4][j][i])))*dxinv2;
}
}
// Host driver: copies all field arrays to the GPU, runs the three hypterm
// kernels, and copies the updated flux arrays back to the host.
// Notes:
//  - the x-derivative scale is passed as -dxinv0 (the sign is baked in here);
//  - device buffers are freed before returning (they were leaked before);
//  - kernel launches are now followed by check_error so failures are loud.
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
  // --- allocate device buffers and upload inputs -------------------------
  double *flux_0;
  cudaMalloc (&flux_0, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_0\n");
  cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_1;
  cudaMalloc (&flux_1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_1\n");
  cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_2;
  cudaMalloc (&flux_2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_2\n");
  cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_3;
  cudaMalloc (&flux_3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_3\n");
  cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *flux_4;
  cudaMalloc (&flux_4, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for flux_4\n");
  cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_1;
  cudaMalloc (&cons_1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_1\n");
  cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_2;
  cudaMalloc (&cons_2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_2\n");
  cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_3;
  cudaMalloc (&cons_3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_3\n");
  cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *cons_4;
  cudaMalloc (&cons_4, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for cons_4\n");
  cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_1;
  cudaMalloc (&q_1, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_1\n");
  cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_2;
  cudaMalloc (&q_2, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_2\n");
  cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_3;
  cudaMalloc (&q_3, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_3\n");
  cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  double *q_4;
  cudaMalloc (&q_4, sizeof(double)*L*M*N);
  check_error ("Failed to allocate device memory for q_4\n");
  cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
  // --- launch the three kernels -----------------------------------------
  // hypterm_1 processes 4 k-planes per thread, hypterm_2 processes 2, hence
  // the smaller z-extents of their grids.
  dim3 blockconfig (16, 4, 4);
  dim3 gridconfig_0 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
  hypterm_0 <<<gridconfig_0, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
  check_error ("hypterm_0 kernel failed\n");
  dim3 gridconfig_1 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
  hypterm_1 <<<gridconfig_1, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
  check_error ("hypterm_1 kernel failed\n");
  dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
  hypterm_2 <<<gridconfig_2, blockconfig>>> (flux_4, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
  check_error ("hypterm_2 kernel failed\n");
  // --- copy results back and release device memory ------------------------
  cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
  // Free all device buffers (previously leaked on every call).
  cudaFree (flux_0);
  cudaFree (flux_1);
  cudaFree (flux_2);
  cudaFree (flux_3);
  cudaFree (flux_4);
  cudaFree (cons_1);
  cudaFree (cons_2);
  cudaFree (cons_3);
  cudaFree (cons_4);
  cudaFree (q_1);
  cudaFree (q_2);
  cudaFree (q_3);
  cudaFree (q_4);
}
|
7,679 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Jiang Yufan (email: jiangyufan2018@outlook.com) 2019-03-20
*/
#include "DropoutWithIndex.cuh"
#include "../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
Zero out the target-tensor elements selected by the mask index array
(a memory-lean variant of dropout).
>> tData - data pointer of the target tensor
>> sIndex - mask index array (positions in tData to clear)
>> size - number of entries in sIndex
*/
__global__
void KernelDropoutWithIndex1D(DTYPE * tData, int * sIndex, int size)
{
    /* one thread per mask entry */
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size)
        return;
    tData[sIndex[tid]] = DTYPE(0.0F);
}
/*
This is a special implementation of "dropout" to reduce memory with maskIndex.
>> x - input tensor
>> maskIndex - mask index tensor
>> c - output tensor
*/
/*
Dropout with explicit mask indices: clears the entries of c->data listed in
maskIndex on c's device.
>> x - input tensor (not referenced here; kept for interface symmetry)
>> maskIndex - tensor holding the indices to zero out
>> c - output tensor whose data is modified in place
*/
void _CudaDropoutWithIndex(const XTensor * x, XTensor * maskIndex, XTensor * c)
{
    int devID = c->devID;
    int indexNum = maskIndex->unitNum;
    int cudaGrids[3];
    int cudaBlocks[3];
    int devIDBackup;
    ProtectCudaDev(devID, devIDBackup);
    /* pick a 1-D launch configuration covering indexNum work items */
    GDevs.GetCudaThread(devID, indexNum, cudaGrids, cudaBlocks);
    dim3 blocks(cudaGrids[0]);
    dim3 threads(cudaBlocks[0]);
    KernelDropoutWithIndex1D <<<blocks, threads >>>((DTYPE*)c->data, (int*)maskIndex->data, indexNum);
    BacktoCudaDev(devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
7,680 | #include <sys/time.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <cuda.h>
#define NUM_ROWS 8192
#define NUM_COLS 8192
#define EPSILON 0.1
#define TRUE 1.0f
#define FALSE 0.0f
#define TOTITERATIONS 359
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double When()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double) now.tv_sec + (double) now.tv_usec * 1e-6;
}
// Initialize the plate: interior cells start at 50, the top row and both
// side columns are fixed (locked) at 0, the bottom row is fixed at 100.
// Assumes one block per row and one thread per column, i.e.
// gridDim.x == NUM_ROWS and blockDim.x == NUM_COLS -- TODO confirm launch.
// NOTE(review): `blockIdx.x < NUM_ROWS` / `threadIdx.x < NUM_COLS` are always
// true under that launch, so the "inner" writes also hit the last row/column;
// correctness relies on the boundary writes below overwriting them afterward.
__global__ void initArrays(float *from, float *to, float *locked, int size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < size) {
// inner is 50s
if ((blockIdx.x > 0) && (blockIdx.x < NUM_ROWS)){
if ((threadIdx.x > 0) && (threadIdx.x < NUM_COLS)) {
from[idx] = 50;
to[idx] = 50;
locked[idx] = FALSE;
}
}
// sides are 0 (locked boundary)
if ((threadIdx.x == 0) || (threadIdx.x == NUM_COLS-1)) {
from[idx] = 0;
to[idx] = 0;
locked[idx] = TRUE;
}
// top is 0 (locked boundary)
if (blockIdx.x == 0) {
from[idx] = 0;
to[idx] = 0;
locked[idx] = TRUE;
}
// bottom is 100 (locked boundary)
if (blockIdx.x == NUM_ROWS-1) {
from[idx] = 100;
to[idx] = 100;
locked[idx] = TRUE;
}
}
}
// Reset convergence flags before a sweep: every per-row flag is set to
// "still going" (1); thread 0 additionally clears the global flag that the
// reduction kernel rebuilds afterwards.
__global__ void resetKeepgoing(int *lkeepgoing, int *keepgoing) {
    lkeepgoing[threadIdx.x] = 1;
    if (threadIdx.x == 0) {
        *keepgoing = 0;
    }
}
// One Jacobi-style relaxation step for a single cell of the plate.
// Row = blockIdx.x, column = threadIdx.x (assumes blockDim.x == NUM_COLS --
// TODO confirm launch geometry). Locked (boundary) cells are left untouched,
// which also keeps the neighbor indexing below in bounds for interior cells.
__global__ void calculate(float *from, float *to, float* locked, int size, int *lkeepgoing) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
float total, self;
if (idx < size) {
if (locked[idx] == TRUE) {
return;
}
// 4-neighbor sum (N, W, E, S in row-major layout).
total = from[idx - NUM_COLS] + from[idx-1] + from[idx + 1] + from[idx + NUM_COLS];
self = from[idx];
// Weighted update: (neighbors + 4*self) / 8.
to[idx] = (total + 4 * self) * 0.125;
// Set the keepgoing data for the block.
// NOTE(review): convergence compares self to the plain 4-neighbor average
// (total/4), not to the weighted value written above -- confirm intended.
if ((fabs(self - (total)/4) < EPSILON)) {
lkeepgoing[blockIdx.x] = 0;
}
}
}
// Fold the NUM_ROWS per-row "keep going" flags into the single *keepgoing
// flag. Intended for a single-block launch with dynamic shared memory of
// blockDim.x ints: each thread first sums its chunk of flags into sdata,
// then the sums are AND-ed down to sdata[0] (a nonzero sum acts as "true").
// NOTE(review): the __syncthreads() calls below sit inside the divergent
// `if (tid < NUM_ROWS / 2)` branch -- that is undefined behavior unless
// every thread of the block takes the branch; confirm the launch geometry.
// NOTE(review): the tid + 4096 ... + 1 offsets assume NUM_ROWS == 8192 and
// enough shared memory for 8192 ints -- verify against the launch arguments.
__global__ void reduceSingle(int *lkeepgoing, int *keepgoing)
{
extern __shared__ int sdata[];
unsigned int tid, i, s;
// Calculate our offset into the row
int rowStartPos = threadIdx.x * (NUM_ROWS/blockDim.x); //number of rows / number of threads in a block
// The number of cols per thread
int colsPerThread = NUM_ROWS/blockDim.x; //number of rows / block dimention = 8k/1024 = 8
// perform first level of reduction,
// reading from global memory, writing to shared memory
tid = threadIdx.x;
// Sum my part of 1D array and put it in shared memory
// Method 1
sdata[tid] = 0;
for (i = rowStartPos; i < colsPerThread+rowStartPos; i++) {
sdata[tid] += lkeepgoing[i];
}
__syncthreads();
// i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
// sdata[tid] = lkeepgoing[i] & lkeepgoing[i+blockDim.x];
// __syncthreads();
if (tid < NUM_ROWS / 2)
{
sdata[tid] &= sdata[tid + 4096]; __syncthreads();
sdata[tid] &= sdata[tid + 2048]; __syncthreads();
sdata[tid] &= sdata[tid + 1024]; __syncthreads();
sdata[tid] &= sdata[tid + 512]; __syncthreads();
sdata[tid] &= sdata[tid + 256]; __syncthreads();
sdata[tid] &= sdata[tid + 128]; __syncthreads();
sdata[tid] &= sdata[tid + 64]; __syncthreads();
sdata[tid] &= sdata[tid + 32]; __syncthreads();
sdata[tid] &= sdata[tid + 16]; __syncthreads();
sdata[tid] &= sdata[tid + 8]; __syncthreads();
sdata[tid] &= sdata[tid + 4]; __syncthreads();
sdata[tid] &= sdata[tid + 2]; __syncthreads();
sdata[tid] &= sdata[tid + 1]; __syncthreads();
}
// Method 2
// sdata[tid] = 0;
// for (i = tid; i < NUM_ROWS; i+=blockDim.x) { // everyone will start in a chunk together grabbing a chunk at a time and processing it later
// sdata[threadIdx.x] += lkeepgoing[i];
// }
// __syncthreads();
//
// if (tid < NUM_ROWS / 2)
// {
// sdata[tid] &= sdata[tid + 4096]; __syncthreads();
// sdata[tid] &= sdata[tid + 2048]; __syncthreads();
// sdata[tid] &= sdata[tid + 1024]; __syncthreads();
// sdata[tid] &= sdata[tid + 512]; __syncthreads();
// sdata[tid] &= sdata[tid + 256]; __syncthreads();
// sdata[tid] &= sdata[tid + 128]; __syncthreads();
// sdata[tid] &= sdata[tid + 64]; __syncthreads();
// sdata[tid] &= sdata[tid + 32]; __syncthreads();
// sdata[tid] &= sdata[tid + 16]; __syncthreads();
// sdata[tid] &= sdata[tid + 8]; __syncthreads();
// sdata[tid] &= sdata[tid + 4]; __syncthreads();
// sdata[tid] &= sdata[tid + 2]; __syncthreads();
// sdata[tid] &= sdata[tid + 1]; __syncthreads();
// }
// Method 3
// i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
// sdata[tid] = lkeepgoing[i] & lkeepgoing[i+blockDim.x];
// __syncthreads();
//
// // do reduction in shared memory
// for(s=blockDim.x/2; s>32; s>>=1)
// {
// if (tid < s)
// {
// sdata[tid] &= sdata[tid + s];
// }
// __syncthreads();
// }
//
// if (tid < 32)
// {
// sdata[tid] &= sdata[tid + 32]; __syncthreads();
// sdata[tid] &= sdata[tid + 16]; __syncthreads();
// sdata[tid] &= sdata[tid + 8]; __syncthreads();
// sdata[tid] &= sdata[tid + 4]; __syncthreads();
// sdata[tid] &= sdata[tid + 2]; __syncthreads();
// sdata[tid] &= sdata[tid + 1]; __syncthreads();
// }
// write result for this block to global mem
if (tid == 0) *keepgoing = sdata[0];
}
// Jacobi heat-plate solver driver: initialize the plate on the GPU, iterate
// relaxation sweeps until the reduction reports convergence, then print the
// plate and the elapsed wall time.
int main(void) {
    double timestart, timefinish, timetaken;   // host timing
    float *from_d, *to_d, *locked;             // device plate buffers + lock mask
    float *temp_d;
    int *lkeepgoing, *keepgoing;               // device convergence flags
    int nBytes;
    int iterations;
    int SIZE, blocks, threadsperblock;
    int *steadyState;
    SIZE = NUM_ROWS * NUM_COLS;
    blocks = 8192;
    // NOTE(review): 8192 threads per block exceeds the CUDA per-block limit
    // (1024), so as written these launches fail. Left unchanged because the
    // kernels index rows by blockIdx.x and columns by threadIdx.x, so a real
    // fix requires reworking the kernels' indexing as well.
    threadsperblock = 8192;
    steadyState = (int*)malloc(sizeof(int));
    *steadyState = 0;
    nBytes = SIZE*sizeof(float);
    cudaMalloc((void **) &from_d, nBytes);
    cudaMalloc((void **) &to_d, nBytes);
    cudaMalloc((void **) &locked, nBytes);
    cudaMalloc((void **) &lkeepgoing, blocks * sizeof(int));
    cudaMalloc((void **) &keepgoing, sizeof(int));
    initArrays<<<blocks,threadsperblock>>> (from_d, to_d, locked, SIZE);
    iterations = 0;
    timestart = When();
    while (!*steadyState) { //&& TOTITERATIONS != iterations
        resetKeepgoing<<<1,blocks>>> (lkeepgoing, keepgoing);
        calculate<<<blocks,threadsperblock>>> (from_d, to_d, locked, SIZE, lkeepgoing);
        reduceSingle<<<1,blocks, blocks*sizeof(int)>>> (lkeepgoing, keepgoing);
        cudaMemcpy(steadyState, keepgoing, sizeof(int), cudaMemcpyDeviceToHost);
        iterations++;
        // Ping-pong the plate buffers for the next sweep.
        temp_d = from_d;
        from_d = to_d;
        to_d = temp_d;
        printf("Iteration %d\n", iterations);
    }
    timefinish = When();
    float* plate = (float*)malloc(sizeof(float) * SIZE);
    cudaMemcpy(plate, to_d, sizeof(float)*SIZE, cudaMemcpyDeviceToHost);
    cudaFree(from_d);
    cudaFree(locked);   // was leaked before
    cudaFree(lkeepgoing);
    cudaFree(keepgoing);
    free(steadyState);
    timetaken = timefinish - timestart;
    printf("Iteration %d time %f\n", iterations, timetaken);
    int k;
    for (k = 0; k < SIZE; k++) {
        // NOTE(review): `% 8191` is probably meant to be `% NUM_COLS` (8192)
        // for clean row breaks -- left as-is to preserve existing output.
        if (k % 8191 == 0) {
            printf("\n");
        }
        printf("%f\t", plate[k]);   // fixed: plate is float; "%d" was undefined behavior
    }
    cudaFree(to_d);
    free(plate);
    return 0;
}
|
7,681 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <chrono>
#include <iostream>
#include <stdio.h>
#include "merge_sort.cuh"
void init_array(int* in, int size, int max_level);
void print_vector(int* in, int size);
/*
 * Driver: sort a small random int array with mergeSortAsc and report the
 * elapsed host time.
 */
int main()
{
    int size = 100;
    int max_val = 100;
    int* test = new int[size];
    int* out = new int[size];
    init_array(test, size, max_val);
    print_vector(test, size);
    auto start_cpu = std::chrono::high_resolution_clock::now();
    mergeSortAsc(test, size, out);
    auto end_cpu = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> time_span_cpu = std::chrono::duration_cast<std::chrono::duration<double>>(end_cpu - start_cpu);
    print_vector(out, size);
    // duration<double> counts seconds; scale so the value matches the label.
    std::cout << "Merge sort, CPU time elapsed (millisec) " << time_span_cpu.count() * 1000.0 << std::endl;
    delete[] test;   // previously leaked
    delete[] out;
    system("pause");
    return 0;
}
/**
 * Fill `in` with `size` pseudo-random integers drawn uniformly from
 * [0, max_level] via rand() (seed is whatever the caller last set).
 */
void init_array(int* in, int size, int max_level)
{
    int* const end = in + size;
    for (int* p = in; p != end; ++p)
    {
        double u = (double)rand() / (RAND_MAX);
        *p = floor(max_level * u);
    }
}
/** Print `size` ints separated by single spaces, then a newline. */
void print_vector(int* in, int size)
{
    for (int i = 0; i != size; ++i)
        std::cout << in[i] << " ";
    std::cout << std::endl;
}
|
7,682 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// Microbenchmark kernel: despite the saxpy name, only thread 0 runs, issuing
// two shared-memory loads via inline PTX and storing the loaded words to
// x[0]/x[1]. The stream-configuration parameters are unused in this variant
// -- presumably consumed by sibling benchmark kernels (TODO confirm).
// NOTE(review): no __shared__ array is declared, so `ld.shared.u32 [%1]`
// with %1 = id (0) reads whatever happens to sit at shared address 0 --
// verify this is the intended probe.
__global__ void saxpy(unsigned num_rd_streams, unsigned addr1, unsigned addr2, unsigned addr3, unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7, unsigned addr8, unsigned rd_stream_length, unsigned num_wr_streams, unsigned wr_stream_length, unsigned *x)
{
int id = threadIdx.x;
if (id <= 0) {
unsigned a;
// First probe: load shared word at address `id` (0) into a register.
asm ("ld.shared.u32 %0, [%1];" : "=r"(a) : "r"(id) );
x[0] = a;
// Second, identical probe -- two samples of the same shared location.
asm ("ld.shared.u32 %0, [%1];" : "=r"(a) : "r"(id) );
x[1] = a;
}
}
// Host driver for the shared-memory probe kernel above. No CUDA call is
// error-checked and h_x/h_dummy are never freed -- acceptable for a throwaway
// microbenchmark, but worth noting.
int main(int argc, char *argv[])
{
int N = 1000;
// Perform SAXPY on 1M elements
unsigned *h_x = (unsigned *)malloc(N*sizeof(unsigned));
// NOTE(review): a raw integer (100) is used as a device pointer; the
// cudaMemcpy into it below cannot succeed. Presumably deliberate, to feed a
// known address into an address-trace experiment -- confirm intent.
unsigned *d_x = (unsigned *)100;
unsigned *d_x_copy;
cudaMalloc((void**)&d_x_copy, N*sizeof(unsigned));
// Fill h_x with 1..N.
for (int i = 1 ; i <= N ; i++)
h_x[i-1] = (unsigned)i;
cudaMemcpy(d_x, h_x, N*sizeof(unsigned), cudaMemcpyHostToDevice);
unsigned *h_dummy, *d_dummy;
cudaMalloc((void**)&d_dummy, 2*sizeof(unsigned));
h_dummy = (unsigned *)malloc(2*sizeof(unsigned));
// Launch with 8 threads; only thread 0 does work (see kernel guard).
saxpy<<<1, 8>>>(8, 100, 100, 100, 100, 100, 100, 100, 100, 1000, 0, 1000, d_dummy);
cudaMemcpy(h_dummy, d_dummy, 2*sizeof(unsigned), cudaMemcpyDeviceToHost);
printf("%u\n", h_dummy[0]);
printf("%u\n", h_dummy[1]);
}
|
#ifdef __CUDACC__
#ifndef BC_GPU_IMPL
#define BC_GPU_IMPL
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <limits>
#include <cstddef>
#include <type_traits>
namespace BC {
// Device-side primitives for the BC library. Every kernel below walks the
// whole [0, sz) range in a serial loop from a single thread, so they are
// presumably launched as <<<1,1>>> -- TODO confirm against the launch sites.
// NOTE(review): several kernels take `T& t`; if that reference is bound to a
// host-side object the kernel dereferences a host address. Verify callers
// pass device-resident expression/accessor objects.
namespace gpu_impl {
// Set every element of t to the scalar j.
template<typename T, typename J> __global__
static void fill(T& t, const J j, int sz) {
for (int i = 0; i < sz; ++i) {
t[i] = j;
}
}
// Set every element of t to the value pointed to by j (device scalar).
template<typename T, typename J> __global__
static void fill(T& t, const J* j, int sz) {
for (int i = 0; i < sz; ++i) {
t[i] = j[0];
}
}
// *t = (*u) * (*v), all three device scalars.
template<class T, class U, class V> __global__
static void scalarMul(T* t, U* u, V* v) {
*t = u[0] * v[0];
}
// Zero-fill t[0..sz).
template<typename T> __global__
static void zero(T& t, int sz) {
for (int i = 0; i < sz; ++i) {
t[i] = 0;
}
}
// Element-wise copy j -> t.
template<class T, class J> __global__
static void copy(T t, const J j, int sz) {
for (int i = 0; i < sz; ++i) {
t[i] = j[i];
}
}
// Element-wise copy through an extra pointer indirection: t[i] = (*j)[i].
template<class T, class J> __global__
static void copyStructPtr(T t, J* j, int sz) {
for (int i = 0; i < sz; ++i) {
t[i] = (*j)[i];
}
}
// Force evaluation of a lazy expression object by indexing every element;
// the result is discarded (side effects live in t's operator[]).
template<class T> __global__
static void eval(T t, int sz) {
for (int i = 0; i < sz; ++i) {
t[i];
}
}
// Compile-time maximum of T, usable in device code.
template<class T>
struct _max {
static constexpr T value = std::numeric_limits<T>::max();
};
// Fill t with values intended to lie in [lower_bound, upper_bound).
template<typename T, typename J> __global__
static void randomize(T& t, J lower_bound, J upper_bound, int sz, int seed) {
curandState_t state;
curand_init(seed, /* the seed controls the sequence of random values that are produced */
seed, /* the sequence number is only important with multiple cores */
1, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&state);
for (int i = 0; i < sz; ++i) {
t[i] = curand(&state);
// NOTE(review): curand() returns a 32-bit value (max 4294967295, i.e.
// 2^32-1); dividing by 1e10 therefore does NOT normalize to [0,1) --
// confirm the intended scaling.
t[i] /= 10000000000;
t[i] *= (upper_bound - lower_bound);
t[i] += lower_bound;
}
}
}
}
#endif
#endif //cudacc
|
7,684 | // Define this to turn on error checking
#define CUDA_ERROR_CHECK
#include <cuda.h>
#include <stdio.h>
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Wrapped by the CudaSafeCall() macro: abort with file/line context when a
// CUDA runtime call returned an error. Compiles to a no-op unless
// CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall(cudaError err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    if (cudaSuccess == err)
        return;
    fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
            file, line, cudaGetErrorString(err));
    exit(-1);
#endif
}
// Wrapped by the CudaCheckError() macro: surface a pending launch error,
// then synchronize the device to catch asynchronous execution errors.
// Compiles to a no-op unless CUDA_ERROR_CHECK is defined; the sync costs a
// full device stall, so disable it in hot paths.
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
    // Force completion so in-kernel faults are reported here rather than at
    // some later, unrelated call.
    err = cudaDeviceSynchronize();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif
}
7,685 | /* Program : To add two randomly generated vectors with the help of a GPU
* Author : Anant Shah
* Roll Number : EE16B105
* Date : 14-8-2018
**/
#include<stdio.h>
#include<cuda.h>
#include<time.h>
#include<math.h>
#include<stdlib.h>
#define SIZE 32768
#define NUM_THREADS 256
#define NUM_BLOCKS 128
#define ERROR_HANDLER(error_msg) error_handler(error_msg)
void error_handler(cudaError_t error_msg){
    /* Abort with a diagnostic when a CUDA API call has failed; no-op on
     * success. (__FILE__/__LINE__ here refer to this function, not the
     * call site -- same behavior as before.) */
    if(error_msg == cudaSuccess){
        return;
    }
    printf("%s in %s at line %d\n",cudaGetErrorString(error_msg),__FILE__,__LINE__);
    exit(EXIT_FAILURE);
}
__global__ void vecAdd(int *A,int *B,double *C,int length) {
    /* One thread per element: C[i] = A[i] + B[i], with the int sum widened
     * to double on store. Threads past `length` exit immediately. */
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= length) {
        return;
    }
    C[idx] = A[idx] + B[idx];
}
/**
 * Read `size` whitespace-separated integers from `file` into `vector`.
 * Exits with a diagnostic if the file cannot be opened or contains fewer
 * than `size` integers (previously a short read was silently ignored,
 * leaving the trailing elements uninitialized).
 */
void readFileData(char *file,int size,int *vector){
    FILE *fp = fopen(file,"r+");
    if(fp==NULL){
        printf("error : File %s not found",file);
        exit(EXIT_FAILURE);
    }
    for(int i=0;i<size;i++){
        if(fscanf(fp,"%d",(vector+i)) != 1){
            printf("error : File %s contains fewer than %d integers",file,size);
            fclose(fp);
            exit(EXIT_FAILURE);
        }
    }
    fclose(fp);
}
/* Host driver: read two int vectors from the files named on the command
 * line, add them on the GPU into a double vector, and write A, B and A+B
 * interleaved to an output file. Usage: prog <fileA> <fileB>. */
int main(int argc,char **argv) {
if(argc!=3){
printf("Error : Invalid number of arguments \n");
exit(EXIT_FAILURE);
}
/*************************** Variable Declaration ****************************/
int *h_A; /* Vector declared on the host */
int *h_B; /* Vector declared on the host */
double *h_C; /* Vector which will be the sum of previously deined vectors */
int *d_A; /* Vector which will be a copy of <h_A> but on the device(GPU) */
int *d_B; /* Vector which will be a copy of <h_B> but on the device(GPU) */
double *d_C; /* Vector which stores the sum of <d_A> and <d_B> on the device */
size_t size_input = SIZE*sizeof(int);
size_t size_output = SIZE*sizeof(double);
clock_t start; /* Start time of the program */
clock_t stop; /* Stop time of the program */
char *file_A; /* File containing the data elements of vector A */
char *file_B; /* File containing the data elements of vector B */
/* NOTE(review): binding a string literal to non-const char* is deprecated
 * in C++; consider const char*. */
char *file_out = "ee16b105_3_out.txt";
FILE *fp; /* File pointer which manages the output text file */
file_A = argv[1];
file_B = argv[2];
/******************************************************************************/
/************************** Memory Allocation *********************************/
h_A = (int *)malloc(size_input); /* Allocate memory to the vector on the host */
h_B = (int *)malloc(size_input);
h_C = (double *)malloc(size_output);
/************************** Read the vector elements from the file ***************************/
readFileData(file_A,SIZE,h_A);
readFileData(file_B,SIZE,h_B);
/******************************************************************************/
ERROR_HANDLER(cudaMalloc((void **)&d_A,size_input));
ERROR_HANDLER(cudaMalloc((void **)&d_B,size_input));
ERROR_HANDLER(cudaMalloc((void **)&d_C,size_output));
/************************* Copy vectors to the host ***************************/
ERROR_HANDLER(cudaMemcpy(d_A, h_A, size_input, cudaMemcpyHostToDevice));
ERROR_HANDLER(cudaMemcpy(d_B, h_B, size_input, cudaMemcpyHostToDevice));
/************************ Add the vectors *************************************/
/* The ceiling function for the number of blocks is not applied as the number of threads is exactly equal to the number of data items to work on */
/* NUM_BLOCKS * NUM_THREADS = 128 * 256 = 32768 = SIZE, so the grid covers
 * the data exactly. The timed region spans launch + device sync only. */
start = clock();
vecAdd<<<NUM_BLOCKS,NUM_THREADS>>>(d_A, d_B, d_C, SIZE);
ERROR_HANDLER(cudaDeviceSynchronize());
stop = clock();
ERROR_HANDLER(cudaMemcpy(h_C, d_C, size_output, cudaMemcpyDeviceToHost)); /* Copy the result back to the host */
printf("Time for the vector addition : %f (seconds) \n",(stop-start)/(float)CLOCKS_PER_SEC);
/************************* Print the Result ***********************************/
/* Output layout: A[i], B[i], C[i] on consecutive lines for each i. */
fp = fopen(file_out,"w");
if(fp!=NULL){
for(int i=0;i<SIZE;i++){
fprintf(fp,"%d \n",h_A[i]);
fprintf(fp,"%d \n",h_B[i]);
fprintf(fp,"%.4f \n",h_C[i]);
}
}else{
printf("error : Could not write to file");
exit(EXIT_FAILURE);
}
fclose(fp);
/************************* Free Memory ****************************************/
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
|
7,686 | #include "includes.h"
// Extract channel `i` from an interleaved (pixel-major) image into a planar
// single-channel buffer. Expects a 2D launch covering width x height.
__global__ void CopyChannel_i_Kernel(float* output, const float* input, const int i, const int width, const int height, const int nChannels)
{
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < width && y < height)
    {
        const int pixel = y * width + x;
        output[pixel] = input[pixel * nChannels + i];
    }
}
7,687 | #include <stdlib.h>
#include <stdio.h>
#define MAX_THREADS 512
#define MAX_BLOCKS 64
unsigned int nextPow2(unsigned int x);
void getNumBlocksAndNumThreads(int n, int &blocks, int &threads);
void kernel(double *srcImage,
double *smpImage,
int srcwidth,
int srcheight,
int smpwidth,
int smpheight,
int srcx,
int srcy,
double *tempsigmaST,
double *tempsigmaS,
double &sigmaTT,
double *sigmaTS);
__global__ void reduce(double *input, double *output, int n);
double getSigma(double *input1, double *input2, int n);
__global__ void ReduceLine(double *srcImage, double *smpImage, double *sigmaST, double *sigmaS, int srcx, int srcy, int srcwidth, int smpwidth);
/**
 * Round x up to the next power of two (x itself if already a power of two).
 * Uses the standard bit-smearing trick; nextPow2(0) wraps to 0, as before.
 */
unsigned int nextPow2(unsigned int x) {
    x -= 1;
    for (unsigned int shift = 1; shift < 32; shift <<= 1) {
        x |= x >> shift;
    }
    return x + 1;
}
/**
 * Pick a launch shape for n elements: one thread per element rounded up to a
 * power of two, capped at MAX_THREADS, with enough blocks to cover n.
 */
void getNumBlocksAndNumThreads(int n, int &blocks, int &threads) {
    if (n < MAX_THREADS) {
        threads = nextPow2(n);
    } else {
        threads = MAX_THREADS;
    }
    blocks = (n + threads - 1) / threads;   // ceiling division
}
/*
In this function, one block calculate one line, block is one dimession and the thread is also one dimession
*/
/* Per-row dot product: block b computes sum over x of
 * srcImage[(srcx+b)*srcwidth + srcy + x] * smpImage[b*smpwidth + x]
 * and writes it to sigmaST[b]. One block per sample row; sigmaS is the
 * disabled second accumulator (see commented lines).
 * Assumes blockDim.x == MAX_THREADS, a power of two >= 64: the tree loop
 * stops at 64 elements and the final warp phase relies on the `volatile`
 * qualifier instead of __syncthreads() for lane-to-lane visibility.
 * NOTE(review): that volatile-only warp phase predates Volta's independent
 * thread scheduling -- confirm it is still safe on the target architecture. */
__global__ void ReduceLine(double *srcImage, double *smpImage, double *sigmaST, double *sigmaS, int srcx, int srcy, int srcwidth, int smpwidth) {
__shared__ double volatile sigmast[MAX_THREADS];
//__shared__ double sigmas[MAX_THREADS];
unsigned int srcBegin = (srcx + blockIdx.x) * srcwidth + srcy; // get the begin point in the srcImage.
unsigned int smpBegin = blockIdx.x * smpwidth; // get the begin point in the smpImage.
sigmast[threadIdx.x] = 0.0 ;
//sigmas[threadIdx.x] = 0.0;
unsigned int offset = blockDim.x;
/* Grid-stride-style accumulation within the row: each thread sums every
 * blockDim.x-th product into its own shared slot. */
for(int i = 0; threadIdx.x + i < smpwidth; i += offset) {
sigmast[threadIdx.x] += srcImage[srcBegin + threadIdx.x + i] * smpImage[smpBegin + threadIdx.x + i];
// sigmas[threadIdx.x] += srcImage[srcBegin + threadIdx.x + i] * srcImage[srcBegin + threadIdx.x + i];
}
__syncthreads();
/* Tree reduction down to 64 partial sums, with a barrier per level. */
for(unsigned int s = blockDim.x; s > 64; s /= 2) {
if(threadIdx.x < s / 2) {
sigmast[threadIdx.x] += sigmast[threadIdx.x + s / 2];
// sigmas[threadIdx.x] += sigmas[threadIdx.x + s / 2];
}
__syncthreads();
}
/* Final warp: no barriers, correctness hinges on `volatile` (see header). */
if(threadIdx.x < 32) {
sigmast[threadIdx.x] += sigmast[threadIdx.x + 32];
sigmast[threadIdx.x] += sigmast[threadIdx.x + 16];
sigmast[threadIdx.x] += sigmast[threadIdx.x + 8];
sigmast[threadIdx.x] += sigmast[threadIdx.x + 4];
sigmast[threadIdx.x] += sigmast[threadIdx.x + 2];
sigmast[threadIdx.x] += sigmast[threadIdx.x + 1];
}
if(threadIdx.x == 0) {
sigmaST[blockIdx.x] = sigmast[0];
// sigmaS[blockIdx.x] = sigmas[0];
}
}
/* Single-block sum of input[0..n) into *output.
 * Same structure as ReduceLine: per-thread strided accumulation, barriered
 * tree reduction down to 64 elements, then a volatile-based warp phase.
 * Assumes blockDim.x is a power of two >= 64 and a one-block launch.
 * NOTE(review): as above, the barrier-free warp phase relies on pre-Volta
 * warp synchrony -- confirm on the target architecture. */
__global__ void reduce(double *input, double *output, int n) {
__shared__ volatile double scratch[MAX_THREADS];
scratch[threadIdx.x] = 0.0;
unsigned int offset = blockDim.x;
/* Each thread sums every blockDim.x-th input element into its slot. */
for(unsigned int i = 0; i + threadIdx.x < n; i += offset) {
scratch[threadIdx.x] += input[threadIdx.x + i];
}
__syncthreads();
for(unsigned int s = blockDim.x; s > 64; s /= 2) {
if(threadIdx.x < s / 2) {
scratch[threadIdx.x] += scratch[threadIdx.x + s / 2];
}
__syncthreads();
}
if(threadIdx.x < 32) {
scratch[threadIdx.x] += scratch[threadIdx.x + 32];
scratch[threadIdx.x] += scratch[threadIdx.x + 16];
scratch[threadIdx.x] += scratch[threadIdx.x + 8];
scratch[threadIdx.x] += scratch[threadIdx.x + 4];
scratch[threadIdx.x] += scratch[threadIdx.x + 2];
scratch[threadIdx.x] += scratch[threadIdx.x + 1];
}
if(threadIdx.x == 0) {
*output = scratch[0];
}
}
/**
 * Host wrapper: for the sample window anchored at (srcx, srcy), computes the
 * sum over the window of srcImage*smpImage (via a per-row ReduceLine pass
 * followed by a single-block reduce) and writes the scalar result to
 * sigmaST[srcx*srcwidth + srcy] on the device.
 * srcheight, tempsigmaS and sigmaS are unused in this variant (the sigmaS
 * path is disabled) but retained for interface compatibility.
 */
void kernel(double *srcImage,
        double *smpImage,
        int srcwidth,
        int srcheight,
        int smpwidth,
        int smpheight,
        int srcx,
        int srcy,
        double *tempsigmaST,
        double *tempsigmaS,
        double &sigmaS,
        double *sigmaST) {
    (void)srcheight;
    (void)sigmaS;
    // Stage 1: one block per sample row produces smpheight partial sums.
    dim3 blockD(smpheight, 1, 1);
    dim3 threadD(MAX_THREADS, 1, 1);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent.
    cudaDeviceSynchronize();
    ReduceLine<<<blockD, threadD>>>(srcImage, smpImage, tempsigmaST, tempsigmaS, srcx, srcy, srcwidth, smpwidth);
    cudaDeviceSynchronize();
    // Stage 2: fold the per-row partials into the output cell in place.
    dim3 BlockD(1, 1, 1);
    dim3 ThreadD(nextPow2(smpheight), 1, 1);
    reduce<<<BlockD, ThreadD>>>(tempsigmaST, sigmaST + srcx * srcwidth + srcy, smpheight);
    cudaDeviceSynchronize();
    // NOTE(review): the previous version cudaMalloc'd a scratch double
    // (getsigmaST) that was never used or freed -- removed to fix the leak.
}
|
7,688 | #include "includes.h"
/* Scan the conserved variables and store the largest signal speed c + |u|
 * into *d_cMax (used for the CFL time step).
 * NOTE(review): as written this is only race-free when executed by a single
 * thread: every thread re-zeroes *d_cMax, `c` is a single __shared__ scalar
 * written by all threads with no synchronization, and the max update is a
 * non-atomic read-modify-write. Confirm the callers launch it <<<1,1>>>
 * (initDeviceMemory below calls it from every thread). */
__device__ void updateCMax(const int nbrOfGrids, const double *d_u1, const double *d_u2, const double *d_u3, const double *d_gama, double *d_cMax)
{
*d_cMax = 0; int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
double ro, p, u;
__shared__ double c;
for (int i = index; i < nbrOfGrids; i += stride){
/* Skip empty cells to avoid dividing by zero density. */
if (d_u1[i] == 0)
continue;
ro = d_u1[i];
u = d_u2[i] / ro;
/* Pressure from total energy: p = (E - ro*u^2/2)(gamma - 1). */
p = (d_u3[i] - ro * u * u / 2) * (*d_gama - 1);
/* Sound speed; abs(p) guards against small negative round-off. */
c = sqrt(*d_gama * abs(p) / ro);
if (*d_cMax < c + abs(u))
*d_cMax = c + abs(u);
}
}
/* Initialize the Sod shock-tube state on the device: scalar run parameters,
 * the left/right initial conditions, and the first CFL time step.
 * NOTE(review): every thread writes the scalar parameters and every thread
 * runs updateCMax (which is only safe single-threaded, see above) --
 * presumably this kernel is launched <<<1,1>>>; confirm. */
__global__ void initDeviceMemory(const int nbrOfGrids, double *d_u1, double *d_u2, double *d_u3, double *d_vol, double *d_h, double *d_length, double *d_gama, double *d_cfl, double *d_nu, double *d_tau, double *d_cMax, double *d_t) {
*d_t = 0; // time
*d_length = 1; // length of shock tube
*d_gama = 1.4; // ratio of specific heats
*d_cfl = 0.9; // Courant-Friedrichs-Lewy number
*d_nu = 0.0; // artificial viscosity coefficient
*d_h = *d_length / (nbrOfGrids - 1); // space grid size
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x;
for(int i = index; i < nbrOfGrids; i+= stride){
double e, ro, p, u = 0;
if (i < nbrOfGrids){
/* Right half: low density/pressure; left half: high. */
if (i >= int(nbrOfGrids / 2)) { ro = 0.125, p = 0.1; }
else { ro = 1, p = 1; }
e = p / (*d_gama - 1) + ro * u * u / 2;
d_u1[i] = ro;
d_u2[i] = ro * u;
d_u3[i] = e; /* duplicate assignment of d_u3[i] removed */
d_vol[i] = 1;
}
}
updateCMax(nbrOfGrids, d_u1, d_u2, d_u3, d_gama, d_cMax);
*d_tau = (*d_cfl) * (*d_h) / (*d_cMax); // initial time grid size, It will be modified to tMax if this > tMax
}
7,689 |
#include <stdlib.h>
#include <string.h>
#include "tree_huff.cuh"
/**
 * Build a Huffman tree from a character-frequency table.
 * Non-zero frequencies become leaves in a frequency-ordered list; the two
 * smallest entries are repeatedly merged (insert_ordered with tree=1) until
 * a single root remains. Returns NULL when every frequency is zero.
 */
struct node *generate_tree(int freq[MAX_CHARS]) {
    struct node *head = NULL;
    for (int ch = 0; ch < MAX_CHARS; ch++) {
        if (freq[ch] == 0) {
            continue;
        }
        head = insert_ordered(head, ch, freq[ch], NULL, NULL, 0);
    }
    if (head == NULL) {
        return NULL;
    }
    while (head->p_next != NULL) {
        struct node *first = head;
        struct node *second = head->p_next;
        /* Merge the two lowest-frequency nodes, then step past them. */
        head = insert_ordered(head, -1, first->freq + second->freq, first, second, 1);
        head = (head->p_next)->p_next;
    }
    return head;
}
/* Allocate and initialize a list/tree node.
 * NOTE(review): malloc result is unchecked, as in the original. */
static struct node *make_node(int ch, int freq, struct node *left,
                              struct node *right, struct node *next) {
    struct node *n = (struct node *)malloc(sizeof(struct node));
    n->ch = ch;
    n->freq = freq;
    n->p_left = left;
    n->p_right = right;
    n->p_next = next;
    return n;
}
/**
 * Insert a new node into the frequency-ordered list headed by old_head and
 * return the (possibly new) head. tree==0 inserts strictly before equal
 * frequencies (<); tree==1 inserts at-or-before (<=), which places freshly
 * merged Huffman nodes ahead of equal-frequency leaves.
 * (The three original allocation branches were identical except for the
 * insertion condition; they are folded into one make_node call.)
 */
struct node *insert_ordered(struct node *old_head, int ch, int freq,
struct node *left, struct node *right, int tree) {
    int insert_here = (old_head == NULL) ||
                      (tree == 0 && freq < old_head->freq) ||
                      (tree == 1 && freq <= old_head->freq);
    if (insert_here) {
        /* When old_head is NULL this links p_next = NULL, as before. */
        return make_node(ch, freq, left, right, old_head);
    }
    old_head->p_next = insert_ordered(old_head->p_next, ch, freq, left, right, tree);
    return old_head;
}
/* Walk the Huffman tree accumulating the bit path in `code` ("0" = left,
 * "1" = right); at each leaf, record the character, its path string and its
 * length into code_values. The right subtree recurses on `tmp`, a copy taken
 * BEFORE the left branch appends to the shared `code` buffer, so the two
 * subtrees do not corrupt each other's prefixes.
 * NOTE(review): tmp is MAX_CHARS bytes while `code` is MAX_PATH -- if a path
 * can exceed MAX_CHARS characters this truncates/overflows; confirm the
 * relation between MAX_PATH and MAX_CHARS. */
void build_codes(struct node *tree_head, struct code code_values[MAX_CHARS],
char code[MAX_PATH], int code_len) {
if (tree_head == NULL) {
return;
}
/* A node with no children is a leaf carrying a real character. */
if ((tree_head->p_right == NULL) && (tree_head->p_left == NULL)) {
code_values[tree_head->ch].ch = tree_head->ch;
strncpy(code_values[tree_head->ch].path, code, MAX_PATH);
code_values[tree_head->ch].len = code_len;
}
else {
char tmp[MAX_CHARS] = "";
strncpy(tmp, code, MAX_CHARS);
code_len++;
build_codes(tree_head->p_left, code_values, strncat(code, "0", 1), code_len);
build_codes(tree_head->p_right, code_values, strncat(tmp, "1", 1), code_len);
}
return;
}
/** Post-order teardown of a Huffman tree: children first, then the node. */
void free_tree(struct node *tree_head) {
    if (tree_head == NULL) {
        return;
    }
    free_tree(tree_head->p_left);
    free_tree(tree_head->p_right);
    free(tree_head);
}
|
7,690 | #include <thrust/random.h>
#include <iostream>
int main()
{
    // Read a discard count from stdin, advance the default engine by that
    // many draws (a crude stand-in for seeding), and print one uniform
    // double from [25, 40).
    int seed;
    std::cin >> seed;
    thrust::default_random_engine rng;
    rng.discard(seed);
    thrust::uniform_real_distribution<double> dist(25, 40);
    double sample = dist(rng);
    std::cout << "Valor gerado: " << sample << std::endl;
    return 0;
}
7,691 | #include <stdio.h>
// Función kernel que se ejecuta en la GPU
// y <- a*x + y, one element per thread; threads past numElementos do nothing.
__global__ void saxpy(float *x, float *y, float a, int numElementos)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= numElementos)
        return;
    y[tid] = a * x[tid] + y[tid];
}
/*
 * SAXPY driver: y = 5*x + y over 1M elements; every result should be 7.
 */
int main(void)
{
    // Definimos tamaños: 1M de elementos (2^20)
    int numElementos = 1<<20;
    size_t tamano = numElementos * sizeof(float);
    float *h_x, *h_y, *d_x, *d_y;
    // Reservamos memoria en el host
    h_x = (float *)malloc(tamano);
    h_y = (float *)malloc(tamano);
    // Asignamos valores en el host
    for (int i = 0; i < numElementos; i++)
    {
        h_x[i] = 1.0f;
        h_y[i] = 2.0f;
    }
    float a = 5.0f;
    // Reservamos memoria en el device
    cudaMalloc(&d_x, tamano);
    cudaMalloc(&d_y, tamano);
    // Traspasamos datos de host a device
    cudaMemcpy(d_x, h_x, tamano, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, tamano, cudaMemcpyHostToDevice);
    //Lanzamos el kernel CUDA
    int hilosPorBloque = 8;
    int totalBloques =(numElementos + hilosPorBloque - 1) / hilosPorBloque;
    // BUG FIX: pass the element count as the kernel's bounds limit -- the
    // previous code passed the byte count `tamano`.
    saxpy<<<totalBloques, hilosPorBloque>>>(d_x, d_y, a, numElementos);
    // Traspasamos datos de device a host (blocking copy also syncs the kernel)
    cudaMemcpy(h_y, d_y, tamano, cudaMemcpyDeviceToHost);
    // Verificamos resultado: cada y[i] debe valer 5*1 + 2 = 7
    float Error = 0.0f;
    int i;
    for (i = 0; i < 10; i++) {
        // Branchless |diff|: plain abs() could resolve to the int overload
        // and truncate the fractional part.
        float diff = h_y[i] - 7.0f;
        Error = Error + (diff < 0.0f ? -diff : diff);
        printf("%2.8f", h_y[i]);   // print the values being verified (was h_x)
    }
    printf("Error: %f\n", Error);
    // Liberamos memoria device
    cudaFree(d_x);
    cudaFree(d_y);
    // Liberamos memoria host
    free(h_x);
    free(h_y);
    return 0;
}
|
7,692 | #include <stdio.h>
#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1
__global__ void hello()
{
    // One line of output per block (the launch uses one thread per block).
    const int block = blockIdx.x;
    printf("Hello world! I'm a thread in block %d\n", block);
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Backing function for gpuErrchk: report a failed CUDA call with file/line
// context and, unless abort==false, exit with the CUDA error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
int main(int argc,char **argv)
{
    // NUM_BLOCKS blocks of BLOCK_WIDTH (=1) thread each.
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
    // Synchronizing flushes the device-side printf buffer and surfaces any
    // launch/execution error through gpuErrchk.
    gpuErrchk(cudaDeviceSynchronize());
    printf("That's all!\n");
    return 0;
}
7,693 | #include "includes.h"
// Intentionally empty kernel -- presumably a launch-overhead / smoke-test
// stub; TODO confirm against its call site.
__global__ void kernel (void){
}
7,694 | /**
*Base on https://devblogs.nvidia.com/how-query-device-properties-and-handle-errors-cuda-cc/
*/
#include <stdio.h>
#include <cuda_runtime.h>
// print device properties
// print device properties
// size_t members (totalGlobalMem, sharedMemPerBlock, totalConstMem,
// textureAlignment) are printed with %zu -- the previous "%d" was undefined
// behavior on LP64 and overflowed for devices with more than 2 GB of memory.
void showDevice(const cudaDeviceProp &prop)
{
    printf("Device Name : %s\n", prop.name);
    printf("Major revision number: %d\n", prop.major);
    printf("Minor revision number: %d\n", prop.minor);
    printf("Number of Stream MultiProcessor : %d.\n", prop.multiProcessorCount);
    printf("Memory Clock Rate (KHz) : %d\n", prop.memoryClockRate);
    printf("Memory Bus Width (bits) : %d\n",prop.memoryBusWidth);
    printf("Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    printf("Total Global Memory : %zu.\n", prop.totalGlobalMem);
    printf("Shared Memory Per Block : %zu.\n", prop.sharedMemPerBlock);
    printf("Registers Per Block : %d.\n", prop.regsPerBlock);
    printf("Warp Size : %d.\n", prop.warpSize);
    printf("Max Threads Per Block : %d.\n", prop.maxThreadsPerBlock);
    printf("Max Threads Dim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("Max Grid Size[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("Total Const Memory : %zu.\n", prop.totalConstMem);
    printf("Clock Rate : %d.\n", prop.clockRate);
    printf("Texture Alignment : %zu.\n", prop.textureAlignment);
    printf("Device Overlap : %d.\n", prop.deviceOverlap);
}
/**
 * Enumerate CUDA devices, print the first one with compute capability >= 1.x,
 * and select it with cudaSetDevice. Returns false if no usable device exists.
 */
bool initCUDA()
{
    int count = 0;
    printf("CUDA Device Query...\n");
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    printf("You now have %d CUDA devices.\n", count);
    // Scan for the first device reporting compute capability >= 1.
    int chosen = count;
    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) != cudaSuccess) {
            continue;
        }
        if (prop.major >= 1) {
            showDevice(prop);
            chosen = i;
            break;
        }
    }
    if (chosen == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(chosen);
    return true;
}
int main(int argc, char const *argv[])
{
    // initCUDA prints its own diagnostics; report only the success case here.
    if (initCUDA()) {
        printf("CUDA initialized.\n");
    }
    return 0;
}
7,695 | #include <thrust/device_vector.h>
#include <iostream>
/* Block-wide sum of A into *res: each thread stages one element in dynamic
 * shared memory, an interleaved-addressing tree reduction folds the block,
 * and thread 0 adds the block total to *res atomically.
 * Requires blockDim.x * sizeof(float) dynamic shared memory and (as launched
 * below) assumes the grid exactly covers the data -- there is no idx < N
 * guard, so a ragged tail would read out of bounds. */
__global__ void kernel(float* A, int N, float* res)
{
extern __shared__ float shm[];
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
shm[tid] = A[idx];
__syncthreads();
/* Interleaved addressing: stride i doubles each pass; active threads sit at
 * multiples of 2*i. Assumes blockDim.x is a power of two. */
for (int i = 1; i < blockDim.x; i *= 2) {
int index = 2 * i * tid;
if (index < blockDim.x) shm[index] += shm[index + i];
__syncthreads();
}
if (tid == 0) atomicAdd(res, shm[0]);
}
/*
 * Reduce 128 ones on the device; expected output is 128.
 */
int main()
{
    thrust::device_vector<float> d_vec(128, 1.f);
    float* raw_ptr = thrust::raw_pointer_cast(d_vec.data());
    float* d_res;
    float res = 0.f;
    cudaMalloc(&d_res, sizeof(float));
    cudaMemset((void*)d_res, 0, sizeof(float));
    // One block of 128 threads, 128 floats of dynamic shared memory.
    kernel<<<1, 128, 128 * sizeof(float)>>>(raw_ptr, d_vec.size(), d_res);
    // Blocking copy synchronizes with the kernel before reading the result.
    cudaMemcpy(&res, d_res, sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << res << "\n";
    cudaFree(d_res);   // previously leaked
    return 0;
}
|
7,696 | //pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//The memcpy is between different src and dst types so we have to handle the
//arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
char y;
} s1_t; //< sizeof(s1_t) == 4
typedef struct {
short x;
short y;
} s2_t; //< sizeof(s2_t) == 4
/* GPUVerify fixture (see the //pass annotations above): each thread copies
 * its own 4-byte element, so accesses are disjoint and the kernel is
 * race-free. src/dst structs differ in layout, hence the byte-wise memcpy. */
__global__ void k(s1_t *in, s2_t *out) {
size_t len = 4;
memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
7,697 | #include <stdio.h>
#include <cuda.h>
/* Print a greeting from the device. The __syncthreads() is a full-block
 * barrier -- harmless but redundant for the <<<1,1>>> launch below. */
__global__ void kernel()
{
printf("hello cuda\n");
__syncthreads();
}
int main()
{
    kernel<<<1, 1>>>();
    // Flush device-side printf output before continuing on the host.
    cudaDeviceSynchronize();
    // Host-side busy loop; `a` ends equal to the trip count.
    int a = 0;
    for (int i = 0; i < 10000000; i++) {
        a++;
    }
    printf("a = %d\n", a);
    return 0;
}
|
7,698 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/* ans = v1 x v2 for 3-vectors of double.
 * BUG FIX: the y component of a cross product is v1z*v2x - v1x*v2z; the
 * previous code had the operands swapped, negating ans[1]. */
__device__ void cross_prod_3_d_gpu(double * v1, double * v2, double * ans)
{
	ans[0] = v1[1] * v2[2] - v1[2] * v2[1];
	ans[1] = v1[2] * v2[0] - v1[0] * v2[2];
	ans[2] = v1[0] * v2[1] - v1[1] * v2[0];
}
/* ans = v1 x v2 for 3-vectors of float.
 * BUG FIX: y component corrected to v1z*v2x - v1x*v2z (was negated). */
__device__ void cross_prod_3_f_gpu(float * v1, float * v2, float * ans)
{
	ans[0] = v1[1] * v2[2] - v1[2] * v2[1];
	ans[1] = v1[2] * v2[0] - v1[0] * v2[2];
	ans[2] = v1[0] * v2[1] - v1[1] * v2[0];
}
/* ans = v1 x v2 for 3-vectors of int.
 * BUG FIX: y component corrected to v1z*v2x - v1x*v2z (was negated). */
__device__ void cross_prod_3_i_gpu(int * v1, int * v2, int * ans)
{
	ans[0] = v1[1] * v2[2] - v1[2] * v2[1];
	ans[1] = v1[2] * v2[0] - v1[0] * v2[2];
	ans[2] = v1[0] * v2[1] - v1[1] * v2[0];
}
7,699 | #include "includes.h"
// Increment every element of A by one; one thread per element.
__global__ void addKernel(float* A, int size){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= size){
        return;
    }
    A[idx] += 1.0f;
}
7,700 | #include "includes.h"
/* For each output cell, sum `selection` across its n slices (stride apart)
 * into selection_sum. TILE_DIM and BLOCK_ROWS are project-wide macros
 * (defined in includes.h) describing the tile geometry: each thread handles
 * TILE_DIM/BLOCK_ROWS rows of its tile column.
 * NOTE(review): each (x, y+j) cell is touched by exactly one thread, so the
 * atomicAdd appears unnecessary (a plain += would do) unless overlapping
 * grids are launched concurrently -- confirm the launch configuration. */
__global__ void selection_sum_weights(float * selection_sum, float * selection, int n, int stride) {
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
int idx = 0;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) {
/* Zero the accumulator, then fold in all n slices for this cell. */
selection_sum[((y+j)*width + x)] = 0;
for ( idx = 0; idx < n; idx ++) {
atomicAdd(&(selection_sum[((y+j)*width + x)]), selection[idx * stride + ((y+j)*width + x)]);
}
}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.