serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
11,801 | #include "includes.h"
// Packs each element of the nrows x ncols (column-major) matrix a into a
// 64-bit key in b: low 32 bits are a sign-adjusted int view of the element,
// high 32 bits are its 1-based column index (optionally reversed), so a
// 64-bit sort groups by column and orders values within each column.
__global__ void __embedmat2d(double *a, long long *b, int nrows, int ncols, int sortdown) {
// flat thread id over a (possibly 2-D) grid of 1-D blocks
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
int icol;
// grid-stride loop: stride is the total number of threads in the grid
for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) {
double v = a[i];
// reinterpret the first 4 bytes of the double as an int.
// NOTE(review): on little-endian hosts this is the LOW mantissa word,
// not the sign/exponent word -- this looks ported from a float
// version of the kernel; confirm the intended key semantics.
int vi = *((int *)&v);
// map sign-magnitude to a monotonically sortable two's-complement value
if (vi & signbit) {
vi = -(vi & mag);
}
icol = (i/nrows+1);  // 1-based column index of element i
if (sortdown) icol = ncols - icol + 1;  // reverse column order for descending sort
b[i] = (long long)vi + (((long long)icol)<<32);
}
}
} |
11,802 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/
#include "CopyValues.h"
#include "CopyValues.cuh"
#include "../../XUtility.h"
#include "../../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
copy a range of elements from a source vector to a target vector
>> s - source matrix
>> t - target matrix
<< return - succeed or not
*/
/*
copy values from a source tensor to a target tensor (CUDA side)
>> s - source tensor
>> t - target tensor
Both tensors must already be allocated with matching type, unit size and
element count; the target must be at least as dense as the source.
*/
void _CudaCopyValues(const XTensor * s, XTensor * t)
{
    CheckNTErrors(s != NULL && t != NULL, "The input tensor and output tensor must be nonempty!");
    CheckNTErrors(s->dataType == t->dataType, "Unmatched data type!");
    CheckNTErrors(s->unitSize == t->unitSize, "Incompatible data types in value copy.");
    /* fix: error message read "The data items are be the same." */
    CheckNTErrors(s->unitNum == t->unitNum, "The data items should be the same.");
    CheckNTErrors(s->denseRatio <= t->denseRatio, "Incompatible vectors in value copy.");
    /* dense -> dense : one flat copy of unitSize * unitNum bytes */
    if (!s->isSparse && !t->isSparse) {
        XMemCopy(t->data, t->devID, s->data, s->devID, s->unitSize * s->unitNum);
    }
    /* dense -> sparse : not implemented yet */
    else if (!s->isSparse && t->isSparse &&
             s->dataType == DEFAULT_DTYPE &&
             t->dataType == DEFAULT_DTYPE)
    {
        ShowNTErrors("TODO!");
    }
    /* sparse -> dense : not implemented yet */
    else if (s->isSparse && !t->isSparse &&
             s->dataType == DEFAULT_DTYPE &&
             t->dataType == DEFAULT_DTYPE)
    {
        ShowNTErrors("TODO!");
    }
    /* sparse -> sparse : copy the packed representation as raw bytes */
    else if (s->isSparse && t->isSparse &&
             s->dataType == DEFAULT_DTYPE &&
             t->dataType == DEFAULT_DTYPE)
    {
        int num = s->unitNumNonZero;
        /* size formula: one leading int, then per non-zero element an
           int index plus a value of unitSize bytes */
        int size = sizeof(int) + num * (s->unitSize + sizeof(int));
        XMemCopy(t->data, t->devID, s->data, s->devID, size);
        t->unitNumNonZero = num;
    }
    else {
        ShowNTErrors("TODO!");
    }
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
11,803 | #include "includes.h"
// Elementwise vector addition: c[i] = a[i] + b[i] for every i in [0, values).
// Launch with enough blockDim.x * gridDim.x threads to cover `values`;
// extra tail threads are filtered by the bounds check.
__global__ void add(float *c, float* a, float *b, int values){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < values) {
        c[idx] = a[idx] + b[idx];
    }
} |
11,804 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define N 32
/* One-block-per-element addition: block k computes c[k] = a[k] + b[k].
   Launch as add<<<N, 1>>> so every element has its own block. */
__global__ void add(int* a, int* b, int* c) {
    const int k = blockIdx.x;  // each block handles exactly one element
    c[k] = a[k] + b[k];
}
void random_ints(int* a, int n);
/* Demo driver: fills two host arrays with 0..N-1, adds them on the GPU with
   one block per element, and prints the result. Returns 0 on success. */
int main(void) {
    int *a, *b, *c;               // cpu copies
    int *gpu_a, *gpu_b, *gpu_c;   // gpu copies
    int size = N * sizeof(int);
    // Allocate space on the gpu for a, b, c
    cudaMalloc((void**)&gpu_a, size);
    cudaMalloc((void**)&gpu_b, size);
    cudaMalloc((void**)&gpu_c, size);
    // Allocate host buffers and check them (the original ignored malloc failure)
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    random_ints(a, N);
    random_ints(b, N);
    //copy values in cpu memory to gpu memory
    cudaMemcpy(gpu_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, size, cudaMemcpyHostToDevice);
    //run the function: one block per element (see add above)
    add<<<N, 1>>>(gpu_a, gpu_b, gpu_c);
    // launch errors do not surface from the <<<>>> expression itself
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    //copy result back to host (blocking copy also synchronizes with the kernel)
    cudaMemcpy(c, gpu_c, size, cudaMemcpyDeviceToHost);
    int i;
    printf("Sum is: ");
    for (i = 0; i < N; i++) {
        printf("%i, ", c[i]);
    }
    printf("\n");
    cudaFree(gpu_a); cudaFree(gpu_b); cudaFree(gpu_c);
    free(a); free(b); free(c);
    return 0;
}
/* Fills a[0..n-1] with the sequence 0, 1, ..., n-1.
   (Deterministic despite the name -- no randomness is involved.) */
void random_ints(int* a, int n) {
    for (int k = 0; k < n; ++k)
        a[k] = k;
}
|
11,805 | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Template project which demonstrates the basics on how to setup a project
* example application.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil_inline.h>
// includes, kernels
//#include <alignment_kernel.cu>
//********************** global variables ***************************
// BLOSUM62 matrix and its size
// these should be copied to constant memory in the device
/* const int size_blosum = 24; */
/* int BLOSUM62[size_blosum*size_blosum] = { */
/* 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4, */
/* -1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4, */
/* -2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4, */
/* -2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4, */
/* 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4, */
/* -1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4, */
/* -1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4, */
/* 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4, */
/* -2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4, */
/* -1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4, */
/* -1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4, */
/* -1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4, */
/* -1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4, */
/* -2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4, */
/* -1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4, */
/* 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4, */
/* 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4, */
/* -3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4, */
/* -2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4, */
/* 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4, */
/* -2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4, */
/* -1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4, */
/* 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4, */
/* -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1 */
/* }; */
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void alignment();
// external definition of the alignment kernel
extern "C"
void alignmentKernel( int* g_idata, int* g_index, int* g_odata );
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
//void runTest( int argc, char** argv)
/* Host-side driver for the sequence-alignment kernel. The entire cutil-based
   implementation below is commented out (cutil was removed from the CUDA
   toolkit), so this function currently does nothing; the commented code is
   kept as a reference for a future reimplementation. */
void alignment()
{
/* // use command-line specified CUDA device, otherwise use device with highest Gflops/s */
/* if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) */
/* cutilDeviceInit(argc, argv); */
/* else */
/* cudaSetDevice( cutGetMaxGflopsDeviceId() ); */
/* // setup the timer */
/* unsigned int timer = 0; */
/* cutilCheckError( cutCreateTimer( &timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* // input */
/* char *a = "atcggtcaattggcc"; */
/* char *b = "atgccctgatgggccga"; */
/* // dimension of multi-threading */
/* unsigned int num_threads = 1; */
/* unsigned int mem_size = sizeof( char ) * (num_threads + 1) * 1000; */
/* unsigned int index_size = (num_threads + 2) * sizeof(int); */
/* unsigned int out_size = num_threads * sizeof(int); */
/* unsigned int shared_mem_size = num_threads * strlen(a) * sizeof(char) * 2 ; */
/* // allocate host memory */
/* char* h_idata = (char*) malloc( mem_size ); */
/* int* h_index = (int*) malloc( index_size ); */
/* // initalize the memory */
/* /\* for( unsigned int i = 0; i < num_threads; ++i) */
/* { */
/* h_idata[i] = (float) i; */
/* }*\/ */
/* *(h_index) = strlen(a); */
/* *(h_index+1) = strlen(a) + strlen(b); */
/* memcpy( h_idata, a, strlen(a) * sizeof(char) ); */
/* memcpy( h_idata + strlen(a) * sizeof(char), b, strlen(b) * sizeof(char) ); */
/* // allocate device memory */
/* int* d_idata; */
/* int* d_index; */
/* cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size)); */
/* cutilSafeCall( cudaMalloc( (void**) &d_index, index_size ) ); */
/* // copy host memory to device */
/* cutilSafeCall( cudaMemcpy( d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) ); */
/* cutilSafeCall( cudaMemcpy( d_index, h_index, index_size, cudaMemcpyHostToDevice ) ); */
/* // allocate device memory for result */
/* int* d_odata; */
/* cutilSafeCall( cudaMalloc( (void**) &d_odata, out_size ) ); */
/* // setup execution parameters */
/* dim3 grid( 1, 1, 1); */
/* dim3 threads( num_threads, 1, 1); */
/* // execute the kernel */
/* alignmentKernel<<< grid, threads, shared_mem_size >>>( d_idata, d_index,d_odata); */
/* // check if kernel execution generated and error */
/* cutilCheckMsg("Kernel execution failed"); */
/* // allocate mem for the result on host side */
/* int* h_odata = (int*) malloc( out_size ); */
/* // copy result from device to host */
/* cutilSafeCall( cudaMemcpy( h_odata, d_odata, sizeof( float) * num_threads, */
/* cudaMemcpyDeviceToHost) ); */
/* // compute the time spent for device computation */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer)); */
/* cutilCheckError( cutDeleteTimer( timer)); */
/* // calculate the final result */
/* int result = 0; */
/* for ( int i =0; i< num_threads; ++i ) */
/* { */
/* result += *( h_odata + i ); */
/* } */
/* printf("result is %d\n", result); */
/* // cleanup memory */
/* free( h_idata); */
/* free( h_index ); */
/* free( h_odata); */
/* cutilSafeCall(cudaFree(d_idata)); */
/* cutilSafeCall(cudaFree(d_odata)); */
/* cutilSafeCall(cudaFree(d_index)); */
/* cudaThreadExit(); */
}
|
11,806 | #include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#define damping_factor 0.9
#define thread_num 128
#define block_num ((n_vertices + thread_num-1)/thread_num)
#define ctm 1024
#define cbm 32768
/* Initializes the rank vectors: every vertex starts with the uniform rank
   1/n_vertices and the "next" accumulator starts at zero.
   One thread per vertex; extra tail threads are filtered out. */
__global__ void initializePagerankArray(float * pagerank_d,float * pagerank_next_d, int n_vertices) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < n_vertices) {
        /* fix: use a float literal -- the original 1.0/(float)n_vertices
           performed the division in double precision on the device */
        pagerank_d[i] = 1.0f/(float)n_vertices;
        pagerank_next_d[i] = 0.0f;
    }
}
/* Scatter phase of one PageRank iteration. Each vertex i either pushes
   damping_factor * rank(i) / outdegree(i) to every successor, or -- when it
   has no successors -- adds its whole damped rank to the shared dangling
   accumulator *dangling_value2. Atomics are required because many vertices
   may share a successor.
   successors_d[successor_offset_d[i] .. +n_successors_d[i]-1] lists the
   successors of vertex i (CSR-style layout built by the host). */
__global__ void addToNextPagerankArray(float * pagerank_d, float * pagerank_next_d, int * n_successors_d, int * successors_d, int * successor_offset_d, float * dangling_value2, int n_vertices) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j;
int n_suc;
if (i < n_vertices) {
n_suc = n_successors_d[i];
if(n_suc > 0) {
for(j = 0; j < n_suc; j++) {
atomicAdd(&(pagerank_next_d[successors_d[successor_offset_d[i]+j]]), damping_factor*(pagerank_d[i])/n_suc);
}
} else {
// dangling vertex: rank is redistributed uniformly by the next kernel
atomicAdd(dangling_value2, damping_factor*pagerank_d[i]);
}
}
}
/* Gather phase of one PageRank iteration: adds the uniform share of the
   dangling mass plus the teleport term (1 - damping_factor)/n to every
   vertex's "next" rank. One thread per vertex. */
__global__ void finalPagerankArrayForIteration(float * pagerank_next_d, int n_vertices, float dangling_value2) {
    const int vertex = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (vertex >= n_vertices)
        return;
    atomicAdd(&(pagerank_next_d[vertex]), (dangling_value2 + (1-damping_factor))/((float)n_vertices));
}
/* Commits one iteration: copies the "next" rank into the current rank,
   clears "next" for the following iteration, and records the absolute
   per-vertex change in diffs for the host-side convergence test. */
__global__ void setPagerankArrayFromNext(float * pagerank_d, float * pagerank_next_d, int n_vertices, float *diffs) {
    const int v = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (v < n_vertices) {
        const float previous = pagerank_d[v];
        const float current = pagerank_next_d[v];
        pagerank_d[v] = current;
        pagerank_next_d[v] = 0.0;
        diffs[v] = (previous >= current) ? (previous - current) : (current - previous);
    }
}
/* Per-block sum reduction: block k sums its slice of g_idata (up to
   blockDim.x elements, zero-padded past n) into g_odata[k]; the host adds
   the per-block partials to finish the reduction.
   NOTE(review): the halving loop only reduces correctly when blockDim.x is
   a power of two (it is launched with ctm = 1024 threads). sdata is
   declared with 2*ctm entries but only blockDim.x of them are used. */
__global__ void convergence(float *g_idata, float *g_odata, int n) {
__shared__ float sdata[2*ctm];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem (tree reduction, stride halves each step)
for (unsigned int s = blockDim.x/2; s > 0; s /= 2) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
// barrier is outside the divergent if: all threads reach it
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/* Driver: reads an edge-list graph file ("from to" pairs, one per line),
   builds a CSR-like successor structure, runs damped power-iteration
   PageRank on the GPU until the summed per-vertex change drops below
   epsilon or 60 iterations elapse, then writes ranks to the file "rg".
   NOTE(review): every cudaMalloc/cudaMemcpy result is stored in `err` but
   never checked until the final cudaDeviceReset. */
int main(int argc, char ** args) {
if (argc != 2) {
fprintf(stderr,"Wrong number of args. Provide input graph file.\n");
exit(-1);
}
// touch the runtime so CUDA context creation is not charged to the timers
cudaFree(0);
cudaError_t err = cudaSuccess;
cudaProfilerStart();
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Start CPU timer
clock_t cycles_to_build, cycles_to_calc;
// build up the graph
int i;
unsigned int n_vertices = 0;
unsigned int n_edges = 0;
unsigned int vertex_from = 0, vertex_to = 0, vertex_prev = 0;
// Vertex
float *diffs_new_h;
float * pagerank_h, *pagerank_d, *diffs, *diffs_new;
float *pagerank_next_d;
int * n_successors_h;
int * successors_h;
int * successor_offset_h;
int * successor_offset_d;
FILE * fp;
if ((fp = fopen(args[1], "r")) == NULL) {
fprintf(stderr,"ERROR: Could not open input file.\n");
exit(-1);
}
// first pass: count edges and find the largest vertex id
while (fscanf(fp, "%u %u", &vertex_from, &vertex_to) != EOF) {
if (vertex_from > n_vertices) {
n_vertices = vertex_from;
}
else if (vertex_to > n_vertices) {
n_vertices = vertex_to;
}
n_edges++;
}
n_vertices++;
clock_t start = clock();
// Allocate flattened data structure host and device memory
pagerank_h = (float *) malloc(n_vertices * sizeof(*pagerank_h));
diffs_new_h = (float * ) malloc(cbm*sizeof(*diffs_new_h));
err = cudaMalloc((void **)&pagerank_d, n_vertices*sizeof(float));
err = cudaMalloc((void **)&diffs, n_vertices*sizeof(float));
err = cudaMalloc((void **)&diffs_new, cbm*sizeof(float));
err = cudaMalloc((void **)&pagerank_next_d, n_vertices*sizeof(float));
// NOTE(review): cudaHostAlloc does NOT zero the buffer, yet the counting
// loop below does `n_successors_h[vertex_from] += 1` -- the out-degree
// counts start from uninitialized memory; a memset to 0 looks required.
err = cudaHostAlloc((void **)&n_successors_h, n_vertices*sizeof(int),cudaHostAllocDefault);
successor_offset_h = (int *) malloc(n_vertices * sizeof(*successor_offset_h));
err = cudaMalloc((void **)&successor_offset_d, n_vertices*sizeof(int));
err = cudaHostAlloc((void**)&successors_h, n_edges*sizeof(int),cudaHostAllocDefault);
// allocate memory for successor pointers
int offset = 0, edges = 0;
// second pass: count successors and fill the CSR offset/successor arrays
// (assumes edges for the same source vertex appear contiguously in the file)
fseek(fp, 0L, SEEK_SET);
i = 0;
while (fscanf(fp, "%u %u", &vertex_from, &vertex_to) != EOF) {
n_successors_h[vertex_from] += 1;
// Fill successor_offset_h array
successor_offset_h[i] = offset;
if(edges != 0 && vertex_prev != vertex_from) {
i = vertex_from;
offset = edges;
successor_offset_h[i] = offset;
vertex_prev = vertex_from;
}
// Fill successor array
successors_h[edges] = vertex_to;
edges++;
}
// NOTE(review): the last vertex's offset is set to edges-1 (an index, not
// an offset) -- inconsistent with the other entries; verify against data.
successor_offset_h[i] = edges - 1;
fclose(fp);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Get build time and reset start
cycles_to_build = clock() - start;
start = clock();
err = cudaMemcpy(successor_offset_d, successor_offset_h, n_vertices*sizeof(int), cudaMemcpyHostToDevice);
// Compute the pagerank
int n_iterations = 60;
int iteration = 0;
int numOfBlocks = 1;
int threadsPerBlock = 1000;
if(n_vertices <= thread_num) {
threadsPerBlock = n_vertices;
numOfBlocks = 1;
} else {
threadsPerBlock = thread_num;
numOfBlocks = (n_vertices + thread_num-1)/thread_num; // ceil-div so the grid covers all vertices
}
float dangling_value_h = 0;
float dangling_value_h2 = 0;
float *dangling_value2;
int n_blocks = (n_vertices + 2048 - 1)/2048;
if (n_blocks == 0){
n_blocks = 1;
}
float epsilon = 0.000001;
//float * d_diff;
//float diff;
float diff = epsilon + 1;  // force at least one iteration
//err = cudaMalloc((void **)&d_diff, sizeof(float));
err = cudaMalloc((void **)&dangling_value2, sizeof(float));
err = cudaMemcpy(dangling_value2, &dangling_value_h, sizeof(float), cudaMemcpyHostToDevice);
initializePagerankArray<<<numOfBlocks,threadsPerBlock>>>(pagerank_d, pagerank_next_d, n_vertices);
while(epsilon < diff && iteration < n_iterations) { //was 23
// set the dangling value to 0
dangling_value_h = 0;
err = cudaMemcpy(dangling_value2, &dangling_value_h, sizeof(float), cudaMemcpyHostToDevice);
// initial parallel pagerank_next computation
// NOTE(review): n_successors_h and successors_h are PINNED HOST pointers
// (cudaHostAllocDefault) passed directly as kernel arguments -- this only
// works where the pinned allocation is device-accessible (UVA/zero-copy);
// confirm, or copy them to device buffers like successor_offset_d.
addToNextPagerankArray<<<numOfBlocks,threadsPerBlock>>>(pagerank_d, pagerank_next_d, n_successors_h, successors_h, successor_offset_d, dangling_value2, n_vertices);
// get the dangling value (blocking copy also synchronizes with the kernel)
err = cudaMemcpy(&dangling_value_h2, dangling_value2, sizeof(float), cudaMemcpyDeviceToHost);
// final parallel pagerank_next computation
finalPagerankArrayForIteration<<<numOfBlocks,threadsPerBlock>>>(pagerank_next_d, n_vertices, dangling_value_h2);
// Get difference to compare to epsilon
//cudaMemset(d_diff, 0, sizeof(float) );
cudaMemset(diffs_new, 0, cbm*sizeof(float) );
setPagerankArrayFromNext<<<numOfBlocks,threadsPerBlock>>>(pagerank_d, pagerank_next_d, n_vertices, diffs);
// per-block partial sums of |delta rank|; host finishes the reduction below
convergence<<<cbm,ctm>>>(diffs, diffs_new, n_vertices);
cudaMemcpy(diffs_new_h, diffs_new, sizeof(float) * cbm, cudaMemcpyDeviceToHost);
/*
for (i = 0; i < cbm; i++) {
printf("%f\n",diffs_new_h[i]);
}*/
diff=0;
for (i = 0; i < cbm; i++) {
diff += diffs_new_h[i];
}
//h_diff=diff;
printf("Probe : %f\n",diff);
cudaDeviceSynchronize();
iteration++;
}
err = cudaMemcpy(pagerank_h, pagerank_d, n_vertices*sizeof(float), cudaMemcpyDeviceToHost);
// CPU time
cycles_to_calc = clock() - start;
int build_milli = cycles_to_build * 1000 / CLOCKS_PER_SEC;
int calc_milli = cycles_to_calc * 1000 / CLOCKS_PER_SEC;
FILE *f_result;
f_result=fopen("rg","w");
for (i=0;i<n_vertices;i++) {
fprintf(f_result,"Vertex %u:\tpagerank = %.18f\n", i, pagerank_h[i]);
}
printf("Time to build: %d seconds, %d milliseconds\n",build_milli/1000, build_milli%1000);
printf("Time to calc: %d seconds, %d milliseconds\n",calc_milli/1000, calc_milli%1000);
printf("iter: %d\n", iteration);
// Free device global memory
// NOTE(review): diffs, diffs_new, successor_offset_d, dangling_value2 and
// the pinned host buffers are never freed, and f_result is never fclose'd.
err = cudaFree(pagerank_d);
err = cudaFree(pagerank_next_d);
// Free host memory
free(pagerank_h);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
11,807 | // Undone
/* Stub for a sum reduction over h_input (marked "Undone" above).
   NOTE(review): no reduction is performed yet -- h_output is never written;
   callers must not rely on its contents. */
__global__ void reduction_sum( double *h_input, double *h_output, int ARRAY_SIZE, int ARRAY_BYTES ){
// Create, Allocate, Calculate, Free Memory, and Return
return;
}
|
11,808 | /* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/* Preforms the same computation that the CPU implementation
did. It stores the answers in the newDisplacement. */
/* Performs the same finite-difference update the CPU implementation did,
   storing the answers in new_d. Updates interior nodes [1, n_Nodes-2];
   boundary nodes are left untouched. Grid-stride loop: any launch
   configuration covers all nodes. */
__global__
void cudaOneDimWaveKernel(const float *old, const float *curr, float *new_d,
        int n_Nodes, float cour) {
    /* fix: the original stride was threadIdx.x*gridDim.x, which is 0 for
       every thread with threadIdx.x == 0 (infinite loop) and skips nodes
       for the rest; the grid-stride is blockDim.x * gridDim.x. */
    const unsigned int stride = blockDim.x * gridDim.x;
    /* a + 1 < n_Nodes is a <= n_Nodes-2 without unsigned wrap-around when
       n_Nodes < 2 */
    for (unsigned int a = 1 + blockIdx.x * blockDim.x + threadIdx.x;
            a + 1 < (unsigned int)n_Nodes; a += stride) {
        new_d[a] =
            2*curr[a] - old[a]
            + cour * (curr[a+1]
                      - 2*curr[a]
                      + curr[a-1]);
    }
}
// Calls the kernel by piping in the blocks and threads/block
// correctly.
// Thin host-side wrapper: forwards the caller-chosen launch configuration
// and the array/scalar arguments straight to the wave kernel.
void callOneDimWave(const unsigned int blocks,
                    const unsigned int threadsPerBlock,
                    const float *old,
                    const float *curr,
                    float *new_d,
                    const unsigned int n_Nodes,
                    float cour) {
    cudaOneDimWaveKernel<<<blocks, threadsPerBlock>>>(old, curr, new_d, n_Nodes, cour);
}
|
11,809 | #include "includes.h"
#pragma once
/*
 * If the GPU has enough capability for all the maps (WORKS)
 */
/* Copies one sub-map out of a wider row-interleaved buffer: thread ppp
   computes a (row, column) position and moves one int from the source
   (row stride mx) to the destination (row stride width).
   NOTE(review): there is no bounds check against either buffer -- the
   launch must supply exactly the right thread count; the index arithmetic
   appears to assume mx is a multiple of width. Verify with callers. */
__global__ void MapSplit1(const int* one, int* result, unsigned int mx, unsigned int width)
{
const unsigned int ppp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int rix = ppp % width;  // column within the sub-map
const unsigned int riy = (ppp / mx) + ((ppp % mx) / width);  // row within the sub-map
const unsigned int xxx = riy * width + rix;  // destination index (stride = width)
const unsigned int ddx = riy * mx + rix;  // source index (stride = mx)
result[xxx] = one[ddx];
} |
11,810 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
/*
* Paint a 2D texture with a moving red/green hatch pattern on a
* strobing blue background. Note that this kernel reads to and
* writes from the texture, hence why this texture was not mapped
* as WriteDiscard.
*/
/* One thread per pixel over a 2D grid; blends a time-varying hatch pattern
   into the RGBA float texture backing `surface` (pitch-linear layout). */
__global__ void cuda_kernel_texture_2d(unsigned char *surface, int width, int height, size_t pitch, float t)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    float *pixel;
    // in the case where, due to quantization into grids, we have
    // more threads than pixels, skip the threads which don't
    // correspond to valid pixels
    if (x >= width || y >= height) return;
    // get a pointer to the pixel at (x,y) -- rows are `pitch` bytes apart
    pixel = (float *)(surface + y*pitch) + 4*x;
    // populate it; fix: use cosf/powf and 0.5f literals so the arithmetic
    // stays in single precision instead of silently promoting to double
    float value_x = 0.5f + 0.5f*cosf(t + 10.0f*((2.0f*x)/width - 1.0f));
    float value_y = 0.5f + 0.5f*cosf(t + 10.0f*((2.0f*y)/height - 1.0f));
    pixel[0] = 0.5f*pixel[0] + 0.5f*powf(value_x, 3.0f); // red
    pixel[1] = 0.5f*pixel[1] + 0.5f*powf(value_y, 3.0f); // green
    pixel[2] = 0.5f + 0.5f*cosf(t); // blue
    pixel[3] = 1; // alpha
}
extern "C"
// Host launcher: covers the width x height surface with 16x16 thread
// blocks and reports any launch-configuration error.
void cuda_texture_2d(void *surface, int width, int height, size_t pitch, float t)
{
    const dim3 block(16, 16); // block dimensions are fixed to be 256 threads
    const dim3 grid((width + block.x - 1) / block.x,
                    (height + block.y - 1) / block.y);
    cuda_kernel_texture_2d<<<grid, block>>>((unsigned char *)surface, width, height, pitch, t);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("cuda_kernel_texture_2d() failed to launch error = %d\n", error);
    }
}
|
11,811 | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* For every masked pixel of the h x w image, marks the (2*ring+1)^2 window
   of m centred on that pixel's correspondence (from corrAB, stored as
   interleaved x,y pairs). One thread per pixel; writes are idempotent. */
__global__ void Ring2_kernel( float *A, float *BP, int *corrAB, int *mask, int *m, int ring, int c, int h, int w )
{
    const int id1 = blockIdx.x * blockDim.x + threadIdx.x;
    const int size = h * w;
    // out of the image, or pixel not selected by the mask: nothing to do
    if (id1 >= size || mask[id1] == 0)
        return;
    const int x2 = corrAB[2 * id1 + 0];
    const int y2 = corrAB[2 * id1 + 1];
    for (int dy = -ring; dy <= ring; dy++) {
        for (int dx = -ring; dx <= ring; dx++) {
            const int px = x2 + dx;
            const int py = y2 + dy;
            // clip the window to the image bounds
            if (px >= 0 && px < w && py >= 0 && py < h) {
                m[py * w + px] = 1;
            }
        }
    }
    return;
} |
11,812 |
// Includes
#include <stdio.h>
// Constants
#define WPT 64
#define THREADS 512
#define BLOCKS 14*2*8
#define N (WPT*BLOCKS*THREADS)
// Kernel
// Kernel
//
// Bandwidth benchmark: each thread streams WPT (= 64) elements from A to B.
// Access j is offset by j*THREADS*BLOCKS (a full grid's worth of elements),
// so every warp issues fully coalesced loads and stores.
// The unrolled loop generates exactly the same 64 copies the original
// hand-unrolled version did.
__global__ void bwbench(unsigned *A, unsigned *B) {
    unsigned i = blockIdx.x*THREADS + threadIdx.x;
    #pragma unroll
    for (unsigned j = 0; j < WPT; j++) {
        B[i + j*THREADS*BLOCKS] = A[i + j*THREADS*BLOCKS];
    }
}
// Timers
cudaEvent_t start;
void timer_start();
void timer_stop();
// Main function
// Main function
// Runs the bandwidth benchmark once: host buffers are initialised, copied
// to the device, streamed through bwbench under the event timer, and copied
// back. Returns 0.
int main(void) {
    unsigned size = N*sizeof(unsigned);
    // Allocate and initialise the data
    unsigned *A = (unsigned *)malloc(size);
    unsigned *B = (unsigned *)malloc(size);
    for (unsigned i=0; i<N; i++) {
        A[i] = i;
        B[i] = 0;
    }
    // Allocate CUDA arrays
    unsigned *devA = 0;
    unsigned *devB = 0;
    cudaMalloc(&devA, size);
    cudaMalloc(&devB, size);
    // Copy to the GPU
    cudaMemcpy(devA, A, size, cudaMemcpyHostToDevice);
    // Configure the kernel
    dim3 threads(THREADS);
    dim3 blocks(BLOCKS);
    // Launch the kernel
    timer_start();
    bwbench<<<blocks, threads>>>(devA, devB);
    timer_stop();
    // Copy from the GPU
    cudaMemcpy(B, devB, size, cudaMemcpyDeviceToHost);
    // Clean-up and exit
    // fix: cudaFree takes the DEVICE pointers -- the original called
    // cudaFree(A)/cudaFree(B) on host malloc'd memory and leaked devA/devB
    cudaFree(devA);
    cudaFree(devB);
    free(A);
    free(B);
    return 0;
}
// Start the timer
// Start the timer: drain any outstanding GPU work, then record the global
// `start` event on the default stream.
void timer_start() {
// make sure nothing queued earlier is attributed to the timed region
cudaDeviceSynchronize();
cudaEventCreate(&start);
cudaEventRecord(start);
}
// End the timer
// End the timer: records a stop event, prints the elapsed time between the
// global `start` event and now, and prints the effective copy bandwidth.
void timer_stop() {
    cudaDeviceSynchronize();
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float timer = 0;
    cudaEventElapsedTime(&timer,start,stop);
    printf("Execution time: %.3lf ms \n", timer);
    // N reads + N writes of `unsigned`, in MiB; divided by elapsed ms this
    // approximates GB/s (1 MiB/ms ~= 0.98 GiB/s)
    float megabytes = (N*2*sizeof(unsigned)) / (1024*1024);
    printf("Bandwidth: %.3lf GB/s \n", megabytes/timer);
    // fix: the original leaked both events on every call
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
11,813 | /* This program is aimed to do the cuda implementation of DESnuts */
#include <cuda.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <inttypes.h>
/**********************************************************************/
/* */
/* DES TABLES */
/* */
/**********************************************************************/
#define MAX_THREADS_1D 32
#define MAX_BLOCKS_1D 256
#define CONSTANT_SIZE (sizeof(int) * 8 * 64)
__constant__ int S_TABLE[CONSTANT_SIZE];
/*
* S Tables: Introduce nonlinearity and avalanche
*/
/* The 8 DES S-boxes, each stored as a flat 64-entry lookup of 4-bit
   values (0..15). Host-side copy; intended to be uploaded to the
   device's constant memory (see S_TABLE above). */
static int table_DES_S[8][64] = {
/* table S[0] */
{ 13, 1, 2, 15, 8, 13, 4, 8, 6, 10, 15, 3, 11, 7, 1, 4,
10, 12, 9, 5, 3, 6, 14, 11, 5, 0, 0, 14, 12, 9, 7, 2,
7, 2, 11, 1, 4, 14, 1, 7, 9, 4, 12, 10, 14, 8, 2, 13,
0, 15, 6, 12, 10, 9, 13, 0, 15, 3, 3, 5, 5, 6, 8, 11 },
/* table S[1] */
{ 4, 13, 11, 0, 2, 11, 14, 7, 15, 4, 0, 9, 8, 1, 13, 10,
3, 14, 12, 3, 9, 5, 7, 12, 5, 2, 10, 15, 6, 8, 1, 6,
1, 6, 4, 11, 11, 13, 13, 8, 12, 1, 3, 4, 7, 10, 14, 7,
10, 9, 15, 5, 6, 0, 8, 15, 0, 14, 5, 2, 9, 3, 2, 12 },
/* table S[2] */
{ 12, 10, 1, 15, 10, 4, 15, 2, 9, 7, 2, 12, 6, 9, 8, 5,
0, 6, 13, 1, 3, 13, 4, 14, 14, 0, 7, 11, 5, 3, 11, 8,
9, 4, 14, 3, 15, 2, 5, 12, 2, 9, 8, 5, 12, 15, 3, 10,
7, 11, 0, 14, 4, 1, 10, 7, 1, 6, 13, 0, 11, 8, 6, 13 },
/* table S[3] */
{ 2, 14, 12, 11, 4, 2, 1, 12, 7, 4, 10, 7, 11, 13, 6, 1,
8, 5, 5, 0, 3, 15, 15, 10, 13, 3, 0, 9, 14, 8, 9, 6,
4, 11, 2, 8, 1, 12, 11, 7, 10, 1, 13, 14, 7, 2, 8, 13,
15, 6, 9, 15, 12, 0, 5, 9, 6, 10, 3, 4, 0, 5, 14, 3 },
/* table S[4] */
{ 7, 13, 13, 8, 14, 11, 3, 5, 0, 6, 6, 15, 9, 0, 10, 3,
1, 4, 2, 7, 8, 2, 5, 12, 11, 1, 12, 10, 4, 14, 15, 9,
10, 3, 6, 15, 9, 0, 0, 6, 12, 10, 11, 1, 7, 13, 13, 8,
15, 9, 1, 4, 3, 5, 14, 11, 5, 12, 2, 7, 8, 2, 4, 14 },
/* table S[5] */
{ 10, 13, 0, 7, 9, 0, 14, 9, 6, 3, 3, 4, 15, 6, 5, 10,
1, 2, 13, 8, 12, 5, 7, 14, 11, 12, 4, 11, 2, 15, 8, 1,
13, 1, 6, 10, 4, 13, 9, 0, 8, 6, 15, 9, 3, 8, 0, 7,
11, 4, 1, 15, 2, 14, 12, 3, 5, 11, 10, 5, 14, 2, 7, 12 },
/* table S[6] */
{ 15, 3, 1, 13, 8, 4, 14, 7, 6, 15, 11, 2, 3, 8, 4, 14,
9, 12, 7, 0, 2, 1, 13, 10, 12, 6, 0, 9, 5, 11, 10, 5,
0, 13, 14, 8, 7, 10, 11, 1, 10, 3, 4, 15, 13, 4, 1, 2,
5, 11, 8, 6, 12, 7, 6, 12, 9, 0, 3, 5, 2, 14, 15, 9 },
/* table S[7] */
{ 14, 0, 4, 15, 13, 7, 1, 4, 2, 14, 15, 2, 11, 13, 8, 1,
3, 10, 10, 6, 6, 12, 12, 11, 5, 9, 9, 5, 0, 3, 7, 8,
4, 15, 1, 12, 14, 8, 8, 2, 13, 4, 6, 9, 2, 1, 11, 7,
15, 5, 12, 11, 9, 3, 7, 14, 3, 10, 10, 0, 5, 6, 0, 13 }
};
/*
void print_bits_array(uint64_t n) {
printf("%lX\n", n);
}
void print_bits(uint64_t n) {
for (int i = 0 ; i < 64; i++) {
if (i == 32)
printf("\n");
printf("%d", ((n) & 0x8000000000000000) >> 63);
n <<= 1;
}
printf("\n");
printf("\n");
}
*/
#define MASK56(n) ((n) & 0x00FFFFFFFFFFFFFF)
#define MASK48(n) ((n) & 0x0000FFFFFFFFFFFF)
#define COMPUTE_ROUND_KEY(roundKey, key) \
roundKey |= ((key & ((1UL) << 0)) << (27)); \
roundKey |= ((key & ((1UL) << 1)) << (18)); \
roundKey |= ((key & ((1UL) << 2)) << (9)); \
roundKey |= ((key & ((1UL) << 3)) << (28)); \
roundKey |= ((key & ((1UL) << 4)) << (35)); \
roundKey |= ((key & ((1UL) << 5)) << (42)); \
roundKey |= ((key & ((1UL) << 6)) << (49)); \
roundKey |= ((key & ((1UL) << 7)) << (19)); \
roundKey |= ((key & ((1UL) << 8)) << (10)); \
roundKey |= ((key & ((1UL) << 9)) << (1)); \
roundKey |= ((key & ((1UL) << 10)) << (20)); \
roundKey |= ((key & ((1UL) << 11)) << (27)); \
roundKey |= ((key & ((1UL) << 12)) << (34)); \
roundKey |= ((key & ((1UL) << 13)) << (41)); \
roundKey |= ((key & ((1UL) << 14)) << (11)); \
roundKey |= ((key & ((1UL) << 15)) << (2)); \
roundKey |= ((key & ((1UL) << 16)) >> (7)); \
roundKey |= ((key & ((1UL) << 17)) << (12)); \
roundKey |= ((key & ((1UL) << 18)) << (19)); \
roundKey |= ((key & ((1UL) << 19)) << (26)); \
roundKey |= ((key & ((1UL) << 20)) << (33)); \
roundKey |= ((key & ((1UL) << 21)) << (3)); \
roundKey |= ((key & ((1UL) << 22)) >> (6)); \
roundKey |= ((key & ((1UL) << 23)) >> (15)); \
roundKey |= ((key & ((1UL) << 24)) << (4)); \
roundKey |= ((key & ((1UL) << 25)) << (11)); \
roundKey |= ((key & ((1UL) << 26)) << (18)); \
roundKey |= ((key & ((1UL) << 27)) << (25)); \
roundKey |= ((key & ((1UL) << 28)) >> (5)); \
roundKey |= ((key & ((1UL) << 29)) >> (14)); \
roundKey |= ((key & ((1UL) << 30)) >> (23)); \
roundKey |= ((key & ((1UL) << 31)) >> (28)); \
roundKey |= ((key & ((1UL) << 32)) << (3)); \
roundKey |= ((key & ((1UL) << 33)) << (10)); \
roundKey |= ((key & ((1UL) << 34)) << (17)); \
roundKey |= ((key & ((1UL) << 35)) >> (13)); \
roundKey |= ((key & ((1UL) << 36)) >> (22)); \
roundKey |= ((key & ((1UL) << 37)) >> (31)); \
roundKey |= ((key & ((1UL) << 38)) >> (36)); \
roundKey |= ((key & ((1UL) << 39)) >> (5)); \
roundKey |= ((key & ((1UL) << 40)) << (2)); \
roundKey |= ((key & ((1UL) << 41)) << (9)); \
roundKey |= ((key & ((1UL) << 42)) >> (21)); \
roundKey |= ((key & ((1UL) << 43)) >> (30)); \
roundKey |= ((key & ((1UL) << 44)) >> (39)); \
roundKey |= ((key & ((1UL) << 45)) >> (44)); \
roundKey |= ((key & ((1UL) << 46)) >> (13)); \
roundKey |= ((key & ((1UL) << 47)) >> (6)); \
roundKey |= ((key & ((1UL) << 48)) << (1)); \
roundKey |= ((key & ((1UL) << 49)) >> (29)); \
roundKey |= ((key & ((1UL) << 50)) >> (38)); \
roundKey |= ((key & ((1UL) << 51)) >> (47)); \
roundKey |= ((key & ((1UL) << 52)) >> (52)); \
roundKey |= ((key & ((1UL) << 53)) >> (21)); \
roundKey |= ((key & ((1UL) << 54)) >> (14)); \
roundKey |= ((key & ((1UL) << 55)) >> (7)); \
#define COMPUTE_IP(L, R, in) \
temp = 0UL; \
temp |= ((in & ((1UL) << 63)) >> (39)); \
temp |= ((in & ((1UL) << 62)) >> (6)); \
temp |= ((in & ((1UL) << 61)) >> (45)); \
temp |= ((in & ((1UL) << 60)) >> (12)); \
temp |= ((in & ((1UL) << 59)) >> (51)); \
temp |= ((in & ((1UL) << 58)) >> (18)); \
temp |= ((in & ((1UL) << 57)) >> (57)); \
temp |= ((in & ((1UL) << 56)) >> (24)); \
temp |= ((in & ((1UL) << 55)) >> (30)); \
temp |= ((in & ((1UL) << 54)) << (3)); \
temp |= ((in & ((1UL) << 53)) >> (36)); \
temp |= ((in & ((1UL) << 52)) >> (3)); \
temp |= ((in & ((1UL) << 51)) >> (42)); \
temp |= ((in & ((1UL) << 50)) >> (9)); \
temp |= ((in & ((1UL) << 49)) >> (48)); \
temp |= ((in & ((1UL) << 48)) >> (15)); \
temp |= ((in & ((1UL) << 47)) >> (21)); \
temp |= ((in & ((1UL) << 46)) << (12)); \
temp |= ((in & ((1UL) << 45)) >> (27)); \
temp |= ((in & ((1UL) << 44)) << (6)); \
temp |= ((in & ((1UL) << 43)) >> (33)); \
temp |= ((in & ((1UL) << 42)) << (0)); \
temp |= ((in & ((1UL) << 41)) >> (39)); \
temp |= ((in & ((1UL) << 40)) >> (6)); \
temp |= ((in & ((1UL) << 39)) >> (12)); \
temp |= ((in & ((1UL) << 38)) << (21)); \
temp |= ((in & ((1UL) << 37)) >> (18)); \
temp |= ((in & ((1UL) << 36)) << (15)); \
temp |= ((in & ((1UL) << 35)) >> (24)); \
temp |= ((in & ((1UL) << 34)) << (9)); \
temp |= ((in & ((1UL) << 33)) >> (30)); \
temp |= ((in & ((1UL) << 32)) << (3)); \
temp |= ((in & ((1UL) << 31)) >> (3)); \
temp |= ((in & ((1UL) << 30)) << (30)); \
temp |= ((in & ((1UL) << 29)) >> (9)); \
temp |= ((in & ((1UL) << 28)) << (24)); \
temp |= ((in & ((1UL) << 27)) >> (15)); \
temp |= ((in & ((1UL) << 26)) << (18)); \
temp |= ((in & ((1UL) << 25)) >> (21)); \
temp |= ((in & ((1UL) << 24)) << (12)); \
temp |= ((in & ((1UL) << 23)) << (6)); \
temp |= ((in & ((1UL) << 22)) << (39)); \
temp |= ((in & ((1UL) << 21)) << (0)); \
temp |= ((in & ((1UL) << 20)) << (33)); \
temp |= ((in & ((1UL) << 19)) >> (6)); \
temp |= ((in & ((1UL) << 18)) << (27)); \
temp |= ((in & ((1UL) << 17)) >> (12)); \
temp |= ((in & ((1UL) << 16)) << (21)); \
temp |= ((in & ((1UL) << 15)) << (15)); \
temp |= ((in & ((1UL) << 14)) << (48)); \
temp |= ((in & ((1UL) << 13)) << (9)); \
temp |= ((in & ((1UL) << 12)) << (42)); \
temp |= ((in & ((1UL) << 11)) << (3)); \
temp |= ((in & ((1UL) << 10)) << (36)); \
temp |= ((in & ((1UL) << 9)) >> (3)); \
temp |= ((in & ((1UL) << 8)) << (30)); \
temp |= ((in & ((1UL) << 7)) << (24)); \
temp |= ((in & ((1UL) << 6)) << (57)); \
temp |= ((in & ((1UL) << 5)) << (18)); \
temp |= ((in & ((1UL) << 4)) << (51)); \
temp |= ((in & ((1UL) << 3)) << (12)); \
temp |= ((in & ((1UL) << 2)) << (45)); \
temp |= ((in & ((1UL) << 1)) << (6)); \
temp |= ((in & ((1UL) << 0)) << (39)); \
L = (temp >> 32) & 0xFFFFFFFF; \
R = (temp) & 0xFFFFFFFF; \
#define COMPUTE_FP(out, L, R) \
temp = L; \
temp = (temp << 32) | R; \
out |= ((temp & ((1UL) << 63)) >> (57)); \
out |= ((temp & ((1UL) << 62)) >> (48)); \
out |= ((temp & ((1UL) << 61)) >> (39)); \
out |= ((temp & ((1UL) << 60)) >> (30)); \
out |= ((temp & ((1UL) << 59)) >> (21)); \
out |= ((temp & ((1UL) << 58)) >> (12)); \
out |= ((temp & ((1UL) << 57)) >> (3)); \
out |= ((temp & ((1UL) << 56)) << (6)); \
out |= ((temp & ((1UL) << 55)) >> (51)); \
out |= ((temp & ((1UL) << 54)) >> (42)); \
out |= ((temp & ((1UL) << 53)) >> (33)); \
out |= ((temp & ((1UL) << 52)) >> (24)); \
out |= ((temp & ((1UL) << 51)) >> (15)); \
out |= ((temp & ((1UL) << 50)) >> (6)); \
out |= ((temp & ((1UL) << 49)) << (3)); \
out |= ((temp & ((1UL) << 48)) << (12)); \
out |= ((temp & ((1UL) << 47)) >> (45)); \
out |= ((temp & ((1UL) << 46)) >> (36)); \
out |= ((temp & ((1UL) << 45)) >> (27)); \
out |= ((temp & ((1UL) << 44)) >> (18)); \
out |= ((temp & ((1UL) << 43)) >> (9)); \
out |= ((temp & ((1UL) << 42)) << (0)); \
out |= ((temp & ((1UL) << 41)) << (9)); \
out |= ((temp & ((1UL) << 40)) << (18)); \
out |= ((temp & ((1UL) << 39)) >> (39)); \
out |= ((temp & ((1UL) << 38)) >> (30)); \
out |= ((temp & ((1UL) << 37)) >> (21)); \
out |= ((temp & ((1UL) << 36)) >> (12)); \
out |= ((temp & ((1UL) << 35)) >> (3)); \
out |= ((temp & ((1UL) << 34)) << (6)); \
out |= ((temp & ((1UL) << 33)) << (15)); \
out |= ((temp & ((1UL) << 32)) << (24)); \
out |= ((temp & ((1UL) << 31)) >> (24)); \
out |= ((temp & ((1UL) << 30)) >> (15)); \
out |= ((temp & ((1UL) << 29)) >> (6)); \
out |= ((temp & ((1UL) << 28)) << (3)); \
out |= ((temp & ((1UL) << 27)) << (12)); \
out |= ((temp & ((1UL) << 26)) << (21)); \
out |= ((temp & ((1UL) << 25)) << (30)); \
out |= ((temp & ((1UL) << 24)) << (39)); \
out |= ((temp & ((1UL) << 23)) >> (18)); \
out |= ((temp & ((1UL) << 22)) >> (9)); \
out |= ((temp & ((1UL) << 21)) << (0)); \
out |= ((temp & ((1UL) << 20)) << (9)); \
out |= ((temp & ((1UL) << 19)) << (18)); \
out |= ((temp & ((1UL) << 18)) << (27)); \
out |= ((temp & ((1UL) << 17)) << (36)); \
out |= ((temp & ((1UL) << 16)) << (45)); \
out |= ((temp & ((1UL) << 15)) >> (12)); \
out |= ((temp & ((1UL) << 14)) >> (3)); \
out |= ((temp & ((1UL) << 13)) << (6)); \
out |= ((temp & ((1UL) << 12)) << (15)); \
out |= ((temp & ((1UL) << 11)) << (24)); \
out |= ((temp & ((1UL) << 10)) << (33)); \
out |= ((temp & ((1UL) << 9)) << (42)); \
out |= ((temp & ((1UL) << 8)) << (51)); \
out |= ((temp & ((1UL) << 7)) >> (6)); \
out |= ((temp & ((1UL) << 6)) << (3)); \
out |= ((temp & ((1UL) << 5)) << (12)); \
out |= ((temp & ((1UL) << 4)) << (21)); \
out |= ((temp & ((1UL) << 3)) << (30)); \
out |= ((temp & ((1UL) << 2)) << (39)); \
out |= ((temp & ((1UL) << 1)) << (48)); \
out |= ((temp & ((1UL) << 0)) << (57)); \
#define COMPUTE_P(out, in) \
out |= ((in & ((1UL) << 0)) << (11)); \
out |= ((in & ((1UL) << 1)) << (16)); \
out |= ((in & ((1UL) << 2)) << (3)); \
out |= ((in & ((1UL) << 3)) << (24)); \
out |= ((in & ((1UL) << 4)) << (21)); \
out |= ((in & ((1UL) << 5)) << (5)); \
out |= ((in & ((1UL) << 6)) << (14)); \
out |= ((in & ((1UL) << 7)) >> (7)); \
out |= ((in & ((1UL) << 8)) << (5)); \
out |= ((in & ((1UL) << 9)) << (12)); \
out |= ((in & ((1UL) << 10)) >> (7)); \
out |= ((in & ((1UL) << 11)) << (17)); \
out |= ((in & ((1UL) << 12)) << (17)); \
out |= ((in & ((1UL) << 13)) >> (6)); \
out |= ((in & ((1UL) << 14)) << (4)); \
out |= ((in & ((1UL) << 15)) << (9)); \
out |= ((in & ((1UL) << 16)) << (15)); \
out |= ((in & ((1UL) << 17)) << (5)); \
out |= ((in & ((1UL) << 18)) >> (6)); \
out |= ((in & ((1UL) << 19)) >> (13)); \
out |= ((in & ((1UL) << 20)) << (6)); \
out |= ((in & ((1UL) << 21)) >> (19)); \
out |= ((in & ((1UL) << 22)) >> (6)); \
out |= ((in & ((1UL) << 23)) >> (15)); \
out |= ((in & ((1UL) << 24)) >> (10)); \
out |= ((in & ((1UL) << 25)) << (5)); \
out |= ((in & ((1UL) << 26)) >> (22)); \
out |= ((in & ((1UL) << 27)) >> (8)); \
out |= ((in & ((1UL) << 28)) >> (27)); \
out |= ((in & ((1UL) << 29)) >> (20)); \
out |= ((in & ((1UL) << 30)) >> (15)); \
out |= ((in & ((1UL) << 31)) >> (8)); \
#define COMPUTE_EXPANSION_E(expB, Rin) \
expB |= ((R & ((1UL) << 31)) >> (31)); \
expB |= ((R & ((1UL) << 0)) << (1)); \
expB |= ((R & ((1UL) << 1)) << (1)); \
expB |= ((R & ((1UL) << 2)) << (1)); \
expB |= ((R & ((1UL) << 3)) << (1)); \
expB |= ((R & ((1UL) << 4)) << (1)); \
expB |= ((R & ((1UL) << 3)) << (3)); \
expB |= ((R & ((1UL) << 4)) << (3)); \
expB |= ((R & ((1UL) << 5)) << (3)); \
expB |= ((R & ((1UL) << 6)) << (3)); \
expB |= ((R & ((1UL) << 7)) << (3)); \
expB |= ((R & ((1UL) << 8)) << (3)); \
expB |= ((R & ((1UL) << 7)) << (5)); \
expB |= ((R & ((1UL) << 8)) << (5)); \
expB |= ((R & ((1UL) << 9)) << (5)); \
expB |= ((R & ((1UL) << 10)) << (5)); \
expB |= ((R & ((1UL) << 11)) << (5)); \
expB |= ((R & ((1UL) << 12)) << (5)); \
expB |= ((R & ((1UL) << 11)) << (7)); \
expB |= ((R & ((1UL) << 12)) << (7)); \
expB |= ((R & ((1UL) << 13)) << (7)); \
expB |= ((R & ((1UL) << 14)) << (7)); \
expB |= ((R & ((1UL) << 15)) << (7)); \
expB |= ((R & ((1UL) << 16)) << (7)); \
expB |= ((R & ((1UL) << 15)) << (9)); \
expB |= ((R & ((1UL) << 16)) << (9)); \
expB |= ((R & ((1UL) << 17)) << (9)); \
expB |= ((R & ((1UL) << 18)) << (9)); \
expB |= ((R & ((1UL) << 19)) << (9)); \
expB |= ((R & ((1UL) << 20)) << (9)); \
expB |= ((R & ((1UL) << 19)) << (11)); \
expB |= ((R & ((1UL) << 20)) << (11)); \
expB |= ((R & ((1UL) << 21)) << (11)); \
expB |= ((R & ((1UL) << 22)) << (11)); \
expB |= ((R & ((1UL) << 23)) << (11)); \
expB |= ((R & ((1UL) << 24)) << (11)); \
expB |= ((R & ((1UL) << 23)) << (13)); \
expB |= ((R & ((1UL) << 24)) << (13)); \
expB |= ((R & ((1UL) << 25)) << (13)); \
expB |= ((R & ((1UL) << 26)) << (13)); \
expB |= ((R & ((1UL) << 27)) << (13)); \
expB |= ((R & ((1UL) << 28)) << (13)); \
expB |= ((R & ((1UL) << 27)) << (15)); \
expB |= ((R & ((1UL) << 28)) << (15)); \
expB |= ((R & ((1UL) << 29)) << (15)); \
expB |= ((R & ((1UL) << 30)) << (15)); \
expB |= ((R & ((1UL) << 31)) << (15)); \
expB |= ((R & ((1UL) << 0)) << (47)); \
#define COMPUTE_PC2(subkey, roundKey) \
subkey |= ((roundKey & ((1UL) << 24)) >> (24)); \
subkey |= ((roundKey & ((1UL) << 27)) >> (26)); \
subkey |= ((roundKey & ((1UL) << 20)) >> (18)); \
subkey |= ((roundKey & ((1UL) << 6)) >> (3)); \
subkey |= ((roundKey & ((1UL) << 14)) >> (10)); \
subkey |= ((roundKey & ((1UL) << 10)) >> (5)); \
subkey |= ((roundKey & ((1UL) << 3)) << (3)); \
subkey |= ((roundKey & ((1UL) << 22)) >> (15)); \
subkey |= ((roundKey & ((1UL) << 0)) << (8)); \
subkey |= ((roundKey & ((1UL) << 17)) >> (8)); \
subkey |= ((roundKey & ((1UL) << 7)) << (3)); \
subkey |= ((roundKey & ((1UL) << 12)) >> (1)); \
subkey |= ((roundKey & ((1UL) << 8)) << (4)); \
subkey |= ((roundKey & ((1UL) << 23)) >> (10)); \
subkey |= ((roundKey & ((1UL) << 11)) << (3)); \
subkey |= ((roundKey & ((1UL) << 5)) << (10)); \
subkey |= ((roundKey & ((1UL) << 16)) >> (0)); \
subkey |= ((roundKey & ((1UL) << 26)) >> (9)); \
subkey |= ((roundKey & ((1UL) << 1)) << (17)); \
subkey |= ((roundKey & ((1UL) << 9)) << (10)); \
subkey |= ((roundKey & ((1UL) << 19)) << (1)); \
subkey |= ((roundKey & ((1UL) << 25)) >> (4)); \
subkey |= ((roundKey & ((1UL) << 4)) << (18)); \
subkey |= ((roundKey & ((1UL) << 15)) << (8)); \
subkey |= ((roundKey & ((1UL) << 54)) >> (30)); \
subkey |= ((roundKey & ((1UL) << 43)) >> (18)); \
subkey |= ((roundKey & ((1UL) << 36)) >> (10)); \
subkey |= ((roundKey & ((1UL) << 29)) >> (2)); \
subkey |= ((roundKey & ((1UL) << 49)) >> (21)); \
subkey |= ((roundKey & ((1UL) << 40)) >> (11)); \
subkey |= ((roundKey & ((1UL) << 48)) >> (18)); \
subkey |= ((roundKey & ((1UL) << 30)) << (1)); \
subkey |= ((roundKey & ((1UL) << 52)) >> (20)); \
subkey |= ((roundKey & ((1UL) << 44)) >> (11)); \
subkey |= ((roundKey & ((1UL) << 37)) >> (3)); \
subkey |= ((roundKey & ((1UL) << 33)) << (2)); \
subkey |= ((roundKey & ((1UL) << 46)) >> (10)); \
subkey |= ((roundKey & ((1UL) << 35)) << (2)); \
subkey |= ((roundKey & ((1UL) << 50)) >> (12)); \
subkey |= ((roundKey & ((1UL) << 41)) >> (2)); \
subkey |= ((roundKey & ((1UL) << 28)) << (12)); \
subkey |= ((roundKey & ((1UL) << 53)) >> (12)); \
subkey |= ((roundKey & ((1UL) << 51)) >> (9)); \
subkey |= ((roundKey & ((1UL) << 55)) >> (12)); \
subkey |= ((roundKey & ((1UL) << 32)) << (12)); \
subkey |= ((roundKey & ((1UL) << 45)) >> (0)); \
subkey |= ((roundKey & ((1UL) << 39)) << (7)); \
subkey |= ((roundKey & ((1UL) << 42)) << (5)); \
#define COMPUTES_LOOKUP(k, sout, expandedBlock) \
sout |= S_TABLE[k * 64 + ((expandedBlock >> (6 * k)) & 0x3F)] << (4 * k); \
/* This is the host code
#define COMPUTES_LOOKUP(k, sout, expandedBlock) \
sout |= table_DES_S[k][(expandedBlock >> (6 * k)) & 0x3F] << (4 * k); \
*/
/*
uint32_t COMPUTE_F(uint32_t fout, uint32_t R, uint64_t roundKey) {
uint64_t expandedBlock = 0UL, subkey = 0UL;
uint32_t sout = 0;
int i, k;
COMPUTE_EXPANSION_E(expandedBlock, R)
printf("expanded E is : \n");
print_bits_array(expandedBlock);
COMPUTE_PC2(subkey, roundKey)
printf("subkey is :\n");
print_bits_array(subkey);
expandedBlock ^= subkey;
// Mask expandedBlock
expandedBlock = MASK48(expandedBlock);
printf("Expanded E is :\n");
print_bits_array(expandedBlock);
for (k = 0; k < 8; k++) {
COMPUTES_LOOKUP(k, sout, expandedBlock)
printf("sout @ %d is :\n", k);
print_bits_array(sout);
}
COMPUTE_P(fout, sout)
printf("fout is :\n");
print_bits_array(fout);
printf("sout is :\n");
print_bits_array(sout);
return fout;
}
*/
#define ROTATE_ROUND_KEY_LEFT(roundK) \
uint64_t bit27 = ((roundK & ((1UL) << 27)) >> 27);\
uint64_t bit55 = ((roundK & ((1UL) << 55)) >> 27);\
roundK <<= 1; \
temp = roundK & 0x00FFFFFFEFFFFFFE; \
roundK = temp | bit27 | bit55; \
#define EXCHANGE_L_AND_R(L, R) \
temp = L; \
L = R; \
R = temp; \
/*
 * Brute-force DES key search.
 * Each thread tests `bound` consecutive candidate keys starting at
 * threadId * bound: it runs a full 16-round DES encryption of `in` and
 * compares the ciphertext against `expected`. On a match the matching
 * KEY is written to *result; the host treats a non-zero *result as
 * "key found". Launched on a 2-D grid of 2-D blocks.
 */
__global__ void EncryptDES_device(uint64_t in, uint64_t expected, uint64_t* result, uint64_t bound) {
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
    uint64_t key = threadId * bound;  /* first candidate key for this thread */
    uint64_t counter = 0;
    while (counter != bound) {
        uint32_t R = 0, L = 0;
        uint64_t roundKey = 0UL, out = 0UL, temp = 0UL;
        COMPUTE_ROUND_KEY(roundKey, key)  /* PC-1 style 64 -> 56-bit key schedule state */
        COMPUTE_IP(L, R, in)              /* initial permutation, split into 32-bit halves */
        for (int round = 0; round < 16; round++) {
            uint64_t expandedBlock = 0UL, subkey = 0UL;
            uint32_t sout = 0;
            uint32_t fout = 0;
            ROTATE_ROUND_KEY_LEFT(roundKey)
            /* DES schedule: rounds 1, 2, 9 and 16 rotate once, all others twice */
            if (round != 0 && round != 1 && round != 8 && round != 15) {
                ROTATE_ROUND_KEY_LEFT(roundKey)
            }
            COMPUTE_EXPANSION_E(expandedBlock, R)  /* E: 32 -> 48 bits */
            COMPUTE_PC2(subkey, roundKey)          /* PC-2: 56 -> 48-bit subkey */
            expandedBlock ^= subkey;
            expandedBlock = MASK48(expandedBlock);
            for (int i = 0; i < 8; i++) {
                COMPUTES_LOOKUP(i, sout, expandedBlock)  /* 8 S-box substitutions */
            }
            COMPUTE_P(fout, sout)  /* P permutation completes the f-function */
            L ^= fout;
            EXCHANGE_L_AND_R(L, R)
        }
        EXCHANGE_L_AND_R(L, R)  /* undo the extra swap after round 16 */
        COMPUTE_FP(out, L, R)   /* final permutation -> ciphertext */
        if (out == expected) {
            /* BUGFIX: report the matching KEY, not the ciphertext. The host
             * prints *result as "Key found"; storing `out` merely echoed the
             * expected ciphertext back and lost the recovered key. */
            *result = key;
            // asm("trap;");
        }
        counter++;
        key++;
    }
    __syncthreads();
}
/*
void EncryptDES_host(uint64_t key, uint64_t in, uint64_t expected) {
uint32_t R = 0, L = 0;
uint64_t roundKey = 0UL, out = 0UL, temp = 0UL;
printf("sizeof(unsigned long long) is %d\n", sizeof(unsigned long long));
COMPUTE_ROUND_KEY(roundKey, key)
printf("roundKey is: \n");
print_bits_array(roundKey);
COMPUTE_IP(L, R, in)
printf("after IP is: \n");
printf("\t L:\n");
print_bits_array(L);
printf("\t R:\n");
print_bits_array(R);
for (int round = 0; round < 16; round++) {
uint64_t expandedBlock = 0UL, subkey = 0UL;
uint32_t sout = 0;
uint32_t fout = 0;
printf("------------------------- ROUND %d ----------------------\n", round);
ROTATE_ROUND_KEY_LEFT(roundKey)
printf("\t roundKey:\n");
print_bits_array(roundKey);
if (round != 0 && round != 1 && round != 8 && round != 15) {
ROTATE_ROUND_KEY_LEFT(roundKey)
}
COMPUTE_EXPANSION_E(expandedBlock, R)
printf("expanded E is : \n");
print_bits_array(expandedBlock);
COMPUTE_PC2(subkey, roundKey)
printf("subkey is :\n");
print_bits_array(subkey);
expandedBlock ^= subkey;
// Mask expandedBlock
expandedBlock = MASK48(expandedBlock);
printf("Expanded E is :\n");
print_bits_array(expandedBlock);
for (int i = 0; i < 8; i++) {
Comment out for compilation of the device code
COMPUTES_LOOKUP(i, sout, expandedBlock)
printf("sout @ %d is :\n", i);
print_bits_array(sout);
}
COMPUTE_P(fout, sout)
printf("fout is :\n");
print_bits_array(fout);
printf("sout is :\n");
print_bits_array(sout);
printf("f is : \n");
print_bits_array(fout);
L ^= fout;
printf("L^f is : \n");
print_bits_array(L);
EXCHANGE_L_AND_R(L, R)
printf("------------------------- ROUND %d end ------------------\n", round);
}
EXCHANGE_L_AND_R(L, R)
COMPUTE_FP(out, L, R)
printf("FP out is \n");
print_bits_array(out);
}
*/
/* Host driver: uploads the S-boxes to constant memory, launches the
 * brute-force search over a sampled subset of the keyspace, and reports
 * timing plus an extrapolated time-to-crack.
 * NOTE(review): dimBlock(threads, threads) yields threads^2 threads per
 * block; with MAX_THREADS_1D defined elsewhere this must stay <= 1024 or
 * every launch silently fails (no cudaGetLastError() check here) —
 * confirm against the header that defines MAX_THREADS_1D. */
int main(int argc, char **argv) {
/* plaintext block to encrypt */
uint64_t random_o = 0xF77D7F53F77D7F53;
// uint64_t random_k = 0x2FEABF912FEABF;
/* ciphertext we are searching for a key for */
uint64_t expected = 0xDF86B0B30BD2530A;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
/* *result_host starts at 0; non-zero after the kernel means a match */
uint64_t *result_host = (uint64_t *)calloc(1, sizeof(uint64_t));
uint64_t *result_device;
cudaMalloc(&result_device, sizeof(uint64_t));
cudaMemcpy(result_device, result_host, sizeof(uint64_t), cudaMemcpyHostToDevice);
/* upload the flattened 8x64 S-boxes into constant memory */
cudaMemcpyToSymbol(S_TABLE, table_DES_S, CONSTANT_SIZE);
int threads = MAX_THREADS_1D / 2;
int blocks = (MAX_BLOCKS_1D - 1);
/* overall_total: size of the full keyspace being extrapolated to;
 * target_total: number of keys actually tested in this run */
uint64_t overall_total = 0x0FFFFFFFFFFFFFFFULL;
uint64_t target_total = 0xFFFFFFFFFFULL;
dim3 dimGrid(blocks, blocks);
dim3 dimBlock(threads, threads);
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
/* each thread tests target_total / totalThreads consecutive keys */
EncryptDES_device<<<dimGrid, dimBlock>>>(random_o, expected, result_device, (target_total / (blocks * blocks * threads * threads)));
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Cuda Execution Report: \n");
printf("Targetting number of testing key - %" PRIu64 "\n", target_total);
printf("Time spent %0.8f ms\n", milliseconds);
printf("\n");
/* linear extrapolation from the sampled subset to the full keyspace */
printf("Estimated time to crack DES is %0.8f ms\n", (overall_total * 1.0 / target_total) * milliseconds);
cudaMemcpy(result_host, result_device, sizeof(uint64_t), cudaMemcpyDeviceToHost);
/* a non-zero result is reported as the found key; %lX assumes 64-bit
 * long — NOTE(review): %" PRIx64 " would be portable like PRIu64 above */
if (*result_host != 0x0)
printf("Key found: %lX\n", *result_host);
free(result_host);
cudaFree(result_device);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
11,814 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
/*
 * Read `len` whitespace-separated integers from `filename` into a newly
 * allocated array. Exits with a diagnostic if the allocation fails, the
 * file cannot be opened, or it contains fewer than `len` integers (the
 * original silently dereferenced a NULL FILE* and returned garbage).
 * The caller owns (and must free) the returned buffer.
 */
int* read_array(const char* filename, int len) {
    int *x = (int*) malloc(len * sizeof(int));
    if (x == NULL) {
        fprintf(stderr, "read_array: out of memory\n");
        exit(EXIT_FAILURE);
    }
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        fprintf(stderr, "read_array: cannot open %s\n", filename);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < len; i++) {
        if (fscanf(fp, "%d", &x[i]) != 1) {
            fprintf(stderr, "read_array: %s: expected %d integers, got %d\n", filename, len, i);
            fclose(fp);
            exit(EXIT_FAILURE);
        }
    }
    fclose(fp);
    return x;
}
/*
 * Skeleton driver: loads a 32x16 matrix A and a 32-vector B, is meant to
 * compute C = A^T * B (or similar) on the GPU and compare against a host
 * reference. The TODO sections are intentionally left for the student.
 */
int main(int argc, char *argv[]) {
    if (argc != 1) {
        printf("Invalid argument Usage: ./problem1");
        return -1;
    }
    const int rowWidth=32;
    const int colWidth=16;
    int *hA = read_array("inputA.inp",rowWidth*colWidth );
    int *hB = read_array("inputB.inp", rowWidth);
    /* BUGFIX: allocate refC (the original compared against an
     * uninitialised pointer — guaranteed undefined behaviour) and
     * zero-initialise both result buffers so the error loop below is
     * well-defined even before the TODOs are filled in. */
    int *hC = (int*) calloc(colWidth, sizeof(int));
    int *refC = (int*) calloc(colWidth, sizeof(int));
    // TODO do a reference host implementation (Ch) here. ie populate answer in refC
    int *dA, *dB, *dC;
    // TODO allocate device memory for dA,dB and dC
    // TODO copy data from host to GPU
    // TODO call your kernel
    // TODO copyback results
    float Error=0;
    for(int i=0;i<colWidth;i++)
        Error+=(hC[i]-refC[i])*(hC[i]-refC[i]);
    printf("%f\n%d",sqrt(Error),hC[colWidth-1]);
    free(refC);
    free(hC);   /* was leaked by the original */
    free(hB);
    free(hA);
    return 0;
}
|
11,815 | /******************************************
*******************************************/
#include <time.h>
#include <stdio.h>
#include <limits.h>
#define BLOCK_SIZE 32
void run_test();
//----------
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic when a CUDA call fails and, unless the
// caller opts out, terminate the process with the error code.
// Intended to be invoked through the gpuErrchk() macro so that the
// failing file and line are reported automatically.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// vector broadcast general except first dimension(x)
// General element-wise broadcast add: z = x + y, where x, y and z may have
// different (broadcast-compatible) layouts. stride_x / stride_y / stride_z
// hold per-dimension element strides, innermost first; a stride of 0 marks
// a broadcast dimension. The innermost dimension is assumed NOT broadcast
// (stride 1 on all three tensors), matching the original's "general except
// first dimension" comment. dimlen_z = number of dimensions, N_z = total
// element count of z. Grid-stride loop: any launch config covers all of z.
__global__ void broadcast_multi(float *x, float *y, float *z,
                                int *stride_x, int *stride_y,
                                int *stride_z, int N_z, int dimlen_z) {
    int index_z = threadIdx.x + (blockIdx.x * blockDim.x);
    while (index_z < N_z) {
        // Decompose the linear z index into per-dimension coordinates
        // (outermost first) and re-linearise with the x / y strides.
        int temp_index = index_z;
        int index_x = 0;
        int index_y = 0;
        for (int i = dimlen_z - 1; i > 0; i--) {
            int coords = temp_index / stride_z[i];
            index_x += stride_x[i] * coords;
            index_y += stride_y[i] * coords;
            temp_index = temp_index % stride_z[i];
        }
        // Remainder is the innermost coordinate (stride 1 everywhere).
        index_x += temp_index;
        index_y += temp_index;
        // BUGFIX: use the broadcast-adjusted indices. The original indexed
        // x and y with index_z, discarding the stride computation above —
        // wrong (out-of-bounds reads) whenever the shapes actually differ.
        z[index_z] = x[index_x] + y[index_y];
        index_z += blockDim.x * gridDim.x;
    }
}
// Entry point: run the broadcast benchmark once.
int main(void)
{
    run_test();
    return 0;
}
// Benchmark broadcast_multi on two identically-shaped 6-D tensors
// (A filled with 1, B with 2; every output element must equal 3) and
// report the effective memory bandwidth averaged over ITER launches.
void run_test()
{
    const int A_len = 6;
    int A_dimsizes[A_len] = {50,50,20,40,4,4};
    int B_dimsizes[A_len] = {50,50,20,40,4,4};

    // Element strides for A, innermost dimension first (stride[0] == 1).
    int A_strides[A_len];
    int A_N = 1;
    A_strides[0] = 1;
    int k = 1;
    for (int i = 0; i < A_len - 1; i++)
    {
        k *= A_dimsizes[i];
        A_strides[i+1] = k;
        A_N *= A_dimsizes[i];
    }
    A_N *= A_dimsizes[A_len-1];

    // Same layout computation for B.
    int B_len = A_len;
    int B_strides[B_len];
    int B_N = 1;
    B_strides[0] = 1;
    k = 1;
    for (int i = 0; i < B_len - 1; i++)
    {
        k *= B_dimsizes[i];
        B_strides[i+1] = k;
        B_N *= B_dimsizes[i];
    }
    B_N *= B_dimsizes[A_len-1];

    // Broadcast rule: where the extents differ, the size-1 side gets
    // stride 0 so its single element repeats along that dimension.
    for (int i = 0; i < B_len; i++)
    {
        if (B_dimsizes[i] != A_dimsizes[i])
        {
            if (B_dimsizes[i] == 1)
                B_strides[i] = 0;
            else
                A_strides[i] = 0;
        }
    }

    // Output C takes the larger extent in every dimension.
    int C_N = 1;
    int C_dimsizes[A_len];
    for (int i = 0; i < B_len; i++)
    {
        C_dimsizes[i] = (B_dimsizes[i] >= A_dimsizes[i]) ? B_dimsizes[i] : A_dimsizes[i];
        C_N *= C_dimsizes[i];
    }
    int C_strides[A_len];
    C_strides[0] = 1;
    k = 1;
    for (int i = 0; i < A_len - 1; i++)
    {
        k *= C_dimsizes[i];
        C_strides[i+1] = k;
    }

    // Host buffers, device buffers and device-side stride tables.
    float *A, *B, *C, *d_A, *d_B, *d_C;
    int *d_stride_A, *d_stride_B, *d_stride_C;
    A = (float*)malloc(A_N*sizeof(float));
    B = (float*)malloc(B_N*sizeof(float));
    C = (float*)malloc(C_N*sizeof(float));
    cudaMalloc(&d_A, A_N*sizeof(float));
    cudaMalloc(&d_B, B_N*sizeof(float));
    cudaMalloc(&d_C, C_N*sizeof(float));
    cudaMalloc(&d_stride_A, B_len*sizeof(int));
    cudaMalloc(&d_stride_B, B_len*sizeof(int));
    cudaMalloc(&d_stride_C, B_len*sizeof(int));

    for (int i = 0; i < A_N; i++) {
        A[i] = 1;
    }
    for (int i = 0; i < B_N; i++) {
        B[i] = 2;
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    gpuErrchk(cudaMemcpy(d_A, A, A_N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_B, B, B_N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_C, C, C_N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_stride_A, A_strides, A_len*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_stride_B, B_strides, A_len*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_stride_C, C_strides, A_len*sizeof(int), cudaMemcpyHostToDevice));

    // Time ITER back-to-back launches and average.
    cudaEventRecord(start);
    int ITER = 10;
    for (int i = 0; i < ITER; i++)
    {
        broadcast_multi<<<256,256>>>(d_A, d_B, d_C,
                                     d_stride_A, d_stride_B,
                                     d_stride_C, C_N, A_len);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    milliseconds = milliseconds / ITER;

    gpuErrchk(cudaMemcpy(C, d_C, C_N*sizeof(float), cudaMemcpyDeviceToHost));

    // Verify: 1 + 2 == 3 everywhere.
    int maxError = 0;
    for (int i = 0; i < C_N; i++)
    {
        if (C[i] != 3)
        {
            maxError += 1;
            // BUGFIX: C[i] is a float; the original printed it with %d,
            // which is undefined behaviour and printed garbage.
            printf("error value %f index %d \n", C[i], i);
        }
    }
    for (int i = 0; i < A_len; i++)
    {
        printf("Astride %d : %d \n", i, A_strides[i]);
    }
    for (int i = 0; i < A_len; i++)
    {
        printf("Bstride %d : %d \n", i, B_strides[i]);
    }
    for (int i = 0; i < A_len; i++)
    {
        printf("Cstride %d : %d \n", i, C_strides[i]);
    }
    for (int i = 0; i < A_len; i++)
    {
        printf("C_dimsizes %d : %d \n", i, C_dimsizes[i]);
    }
    if (maxError != 0)
    {
        printf("Max error: %d\n", maxError);
    }

    // Bytes moved per launch (read A, read B, write C) over average time.
    double total_data = (((double)A_N) + (double)B_N + (double)C_N) * 4;
    printf("Effective Bandwidth %f\n", total_data/milliseconds/1e6);

    // Release everything (the original leaked all allocations and events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    cudaFree(d_stride_A); cudaFree(d_stride_B); cudaFree(d_stride_C);
    free(A); free(B); free(C);
}
11,816 |
// ParallelSPSS.Form1
extern "C" __global__ void addVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N);
// ParallelSPSS.Form1
// Element-wise vector add: c[i] = a[i] + b[i] for i in [0, N).
// Grid-stride loop so any <<<grid, block>>> configuration covers every
// element. The *Len0 parameters are kept for ABI compatibility with the
// generated caller.
extern "C" __global__ void addVector( float* a, int aLen0, float* b, int bLen0, float* c, int cLen0, int N)
{
    // BUGFIX: stride by the total thread count (gridDim.x * blockDim.x).
    // The original strided by gridDim.x alone, so many threads redundantly
    // recomputed the same elements (correct output, massively wasted work).
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += gridDim.x * blockDim.x)
    {
        c[(i)] = a[(i)] + b[(i)];
    }
}
|
11,817 | #include <iostream>
#include <stdio.h>
#include <cstdlib>
using namespace std;
/* Two-phase max-reduction: each block reduces its slice of deviceArray in
 * shared memory and writes the block maximum to deviceResult[blockIdx.x];
 * a second loop then tries to reduce deviceResult across blocks.
 * NOTE(review): `size` is never used — there is no bounds check, so the
 * kernel reads past deviceArray when gridDim.x*blockDim.x > size (the
 * caller currently launches one extra block). Confirm and guard.
 * NOTE(review): the cross-block phase has no grid-wide synchronization;
 * blocks may read deviceResult entries other blocks have not written yet
 * (a data race). A second kernel launch or cooperative grid sync is
 * needed for this to be deterministic. */
__global__ void verticalOperation(int size, float *deviceArray, float *deviceResult) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int blockSize = blockDim.x; //NOTE: this would also be the amount of values in shared memory (or it should be anyways)
//allocated shared memory to reduce global memory access overhead
extern __shared__ float sdata[];
//move each value from deviceArray pointer into shared_memory_array
sdata[threadIdx.x] = deviceArray[index];
__syncthreads();
//stride is currently the length of the unsorted array that still needs to be compared
/* Phase 1: tree reduction in shared memory; keep the larger of each pair. */
for (int stride = blockSize; stride >= 1; stride /= 2) {
if (threadIdx.x < stride/2) {
if (sdata[threadIdx.x + (stride/2)] > sdata[threadIdx.x]) {
sdata[threadIdx.x] = sdata[threadIdx.x + (stride/2)];
}
}
__syncthreads();
}
/* Block leader publishes this block's maximum. */
if (threadIdx.x == 0) { deviceResult[blockIdx.x] = sdata[0]; }
//stride is currently the length of the unsorted array that still needs to be compared
/* Phase 2: cross-block reduction of the per-block maxima (see race note
 * above — there is no __syncthreads() here either, and none would help
 * across blocks). */
for (int stride = gridDim.x; stride >= 1; stride /= 2) {
if (index < stride/2) {
if (deviceResult[index + (stride/2)] > deviceResult[index]) {
deviceResult[index] = deviceResult[index + (stride/2)];
}
}
}
}
// Exercise verticalOperation: build a 2^18-element array of zeros with a
// few known spikes (max = 1000), reduce it on the GPU, and print the
// resulting maximum (result[0]).
void testVerticalOperation() {
    int number_of_values = 1 << 18;
    int memSize = number_of_values*sizeof(float);
    int blockSize = 256;
    // BUGFIX: ceiling division. The original (n/blockSize)+1 launched an
    // extra block whenever n was an exact multiple of blockSize, making
    // the kernel read past the end of deviceValue.
    int numBlocks = (number_of_values + blockSize - 1) / blockSize;
    float *deviceValue, *deviceResult; //device copies
    // Host copies on the heap: the original declared two ~1 MB arrays on
    // the stack, risking overflow on platforms with small stack limits.
    float *initialValue = (float*) malloc(memSize);
    float *result = (float*) malloc(memSize);
    for (int x = 0; x < number_of_values; x++) {
        initialValue[x] = 0.0f;
    }
    initialValue[2] = 500.0f;
    initialValue[3] = 600.0f;
    initialValue[66] = 998.0f;
    initialValue[30000] = 1000.0f;
    // Device input buffer.
    cudaError_t cudaMallocErr1 = cudaMalloc(&deviceValue, memSize);
    if (cudaMallocErr1 != cudaSuccess) {
        cout << "CUDA Error" << endl;
    }
    // Device result buffer (only numBlocks entries are used, but the full
    // size is kept so the copy-back below stays unchanged).
    cudaError_t cudaMallocErr2 = cudaMalloc(&deviceResult, memSize);
    if (cudaMallocErr2 != cudaSuccess) {
        cout << "CUDA Error" << endl;
    }
    //copy memory to device from host and print error if found
    cudaError_t cudaMemcpy1Err = cudaMemcpy(deviceValue, initialValue, memSize, cudaMemcpyHostToDevice);
    if (cudaMemcpy1Err != cudaSuccess) {
        cout << "Memcpy to Device Error: " << cudaMemcpy1Err << endl;
    }
    // One float of dynamic shared memory per thread (same byte count as
    // the original memSize/blockSize, stated directly).
    verticalOperation<<<numBlocks, blockSize, blockSize*sizeof(float)>>>(number_of_values, deviceValue, deviceResult);
    //Forces CPU to wait for GPU to finish before accessing
    cudaDeviceSynchronize();
    //copy memory to host from device and print error if found
    cudaError_t cudaMemcpy2Err = cudaMemcpy(result, deviceResult, memSize, cudaMemcpyDeviceToHost);
    if (cudaMemcpy2Err != cudaSuccess) {
        cout << "Memcpy to Host Error: " << cudaMemcpy2Err << endl;
    }
    cout << result[0] << endl;
    cout << "Done!" << endl;
    // Free memory
    cudaFree(deviceValue);
    cudaFree(deviceResult);
    free(initialValue);
    free(result);
}
// Launch the GPU max-reduction test and exit.
int main() {
    testVerticalOperation();
    return 0;
}
|
11,818 | #include "includes.h"
// Mandelbrot escape-time kernel. Each thread computes one pixel; image
// rows are distributed round-robin across `num_processes` ranks (this
// rank is `my_pid`) and further split into slices of MAX_ROWS_PER_KERNEL
// rows per launch (`rw` = slice index). The iteration count (0 for
// interior points) is written to coloresd[fila * img_width + columna].
__global__ void mandelKernel(double planoFactorXd, double planoFactorYd, double planoVxd, double planoVyd, int maxIteracionesd, unsigned int *coloresd, int img_width, int img_height, int num_processes, int my_pid, int rw) {
    // Pixel handled by this thread.
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    int fila = (rw * MAX_ROWS_PER_KERNEL) + (blockIdx.y * blockDim.y) + threadIdx.y;
    int real_row = (fila * num_processes) + my_pid;
    // BUGFIX: also guard the column. The original only checked the row, so
    // whenever img_width was not a multiple of blockDim.x the rightmost
    // threads wrote past the end of each row of coloresd.
    if (columna >= img_width || real_row >= img_height)
        return;
    // Map the pixel to its point c = (X, Y) in the complex plane (rows are
    // flipped so row 0 is the top of the image).
    double X = (planoFactorXd * (double)columna) + planoVxd;
    double Y = (planoFactorYd * ((double)(img_height - 1) - (double)real_row)) + planoVyd;
    // Iterate z -> z^2 + c until escape (|z|^2 > 4) or the iteration cap.
    double pReal = 0.0;
    double pImag = 0.0;
    double pRealAnt, pImagAnt, distancia;
    int i = 0;
    do {
        pRealAnt = pReal;
        pImagAnt = pImag;
        pReal = ((pRealAnt * pRealAnt) - (pImagAnt * pImagAnt)) + X;
        pImag = (2.0 * (pRealAnt * pImagAnt)) + Y;
        i++;
        distancia = pReal*pReal + pImag*pImag;
    } while ((i < maxIteracionesd) && (distancia <= 4.0));
    // Points that never escaped are rendered with colour index 0.
    if (i == maxIteracionesd) i = 0;
    coloresd[(fila * img_width) + columna] = i;
}
11,819 | #include<stdio.h>
#include<math.h>
#define N 8
// In-place Blelloch exclusive prefix sum of N elements in a single block.
// Launch with <<<1, N/2>>> (N is a power of two); d_in[i] becomes the sum
// of d_in[0..i-1].
__global__ void exclusive_scan(int *d_in){
    // Phase 1 (up-sweep / reduce): after this loop d_in[N-1] holds the
    // total of the whole array.
    int s = 1;
    for (; s <= N-1; s <<= 1) {
        int i = 2*s*(threadIdx.x+1) - 1;
        bool active = (i - s >= 0) && (i < N);
        int a = 0, b = 0;
        if (active) {
            a = d_in[i];
            b = d_in[i-s];
        }
        // BUGFIX: __syncthreads() must be reached by every thread of the
        // block. The original placed the barriers inside the `active`
        // branch, which diverges from s == 2 onward — undefined behaviour
        // (deadlock or corruption). Reads and writes are now separated by
        // barriers that all threads execute.
        __syncthreads();
        if (active)
            d_in[i] = a + b;
        __syncthreads();
    }
    // Phase 2 (down-sweep): seed the root with the identity, then push
    // partial sums back down the tree.
    if (threadIdx.x == 0)
        d_in[N-1] = 0;
    __syncthreads();
    for (s = s/2; s >= 1; s >>= 1) {
        int i = 2*s*(threadIdx.x+1) - 1;
        bool active = (i - s >= 0) && (i < N);
        int l = 0, r = 0;
        if (active) {
            r = d_in[i];
            l = d_in[i-s];
        }
        __syncthreads();
        if (active) {
            d_in[i] = l + r;   // right child: carried value + left subtree sum
            d_in[i-s] = r;     // left child receives the carried value
        }
        __syncthreads();
    }
}
// Host driver: copies 8 known values to the device, runs the in-place
// exclusive scan with one block of N/2 threads (enough for the Blelloch
// sweeps over N elements), and prints the scanned array.
int main(){
    int h_in[N];
    int h_out[N];
    h_in[0] = 3;
    h_in[1] = 1;
    h_in[2] = 7;
    h_in[3] = 0;
    h_in[4] = 4;
    h_in[5] = 1;
    h_in[6] = 6;
    h_in[7] = 3;
    int *d_in;
    cudaMalloc((void**) &d_in, N*sizeof(int));
    cudaMemcpy(d_in, h_in, N*sizeof(int), cudaMemcpyHostToDevice);
    // N/2 threads in one block (== the 4 hard-coded previously for N == 8).
    exclusive_scan<<<1, N/2>>>(d_in);
    // Blocking copy; also surfaces any kernel-execution failure.
    cudaMemcpy(h_out, d_in, N*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i=0; i<N; i++)
        printf("out[%d] = %d\n", i, h_out[i]);
    cudaFree(d_in);
    // Bug fix: previously returned -1 even on success; report 0 to the shell.
    return 0;
}
|
11,820 | #pragma once
#include <iostream>
#include <sstream>
#include <stdexcept>
// Tiled matrix transpose using a padded shared-memory tile (TD_T+1 columns)
// so that the transposed read-back avoids shared-memory bank conflicts.
// Assumes the grid covers width x height in exact TD_T-sized tiles (the host
// wrapper below aborts otherwise) and each block is TD_T x TD_T threads; all
// instantiations below use BLOCK_ROWS == TD_T, so each loop runs once.
// NOTE(review): the bounds check tests the *output* coordinates (after the
// block-index swap) while index_in was computed from the input coordinates.
// With exact tiling the guard is always true for the whole block (which also
// keeps __syncthreads() non-divergent) -- confirm before relaxing the
// divisibility requirement in the wrapper.
template <typename T, int TD_T, int BLOCK_ROWS>
__global__ void transposeNoBankConflicts(T *odata, T *idata, int width, int height)
{
__shared__ T tile[TD_T][TD_T+1];
// input-space coordinates of this thread's element
int xIndex = blockIdx.x * TD_T + threadIdx.x;
int yIndex = blockIdx.y * TD_T + threadIdx.y;
int index_in = xIndex + yIndex * width;
// swap block indices to obtain the output (transposed) coordinates
xIndex = blockIdx.y * TD_T + threadIdx.x;
yIndex = blockIdx.x * TD_T + threadIdx.y;
int index_out = xIndex + (yIndex) * height;
if ( xIndex < height && yIndex < width)
{
for (int i=0; i<TD_T; i+=BLOCK_ROWS)
{
tile[threadIdx.y + i][threadIdx.x] = idata[ index_in + i * width];
}
__syncthreads();
for ( int i = 0; i < TD_T; i += BLOCK_ROWS)
{
odata[ index_out + i * height] = tile[ threadIdx.x ][ threadIdx.y + i ];
}
}
}
// Launches the bank-conflict-free transpose kernel on `stream`.
// Both dimensions must be exact multiples of tile_dimension; otherwise the
// kernel's indexing would run out of bounds, so we abort with an exception.
// Fix: removed the dead `++gridx`/`++gridy` statements that ran immediately
// before an unconditional throw (they suggested a round-up path that does
// not exist) and the commented-out unsupported tile sizes.
template <typename T>
void transpose_2( size_t height, size_t width, T* idata, T* odata, int tile_dimension,cudaStream_t stream )
{
    if ( width % tile_dimension != 0)
    {
        std::stringstream ss;
        ss << "Transpose 2: Width " << width << " is not divisible by tile dimension: " << tile_dimension << ". Aborting\n";
        throw std::runtime_error( ss.str() );
    }
    if ( height % tile_dimension != 0)
    {
        std::stringstream ss;
        ss << "Transpose 2: Height " << height<< " is not divisible by tile dimension: " << tile_dimension << ". Aborting\n";
        throw std::runtime_error( ss.str() );
    }
    int gridx = width / tile_dimension;
    int gridy = height / tile_dimension;
    dim3 grid( gridx, gridy),
    threads( tile_dimension, tile_dimension );
    // The tile size is a template parameter of the kernel, so only the
    // explicitly instantiated sizes below are supported.
    switch (tile_dimension)
    {
    case 2:
        transposeNoBankConflicts<T,2,2><<<grid, threads,0,stream>>>(odata, idata, width, height);
        break;
    case 4:
        transposeNoBankConflicts<T,4,4><<<grid, threads,0,stream>>>(odata, idata, width, height);
        break;
    case 8:
        transposeNoBankConflicts<T,8,8><<<grid, threads,0,stream>>>(odata, idata, width, height);
        break;
    case 16:
        transposeNoBankConflicts<T,16,16><<<grid, threads,0,stream>>>(odata, idata, width, height);
        break;
    default:
        std::cerr << "Tile Dimension: " << tile_dimension << " not supported. Aborting\n";
        exit( -1 );
    }
}
|
11,821 | #include "includes.h"
// Per-block sum reduction into g_odata[blockIdx.x] using static shared memory.
// Assumes blockDim.x == DIM, a power of two >= 64, and n a multiple of
// blockDim.x (the early return would otherwise skip the block barrier).
__global__ void reduceSmem(int *g_idata, int *g_odata, unsigned int n)
{
    __shared__ int sdata[DIM];
    unsigned int lane = threadIdx.x;
    unsigned int gpos = blockIdx.x * blockDim.x + threadIdx.x;
    if (gpos >= n) return;
    // Stage this block's slice of the input into shared memory.
    int *slice = g_idata + blockIdx.x * blockDim.x;
    sdata[lane] = slice[lane];
    __syncthreads();
    // Tree reduction, halving the active range at each step.
    if (blockDim.x >= 1024 && lane < 512) sdata[lane] += sdata[lane + 512];
    __syncthreads();
    if (blockDim.x >= 512 && lane < 256) sdata[lane] += sdata[lane + 256];
    __syncthreads();
    if (blockDim.x >= 256 && lane < 128) sdata[lane] += sdata[lane + 128];
    __syncthreads();
    if (blockDim.x >= 128 && lane < 64) sdata[lane] += sdata[lane + 64];
    __syncthreads();
    // Final 64 -> 1 reduction within one warp; volatile keeps stores visible.
    if (lane < 32)
    {
        volatile int *vmem = sdata;
        vmem[lane] += vmem[lane + 32];
        vmem[lane] += vmem[lane + 16];
        vmem[lane] += vmem[lane + 8];
        vmem[lane] += vmem[lane + 4];
        vmem[lane] += vmem[lane + 2];
        vmem[lane] += vmem[lane + 1];
    }
    // One partial sum per block.
    if (lane == 0) g_odata[blockIdx.x] = sdata[0];
}
11,822 | // Blocks and Threads
// blockIdx.x-> 0 | 1 | 2
// threadIdx.x-> [0,1,2,3,4,5,0,1,2,3,4,5,0,1,2,3,4,5]
//
// blockDim -> Block dimension = number of threads per block
//
// index = threadIdx + (blockIdx.x * M)
#include <stdio.h>
#include <time.h>
#define N 2048*2048
#define M 512 // THREADS_PER_BLOCK
// Element-wise vector add: one thread per element, with a tail guard for
// sizes that are not a multiple of the block size.
__global__ void add(int *a, int *b, int *c, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
// Fill a[0..n) with pseudo-random integers in [0, 100).
void random_ints(int *a, int n){
    for (int i = 0; i < n; ++i)
        a[i] = rand() % 100;
}
// Host driver: builds two random input vectors, adds them on the GPU, and
// reports the wall-clock time of the copy + kernel + copy-back sequence.
// NOTE(review): no CUDA error checking; failures would pass silently.
int main(void) {
int *a, *b, *c; // host copy
int *d_a, *d_b, *d_c; // device copy
int size = N * sizeof(int);
// allocate mem for device copies
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
clock_t start, end;
double cpu_time_used;
start = clock();
// copy inputs to device
// cudaMemcpy(destination, source, size, direction);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// launch add() kernel on GPU, one thread per element, rounding blocks up
add<<<(N + M-1)/M, M>>>(d_a, d_b, d_c, N);
// copy result back to host; this blocking copy also waits for the kernel,
// so the timed region covers the full GPU round trip
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("exec time: %f seconds\n", cpu_time_used);
// cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
11,823 | #include "stdio.h"
#define N 67000
#define MIN(a,b) (a < b?a:b )
// Element-wise vector add with no bounds guard: the launch in main() picks a
// block size that divides N exactly, so every tid maps to a valid element.
__global__ void add(int *a, int *b, int *c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x; //the thread id is this thread's index within its block
c[tid]=a[tid]+b[tid]; //the block id is this block's index within the grid
} //the block dimension is the number of threads per block
// Host driver: element-wise add of a[i] = i and b[i] = 1 on the GPU.
// NOTE(review): a, b and c live on the stack (~268 KB each with N = 67000,
// ~800 KB total) -- fine under typical limits but worth knowing.
int main()
{
int a[N], b[N], c[N];//host
int *dev_a, *dev_b, *dev_c;//device
cudaMalloc((void**)&dev_a, N*sizeof(int) );
cudaMalloc((void**)&dev_b, N*sizeof(int) );
cudaMalloc((void**)&dev_c, N*sizeof(int) );
for (int i = 0; i < N; i++){
a[i] = i,
b[i] = 1;
}
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice); //host to device
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
//Find the largest divisor of N that is <= 512 to use as the block size, so
//every thread maps to a valid element and the kernel needs no bounds check
//(alternatively, launch extra threads and have those with tid >= N do nothing)
int threads_block = MIN(512,N);
while(N%threads_block != 0)--threads_block;
int blocks = N / threads_block;
add<<<blocks,threads_block>>>(dev_a,dev_b,dev_c);
//Call CUDA kernel
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);//Copy memory from device to host
//copy array to host
for (int i = 0; i < N; i++)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
cudaFree(dev_a);//free device mem
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
11,824 | /*author: Zeke Elkins
*date: 3/27/14
*description: a CUDA program to add two vectors of integers
*/
#define N 512 //the size of the vector
#include <iostream>
using namespace std;
//device code to add two arrays of numbers;
//each block handles a different element of the array
// Launched as <<<1, N>>>: thread i sums element i of both input vectors.
__global__ void add(int *a, int *b, int *c) {
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
//method that will generate n random integers between 0 and 99
//values are stored in the int array a
// Populate a[0..n) with pseudo-random integers drawn from [0, 100).
void random_ints(int* a, int n) {
    int idx = 0;
    while (idx < n) {
        a[idx] = rand() % 100;
        ++idx;
    }
}
// Host driver: fills two random vectors, adds them element-wise on the GPU
// with a single block of N threads, and prints every sum.
int main(void) {
    // Host pointers and their device counterparts.
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);

    // Device buffers.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Host buffers: two random inputs and one result vector.
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);

    // Ship the inputs to the GPU.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // One block of N threads: thread i produces c[i].
    add<<<1,N>>>(d_a, d_b, d_c);

    // Blocking copy brings the sums back once the kernel finishes.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    for (int i=0; i<N; i++) {
        cout << a[i] << " plus " << b[i] << " equals " << c[i] << endl;
    }

    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
11,825 | #include <iostream>
#include <cstring>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <algorithm>
#include <unistd.h>
// NOTE: Need to compile in C++11 mode, add -std=c++11
// These should eventually be specifiable from R
#define TAU 1
#define V_THRESH 1.5
#define THREADS_PER_BLOCK 512
// Integrated Postsynaptic Kernel
// Integrated Postsynaptic Kernel: int_0^dt e^{-s/TAU} ds = TAU*(1 - e^{-dt/TAU}),
// and 0 for negative lags.
__host__ __device__
double ipostkern(double dt) {
    return (dt < 0) ? 0 : TAU * (1 - exp(-dt / TAU));
}
// Postsynaptic Kernel
// Postsynaptic Kernel: exponential decay e^{-dt/TAU}; 0 for negative lags.
__host__ __device__
double postkern(double dt) {
    return (dt < 0) ? 0 : exp(-dt / TAU);
}
// Postsynaptic Kernel
// Derivative of the postsynaptic kernel: -(1/TAU) e^{-dt/TAU}; 0 for dt < 0.
__host__ __device__
double dpostkern(double dt) {
    return (dt < 0) ? 0 : (-1.0) / TAU * exp(-dt / TAU);
}
// Integrated refractory kernel.
// Integrated refractory kernel: constant -V_THRESH after a spike; 0 for dt < 0.
__host__ __device__
double iprekern(double dt) {
    return (dt < 0) ? 0 : -V_THRESH;
}
// The inner product function, uses the standard R^n inner product.
// Standard R^n inner (dot) product of x and y.
__host__ __device__
double inner_prod(double *x, double *y, int n) {
    double acc = 0;
    for (int k = 0; k < n; ++k)
        acc += x[k] * y[k];
    return acc;
}
// Grid-stride CUDA kernel: advances every neuron of layer l through the full
// time grid (t_steps steps of size t_eps), accumulating integrated
// postsynaptic potentials in ALPHA and recording spike times in Fcal/f_count.
// For output-layer spikes (l == n_layers-1), optionally snapshots kernel
// state for the whole network into GAMMA/GAMMAd (used for gradients).
// Layers must be solved in order: one kernel launch per layer (see caller).
__global__
void par_c_main_loop(double ***ALPHA, double ***Fcal, int **f_count, double ***Ws, int* net_shape, int n_layers,
int t_steps, double t_eps, int l, double ****GAMMA, double ****GAMMAd, const bool copy_gamma) {
double t;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// One neuron of layer l per (strided) thread.
for (int n = index; n < net_shape[l]; n += stride) {
t = 0;
for (int ti = 0; ti < t_steps; ti++) {
// Calculate total postsynaptic contribution of this neuron's own spikes
int n_f = f_count[l][n];
double psc = 0;
for (int tfi = 0; tfi < n_f; tfi++) {
double tf = Fcal[l][n][tfi];
psc += ipostkern(t - tf);
}
ALPHA[l][ti][n] = psc;
if (l > 0) {
// Update refractory contribution
n_f = f_count[l][n];
double refr = 0;
for (int tfi = 0; tfi < n_f; tfi++) {
double tf = Fcal[l][n][tfi];
refr += iprekern(t - tf);
}
// Membrane potential: weighted presynaptic PSCs plus refractory term
double V_n = inner_prod(Ws[l-1][n], ALPHA[l-1][ti], net_shape[l-1]) + refr;
//printf("l = %d, n = %d, ti = %d", l, n, ti);
//printf("Vsl = %d, n = %d, ti = %d", l, n, ti);
// Check for firing neurons
if (V_n > V_THRESH) {
// If an output fire, record the neural state
if (copy_gamma && l == n_layers-1) {
for (int l1 = 0; l1 < n_layers; l1++) {
for (int h = 0; h < net_shape[l1]; h++) {
GAMMA[n][f_count[l][n]][l1][h] = 0;
GAMMAd[n][f_count[l][n]][l1][h] = 0;
// NOTE(review): this inner `ti` shadows the outer time-step index; here it
// iterates over recorded spikes of neuron h, not over time steps.
for (int ti = 0; ti < f_count[l1][h]; ti++) {
double tf = Fcal[l1][h][ti];
GAMMA[n][f_count[l][n]][l1][h] += postkern(t + t_eps - tf);
GAMMAd[n][f_count[l][n]][l1][h] += dpostkern(t + t_eps - tf);
}
}
}
}
// Record the spike; no overflow check against f_max (caller sizes storage)
Fcal[l][n][f_count[l][n]] = t + t_eps;
f_count[l][n]++;
}
}
t += t_eps;
}
}
}
// The main simulation, using armadillo for matrix multiplication, and organized in such a way that we solve a sequence embarassingly parallelizable problems.
// Runs the full spiking-network simulation on the GPU layer by layer.
// Allocates all working state (ALPHA potentials, spike-time storage, weights,
// optional GAMMA/GAMMAd snapshots) in CUDA unified memory, launches
// par_c_main_loop once per layer (each launch synchronized before the next,
// since layer l reads layer l-1's results), then copies the output spikes and
// fire counts back to host memory. Returns the output layer's spike times
// (caller owns the malloc'd arrays). f_count and, when copy_gamma is set,
// GAMMA/GAMMAd are output parameters filled with host-allocated copies.
// NOTE(review): unified-memory buffers other than ALPHA/OMEGA are never
// freed here (u_Fcal, u_f_count, u_Ws, d_GAMMA*, ...) -- confirm lifetime
// expectations with callers.
double **par_sim_body_c(int *net_shape, const int n_layers,
double **Fin, int *f_count_in, long long int **f_max, double ***Ws,
int** f_count, const int t_steps, const double t_eps, double ****GAMMA, double ****GAMMAd, const int debug, const bool copy_gamma) {
// Get the layer with the most neurons
int max_neur = 0;
for (int l = 0; l < n_layers; l++) {
if (max_neur < net_shape[l]) {
max_neur = net_shape[l];
}
}
// ALPHA stores integrated postsynaptic potential in column major order.
// OMEGA stores integrated refractory contribution in row major order.
//double ***ALPHA = (double ***)calloc(n_layers, sizeof(double**));
//double ***OMEGA = (double ***)calloc(n_layers-1, sizeof(double**));
double ***ALPHA, ***OMEGA;
cudaMallocManaged(&ALPHA, n_layers * sizeof(double**));
cudaMallocManaged(&OMEGA, (n_layers-1) * sizeof(double**));
for (int i = 0; i < n_layers; i++) {
double **ALPHAi;
cudaMallocManaged(&ALPHAi, t_steps * sizeof(double*));
ALPHA[i] = ALPHAi;
//ALPHA[i] = (double **) calloc(t_steps, sizeof(double*));
for (int j = 0; j < t_steps; j++) {
double *ALPHAij;
cudaMallocManaged(&ALPHAij, net_shape[i] * sizeof(double));
ALPHA[i][j] = ALPHAij;
//ALPHA[i][j] = (double *) calloc(net_shape[i], sizeof(double));
}
if (i > 0) {
double **OMEGAi;
cudaMallocManaged(&OMEGAi, net_shape[i] * sizeof(double*));
OMEGA[i-1] = OMEGAi;
//OMEGA[i-1] = (double **) calloc(net_shape[i], sizeof(double*));
for (int j = 0; j < net_shape[i]; j++) {
double *OMEGAij;
cudaMallocManaged(&OMEGAij, t_steps * sizeof(double));
OMEGA[i-1][j] = OMEGAij;
//OMEGA[i-1][j] = (double *) calloc(t_steps, sizeof(double));
}
}
}
if (debug >= 1)
printf("After ALPHA\n");
// Storage for firing times
//double ***u_Fcal = (double ***)calloc(n_layers, sizeof(double**));
double ***u_Fcal;
cudaMallocManaged(&u_Fcal, n_layers * sizeof(double**));
// Copy input spike times to unified memory.
double **u_Fin;
cudaMallocManaged(&u_Fin, net_shape[0] * sizeof(double*));
for (int n = 0; n < net_shape[0]; n++) {
double *u_Finn;
cudaMallocManaged(&u_Finn, f_count_in[n] * sizeof(double));
cudaMemcpy(u_Finn, Fin[n], f_count_in[n] * sizeof(double), cudaMemcpyDefault);
u_Fin[n] = u_Finn;
}
if (debug >= 1)
printf("After inputs \n");
//int **myarr = (int **)malloc(2*sizeof(int *));
//myarr[0] = (int **)malloc(2*sizeof(int));
//myarr[1] = (int **)malloc(2*sizeof(int));
//myarr[0][0] = 0;
//myarr[0][1] = 1;
//myarr[1][0] = 2;
//myarr[1][1] = 3;
//int **d_myarr;
//cudaMallocManaged(&d_myarr, 2*sizeof(int *));
//cudaMemcpy(d_myarr, myarr, 2*sizeof(int *), cudaMemcpyDefault);
// Fire counters: layer 0 gets a unified-memory copy of the input counts.
int **u_f_count;
cudaMallocManaged(&u_f_count, n_layers * sizeof(int *));
int *u_f_count_in;
cudaMallocManaged(&u_f_count_in, net_shape[0] * sizeof(int));
cudaMemcpy(u_f_count_in, f_count_in, net_shape[0] * sizeof(int), cudaMemcpyDefault);
//f_count[0] = u_f_count_in;
cudaMemcpy(&u_f_count[0], &u_f_count_in, sizeof(int *), cudaMemcpyDefault);
u_Fcal[0] = u_Fin;
// Per-layer spike-time storage, sized by f_max, initialized to "no spike".
for (int l = 0; l < n_layers-1; l++) {
//double **Fi = (double **) calloc(net_shape[l+1], sizeof(double *));
double **Fi;
cudaMallocManaged(&Fi, net_shape[l+1] * sizeof(double *));
u_Fcal[l+1] = Fi;
//double **Fi = (double **) calloc(net_shape[l+1], sizeof(double *));
int *f_countl;
cudaMallocManaged(&f_countl, net_shape[l+1] * sizeof(int));
cudaMemcpy(&u_f_count[l+1], &f_countl, sizeof(int *), cudaMemcpyDefault);
for (int n = 0; n < net_shape[l+1]; n++) {
// Init fire counts at 0.
u_f_count[l+1][n] = 0;
double *Fln;
//printf("Number A\n");
//printf("%d\n", f_max[l+1][n]);
//printf("Number Z\n");
cudaMallocManaged(&Fln, f_max[l+1][n] * sizeof(double));
//printf("Number B\n");
Fi[n] = Fln;
//printf("Number C\n");
// Initialize storage to -1, so any negative firing time means did not fire.
for (int f = 0; f < f_max[l+1][n]; f++) {
Fi[n][f] = -1;
}
}
}
if (debug >= 1)
printf("After Fi copy\n");
//// Convert Connection weights to a C array
//// Ws[i] is the ith layer, Ws[i][j] is the jth row of layer i,
//// Ws[i][j][k] is the j,k element of layer i (row major ordering).
//double ***Ws_c = (double***)calloc(net_size-1, sizeof(double**));
//for (int l = 0; l < net_size-1; l++) {
//    Ws_c[l] = (double**)calloc(net_shape[l], sizeof(double*));
//    for (int n = 0; n < net_shape[l]; n++) {
//        Ws_c[l][n] = Ws_in + wlo[l] + net_shape[l+1] * n;
//    }
//}
// Do GAMMA(d)
// d_GAMMA[on][fi][l]][[h] Gives the instantaneous postsynaptic current of neuron h of layer l to firing time fi of output neuron on.
double ****d_GAMMA, ****d_GAMMAd;
if (copy_gamma) {
cudaMallocManaged(&d_GAMMA, (n_layers-1) * sizeof(double***));
cudaMallocManaged(&d_GAMMAd, (n_layers-1) * sizeof(double***));
for (int on = 0; on < net_shape[n_layers-1]; on++) {
cudaMallocManaged(&d_GAMMA[on], f_max[n_layers-1][on] * sizeof(double **));
cudaMallocManaged(&d_GAMMAd[on], f_max[n_layers-1][on] * sizeof(double **));
for (int fi = 0; fi < f_max[n_layers-1][on]; fi++) {
cudaMallocManaged(&d_GAMMA[on][fi], n_layers * sizeof(double*));
cudaMallocManaged(&d_GAMMAd[on][fi], n_layers * sizeof(double*));
for (int l = 0; l < n_layers; l++) {
cudaMallocManaged(&d_GAMMA[on][fi][l], net_shape[l] * sizeof(double));
cudaMallocManaged(&d_GAMMAd[on][fi][l], net_shape[l] * sizeof(double));
for (int h = 0; h < net_shape[l]; h++) {
d_GAMMA[on][fi][l][h] = -1;
d_GAMMAd[on][fi][l][h] = -1;
}
}
}
}
if (debug >= 1)
printf("Initted GAMMA storage \n");
}
// Copy weights to unified memory
double ***u_Ws;
cudaMallocManaged(&u_Ws, (n_layers-1) * sizeof(double**));
for (int l = 0; l < n_layers-1; l++) {
double **u_Wsl;
cudaMallocManaged(&u_Wsl, (net_shape[l+1]) * sizeof(double*));
u_Ws[l] = u_Wsl;
for (int n = 0; n < net_shape[l+1]; n++) {
double *u_Wsln;
cudaMallocManaged(&u_Wsln, net_shape[l] * sizeof(double));
cudaMemcpy(u_Wsln, Ws[l][n], net_shape[l] * sizeof(double), cudaMemcpyDefault);
u_Ws[l][n] = u_Wsln;
}
}
if (debug >= 1)
printf("After Weights copy\n");
// Copy network shape to unified memory
int *u_net_shape;
cudaMallocManaged(&u_net_shape, n_layers * sizeof(int));
cudaMemcpy(u_net_shape, net_shape, n_layers * sizeof(int), cudaMemcpyDefault);
// Run actual inference
//TODO: Should just be + 1
int n_blocks = max_neur / THREADS_PER_BLOCK + 1;
// Main Loop: layers are solved sequentially; each launch must finish before
// the next layer reads its predecessor's ALPHA/spikes.
for (int l = 0; l < n_layers; l++) {
if (debug >= 1)
printf(" Solving Layer %d...\n", l);
par_c_main_loop<<<n_blocks, THREADS_PER_BLOCK>>>(ALPHA, u_Fcal, u_f_count, u_Ws, u_net_shape, n_layers,
t_steps, t_eps, l, d_GAMMA, d_GAMMAd, copy_gamma);
cudaDeviceSynchronize();
}
if (debug >= 1)
printf("After main loop\n");
// Clean up
for (int i = 0; i < n_layers; i++) {
for (int j = 0; j < t_steps; j++) {
cudaFree(ALPHA[i][j]);
}
cudaFree(ALPHA[i]);
if (i > 0) {
for (int j = 0; j < net_shape[i]; j++) {
cudaFree(OMEGA[i-1][j]);
}
cudaFree(OMEGA[i-1]);
}
}
cudaFree(ALPHA);
cudaFree(OMEGA);
if (debug >= 1)
printf("After Free\n");
// Copy Fcal to host memory
//double ***Fcal = (double ***)malloc(n_layers * sizeof(double **));
//for (int l = 0; l < n_layers; l++) {
//    Fcal[l] = (double **)malloc(net_shape[l] * sizeof(double *));
//    for (int n = 0; n < net_shape[l]; n++) {
//        Fcal[l][n] = (double *)malloc(f_max[l][n] * sizeof(double));
//        cudaMemcpy(Fcal[l][n], u_Fcal[l][n], f_max[l][n] * sizeof(double), cudaMemcpyDefault);
//    }
//}
// Copy output spikes to host memory
double **Fout = (double **)malloc(net_shape[n_layers-1]*sizeof(double*));
for (int n = 0; n < net_shape[n_layers-1]; n++) {
Fout[n] = (double *)malloc(f_max[n_layers-1][n] * sizeof(double));
cudaMemcpy(Fout[n], u_Fcal[n_layers-1][n], f_max[n_layers-1][n] * sizeof(double), cudaMemcpyDefault);
}
// Copy f_count to host memory
for (int l = 0; l < n_layers; l++) {
f_count[l] = (int *)calloc(net_shape[l], sizeof(int));
cudaMemcpy(f_count[l], u_f_count[l], net_shape[l] * sizeof(int), cudaMemcpyDefault);
}
if (debug >= 1)
printf("After output spike spike/f_count\n");
// Copy to host memory
// d_GAMMA[on][fi][l]][[h] Gives the instantaneous postsynaptic current of neuron h of layer l to firing time fi of output neuron on.
//GAMMA = (double****)malloc((n_layers-1) * sizeof(double***));
//GAMMAd = (double****)malloc((n_layers-1) * sizeof(double***));
if (copy_gamma) {
for (int on = 0; on < net_shape[n_layers-1]; on++) {
GAMMA[on] = (double***)malloc(f_max[n_layers-1][on] * sizeof(double**));
GAMMAd[on] = (double***)malloc(f_max[n_layers-1][on] * sizeof(double**));
for (int fi = 0; fi < f_max[n_layers-1][on]; fi++) {
GAMMA[on][fi] = (double**)malloc(n_layers * sizeof(double*));
GAMMAd[on][fi] = (double**)malloc(n_layers * sizeof(double*));
for (int l = 0; l < n_layers; l++) {
GAMMA[on][fi][l] = (double*)malloc(net_shape[l] * sizeof(double));
GAMMAd[on][fi][l] = (double*)malloc(net_shape[l] * sizeof(double));
cudaMemcpy(GAMMA[on][fi][l], d_GAMMA[on][fi][l], net_shape[l] * sizeof(double), cudaMemcpyDefault);
cudaMemcpy(GAMMAd[on][fi][l], d_GAMMAd[on][fi][l], net_shape[l] * sizeof(double), cudaMemcpyDefault);
}
}
}
if (debug >= 1)
printf("After GAMMA copy\n");
}
//TODO: copy f_count
return(Fout);
}
|
11,826 | #include <omp.h>
#include "matrix.cuh"
#include <cstdio>
#include <cassert>
#include <memory.h>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <random>
#include "matrix.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
const int block_size = 256;
static std::mt19937 mt(time(NULL));
static std::normal_distribution<real> normal(0, 1);
// Default constructor: empty 0x0 matrix with no device allocation.
matrix::matrix()
{
this->m = this->n = 0;
this->data = NULL;
}
// Square n x n matrix; device storage is allocated but left uninitialized.
matrix::matrix(int n)
{
assert(n > 0);
this->m = this->n = n;
cudaMalloc(&(this->data), sizeof(real)*n*n);
}
// m x n matrix (row-major on device); storage allocated, uninitialized.
matrix::matrix(int m, int n)
{
assert(m > 0 && n > 0);
this->m = m;
this->n = n;
cudaMalloc(&(this->data), sizeof(real)*m*n);
}
// Copy constructor: deep device-to-device copy of a's buffer.
matrix::matrix(const matrix &a)
{
this->m = a.m;
this->n = a.n;
cudaMalloc(&(this->data), sizeof(real)*a.m*a.n);
cudaMemcpy(this->data, a.data, sizeof(real)*n*m, cudaMemcpyDeviceToDevice);
}
// Number of rows.
int matrix::row_num()
{
return m;
}
// Number of columns.
int matrix::column_num()
{
return n;
}
// Grid-stride kernel: y = x^T, where x is row-major m x n and y is n x m.
__global__ void transpose_kernel(real* x, real* y, int m, int n)
{
    int step = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x; idx < m * n; idx += step) {
        int r = idx / n;   // row in x
        int c = idx % n;   // column in x
        y[c * m + r] = x[r * n + c];
    }
}
// Returns the transpose as a new n x m matrix (computed on the device).
matrix matrix::transpose()
{
matrix res(n, m);
int numBlocks = (n*m + block_size - 1) / block_size;
transpose_kernel <<<numBlocks, block_size >>>(this->data, res.data, m, n);
return res;
}
// Returns row k (1-based) as a new 1 x n matrix (device-to-device copy).
matrix matrix::row(int k)
{
assert(k > 0 && k <= m);
matrix res(1, n);
cudaMemcpy(res.data, data + n*(k - 1), sizeof(real)*n, cudaMemcpyDeviceToDevice);
return res;
}
// Returns rows h..k (1-based, inclusive) as a new (k-h+1) x n matrix.
// Rows are contiguous in the row-major buffer, so a single copy suffices.
// Bug fix: removed an unused host buffer (`new real[len*n]`) that was
// allocated and never used nor freed -- a memory leak on every call.
matrix matrix::row(int h, int k)
{
    assert(h > 0 && k >= h && k <= m);
    int len = k - h + 1;
    matrix res(len, n);
    cudaMemcpy(res.data, data + n*(h - 1), sizeof(real)*n*len, cudaMemcpyDeviceToDevice);
    return res;
}
// Grid-stride kernel: copies columns k..h (0-based, inclusive) of the
// row-major m x n matrix x into the m x (h-k+1) output y.
__global__ void get_column(real* x,real *y, int m, int n, int k, int h)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int len = (h - k + 1);
for (int g = index; g < m*len; g+=stride) {
int i = g / len;
int j = g%len;
y[g] = x[i*n + j + k];
}
}
// Returns column k (1-based) as a new m x 1 matrix.
// Robustness fix: bounds assert added for consistency with row(int), which
// already validated its index; out-of-range k previously read out of bounds.
matrix matrix::column(int k)
{
    assert(k > 0 && k <= n);
    matrix res(m, 1);
    k--;
    int numBlocks = (m + block_size - 1) / block_size;
    get_column<<<numBlocks, block_size>>>(data, res.data, m, n, k, k);
    return res;
}
// Returns columns h..k (1-based, inclusive) as a new m x (k-h+1) matrix.
matrix matrix::column(int h, int k)
{
assert(k >= h);
k--, h--;
matrix res(m, k - h + 1);
int numBlocks = (m*(k - h + 1) + block_size - 1) / block_size;
get_column << <numBlocks, block_size >> >(data, res.data, m, n, h, k);
return res;
}
// Reads element k (1-based, linear row-major index) with a synchronous
// single-element device-to-host copy.
real matrix::get(int k)
{
real y;
cudaMemcpy(&y, data + k - 1, sizeof(real), cudaMemcpyDeviceToHost);
return y;
}
// Reads element (k, h) (1-based row/column) via a synchronous D2H copy.
real matrix::get(int k, int h)
{
k--;
h--;
real y;
cudaMemcpy(&y, data + k*n + h, sizeof(real), cudaMemcpyDeviceToHost);
return y;
}
// Writes element k (1-based linear index) via a synchronous H2D copy.
void matrix::set(int k, real y)
{
cudaMemcpy(data + k - 1, &y, sizeof(real), cudaMemcpyHostToDevice);
}
// Writes element (k, h) (1-based row/column) via a synchronous H2D copy.
void matrix::set(int k, int h, real y)
{
k--;
h--;
cudaMemcpy(data + k*n + h, &y, sizeof(real), cudaMemcpyHostToDevice);
}
// 1-based linear element access; forwards to get(int).
real matrix::operator()(int k)
{
return get(k);
}
// 1-based (row, column) element access; forwards to get(int, int).
real matrix::operator()(int k, int h)
{
return get(k, h);
}
// Deep-copy assignment: frees the old device buffer, reallocates to a's
// size, and copies device-to-device. Self-assignment is a no-op.
matrix& matrix::operator=(const matrix & a)
{
if (&a != this) {
if(this->data != NULL)
cudaFree(this->data);
//this->data = a.data;
cudaMalloc(&(this->data), sizeof(real)*a.m * a.n);
this->m = a.m;
this->n = a.n;
cudaMemcpy(this->data, a.data, sizeof(real)*n*m, cudaMemcpyDeviceToDevice);
}
return *this;
}
// Grid-stride element-wise sum: z[i] = x[i] + y[i].
__global__ void add(real* x, real* y, real* z, int n)
{
    int step = blockDim.x * gridDim.x;
    for (int k = blockDim.x * blockIdx.x + threadIdx.x; k < n; k += step)
        z[k] = x[k] + y[k];
}
// Grid-stride kernel: adds the m x 1 column vector y to every column of the
// row-major m x n matrix x, i.e. x[i][j] += y[i].
__global__ void add_column(real *x, real* y, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int k = index; k < m*n; k+=stride) {
int i = k / n;
x[k] += y[i];
}
}
// Matrix addition. Special case: when b is a column vector with matching
// row count, it is broadcast across all columns of a; otherwise shapes
// must match exactly and the sum is element-wise.
matrix operator+(matrix &a, matrix &b)
{
if (b.n == 1 && a.m == b.m) {
matrix res = a;
int numBlocks = (a.m*a.n + block_size - 1) / block_size;
add_column << <numBlocks, block_size >> >(res.data, b.data, a.m, a.n);
return res;
}
else {
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
add << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m * a.n);
return res;
}
}
// Grid-stride scalar broadcast add: z[i] = x[i] + y.
__global__ void add(real* x, real y, real* z, int n)
{
    int step = blockDim.x * gridDim.x;
    for (int j = blockDim.x * blockIdx.x + threadIdx.x; j < n; j += step)
        z[j] = x[j] + y;
}
// Matrix + scalar: returns a new matrix with b added to every element.
matrix operator+(matrix &a, real b)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
add << <numBlocks, block_size>> >(a.data, b, res.data, a.m*a.n);
return res;
}
// Grid-stride element-wise difference: z[i] = x[i] - y[i].
__global__ void sub(real* x, real* y, real* z, int n)
{
    int step = blockDim.x * gridDim.x;
    for (int k = blockDim.x * blockIdx.x + threadIdx.x; k < n; k += step)
        z[k] = x[k] - y[k];
}
// Element-wise matrix subtraction; shapes must match exactly (no column
// broadcast here, unlike operator+).
matrix operator-(matrix &a, matrix &b)
{
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
sub << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m * a.n);
return res;
}
// Grid-stride scalar broadcast subtract: z[i] = x[i] - y.
__global__ void sub(real* x, real y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] - y;
}
// Matrix - scalar: returns a new matrix with b subtracted from every element.
matrix operator-(matrix &a, real b)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
sub << <numBlocks, block_size >> >(a.data, b, res.data, a.m*a.n);
return res;
}
// Grid-stride kernel: scales each row i of the row-major m x n matrix x by
// the column vector entry y[i], i.e. x[i][j] *= y[i].
__global__ void mul_column(real *x, real* y, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int k = index; k < m*n; k+=stride) {
int i = k / n;
x[k] *= y[i];
}
}
// Naive row-major matrix product: z (m x n) = x (m x p) * y (p x n).
// One output element per grid-stride index; inner loop over the shared dim.
__global__ void product(real* x, real *y, real* z,int m, int p, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int h = index; h < m*n; h += stride) {
int i = h / n;
int j = h%n;
z[h] = 0;
for (int k = 0; k < p; k++)
z[h] += x[i*p + k] * y[k*n + j];
}
}
// Matrix multiplication. Special case: when b is a column vector whose row
// count matches a's, each row i of a is scaled by b[i] (a row-wise broadcast
// product, NOT a mat-vec product) -- presumably intentional for per-row
// scaling; confirm with callers. Otherwise a standard product a * b.
matrix operator*(matrix & a, matrix & b)
{
if (b.n == 1 && a.m == b.m) {
matrix res = a;
int numBlocks = (a.m*a.n + block_size - 1) / block_size;
mul_column<<<numBlocks, block_size>>>(res.data, b.data, a.m, a.n);
return res;
}
else {
assert(a.n == b.m);
matrix res(a.m, b.n);
int numBlocks = (a.m*b.n + block_size - 1) / block_size;
product << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m, a.n, b.n);
return res;
}
}
// Computes z (m x n) = x (m x p) * y^T, where y is stored as n x p row-major
// (the second operand is read transposed).
__global__ void transposed_product_second(real* x, real* y, real* z, int m, int p, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int h = index; h < m*n; h += stride) {
int i = h / n;
int j = h%n;
z[h] = 0;
for (int k = 0; k < p; k++)
z[h] += x[i*p + k] * y[j*p + k];
}
}
// Returns a * b^T; a and b must have the same column count
// (a: m x n, b: k x n -> result m x k).
matrix transposed_mul(matrix & a, matrix & b)
{
assert(a.n == b.n);
matrix res(a.m, b.m);
int numBlocks = (a.m*b.m + block_size - 1) / block_size;
transposed_product_second<<<numBlocks, block_size>>>(a.data, b.data, res.data, a.m, a.n, b.m);
return res;
}
// Computes z (m x n) = x^T * y, where x is stored as p x m row-major
// (the first operand is read transposed): z[i][j] = sum_k x[k][i] * y[k][j].
__global__ void transposed_product_first(real* x, real* y, real* z, int m, int p, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int h = index; h < m*n; h += stride) {
int i = h / n;
int j = h%n;
z[h] = 0;
for (int k = 0; k < p; k++)
z[h] += x[k*m + i] * y[k*n + j];
}
}
// Computes res = a^T * b into the supplied matrix (a: k x m, b: k x n ->
// res: m x n), reusing/replacing res's device buffer.
// NOTE(review): res's old buffer is freed before the launch, so res must not
// alias a or b -- confirm callers never pass the same object.
void transposed_mul(matrix & a, matrix & b, matrix & res)
{
assert(a.m == b.m);
if (res.data != NULL)
cudaFree(res.data);
res.m = a.n;
res.n = b.n;
cudaMalloc(&res.data, sizeof(real)*a.n*b.n);
int numBlocks = (a.n*b.n + block_size - 1) / block_size;
transposed_product_first<< <numBlocks, block_size >> >(a.data, b.data, res.data, a.n, a.m, b.n);
}
// Grid-stride scalar multiply: z[i] = x[i] * y.
__global__ void multiply(real* x, real y, real* z, int n)
{
    int step = blockDim.x * gridDim.x;
    for (int j = blockDim.x * blockIdx.x + threadIdx.x; j < n; j += step)
        z[j] = x[j] * y;
}
// Matrix * scalar.
matrix operator*(matrix &a, real b)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
multiply << <numBlocks, block_size>> >(a.data, b, res.data, a.m*a.n);
return res;
}
// Scalar * matrix (commuted overload of matrix * scalar).
matrix operator*(real a, matrix &b)
{
matrix res(b.m, b.n);
int numBlocks = (b.n*b.m + block_size - 1) / block_size;
multiply << <numBlocks, block_size >> >(b.data, a, res.data, b.m*b.n);
return res;
}
// Grid-stride in-place element-wise division: x[i] /= y[i].
__global__ void divide(real* x, real* y, int n)
{
    int step = blockDim.x * gridDim.x;
    for (int j = blockDim.x * blockIdx.x + threadIdx.x; j < n; j += step)
        x[j] /= y[j];
}
// Grid-stride scalar division into z: z[i] = x[i] / y.
__global__ void divide(real* x, real y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] / y;
}
// Grid-stride in-place scalar division: x[i] /= y.
__global__ void divide(real* x, real y, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
x[i] /= y;
}
// In-place scalar division of every element; division by zero is rejected.
void matrix::operator/=(real a)
{
assert(a != 0);
int numBlocks = (n*m + block_size - 1) / block_size;
divide << <numBlocks, block_size >> >(data, a, m*n);
}
// Matrix / scalar into a new matrix; division by zero is rejected.
matrix operator/(matrix &a, real b)
{
matrix res(a.m, a.n);
assert(b != 0);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
divide << <numBlocks, block_size >> >(a.data, b, res.data, a.m*a.n);
return res;
}
// Grid-stride in-place element-wise add: x[i] += y[i].
__global__ void add(real* x, real* y, int n)
{
    int step = blockDim.x * gridDim.x;
    for (int j = blockDim.x * blockIdx.x + threadIdx.x; j < n; j += step)
        x[j] += y[j];
}
// In-place matrix add. A column vector (a.n == 1 with matching row count) is
// broadcast across all columns; otherwise shapes must match exactly.
void matrix::operator+=(matrix &a)
{
    if (a.n == 1) {
        assert(m == a.m);
        // Fix: size the grid by the number of updated elements (m*n), matching
        // operator-='s column branch. Previously only (a.m*a.n) = m elements'
        // worth of blocks were launched and the kernel's grid-stride loop had
        // to make up the difference (correct, but needlessly serial).
        int numBlocks = (m*n + block_size - 1) / block_size;
        add_column << <numBlocks, block_size >> >(this->data, a.data, m, n);
    }
    else {
        assert(m == a.m && n == a.n);
        int numBlocks = (a.n*a.m + block_size - 1) / block_size;
        add << <numBlocks, block_size >> >(this->data, a.data, m*n);
    }
}
// Grid-stride in-place scalar add: x[i] += y.
__global__ void add(real* x, real y, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
x[i] += y;
}
// In-place scalar add to every element.
void matrix::operator+=(real a)
{
int numBlocks = (n*m + block_size - 1) / block_size;
add << <numBlocks, block_size >> >(this->data, a, m*n);
}
// Grid-stride in-place element-wise subtract: x[i] -= y[i].
__global__ void sub(real* x, real* y, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
x[i] -= y[i];
}
// Grid-stride kernel: subtracts the m x 1 column vector y from every column
// of the row-major m x n matrix x, i.e. x[i][j] -= y[i].
__global__ void sub_column(real *x, real* y, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int k = index; k < m*n; k+=stride) {
int i = k / n;
x[k] -= y[i];
}
}
// In-place matrix subtract; a column vector (a.n == 1, matching rows) is
// broadcast across all columns, otherwise shapes must match exactly.
void matrix::operator-=(matrix &a)
{
if (a.n == 1) {
assert(a.m == m);
int numBlocks = (m*n + block_size - 1) / block_size;
sub_column << <numBlocks, block_size >> >(this->data, a.data, m, n);
}
else {
assert(a.m == m && a.n == n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
sub << <numBlocks, block_size >> >(data, a.data, m*n);
}
}
// In-place scalar subtract, implemented as adding -a to every element.
void matrix::operator-=(real a)
{
int numBlocks = (n*m + block_size - 1) / block_size;
add << <numBlocks, block_size >> >(this->data, -a, m*n);
}
// In-place row-wise scale: a must be an m x 1 column vector; each row i of
// this matrix is multiplied by a[i].
void matrix::operator*=(matrix &a)
{
assert(a.n == 1 && a.m == m);
int numBlocks = (n*m + block_size - 1) / block_size;
mul_column << <numBlocks, block_size >> >(this->data, a.data, m, n);
}
// Grid-stride in-place scalar multiply: x[i] *= y.
__global__ void multiply(real* x, real y, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
x[i] *= y;
}
void matrix::operator*=(real a)
{
int numBlocks = (n*m + block_size - 1) / block_size;
multiply << <numBlocks, block_size >> >(this->data, a, m*n);
}
__global__ void divide_column(real *x,real* y, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int k = index; k < m*n; k+=stride) {
int i = k / n;
x[k] /= y[i];
}
}
// In-place element-wise division: *this /= a.
// A single-column divisor (a.n == 1) is broadcast across the columns of
// *this, mirroring the broadcast behavior of operator-=.
void matrix::operator/=(matrix & a)
{
    if (a.n == 1) {
        assert(a.m == m);
        int numBlocks = (m*n + block_size - 1) / block_size;
        divide_column << <numBlocks, block_size >> >(this->data, a.data, m, n);
    }
    else {
        // BUG FIX: a shape mismatch used to fall through `else if` and
        // silently do nothing; fail loudly like the sibling operators do.
        assert(a.m == m && a.n == n);
        int numBlocks = (n*m + block_size - 1) / block_size;
        divide << <numBlocks, block_size >> >(this->data, a.data, m*n);
    }
}
__global__ void cut_off(real *x, real y, int n)
{
    // Hard threshold: entries of x that compare >= y are kept, all others
    // (including NaN) are replaced by zero.
    const int step = blockDim.x * gridDim.x;
    for (int k = blockDim.x * blockIdx.x + threadIdx.x; k < n; k += step) {
        const real v = x[k];
        x[k] = (v >= y) ? v : 0.0f;
    }
}
void matrix::mask(real x)
{
int numBlocks = (n*m + block_size - 1) / block_size;
cut_off << <numBlocks, block_size >> >(data, x, m*n);
}
// Debug helper: copies the device matrix to the host and prints it in
// row-major order, one row per line. The blocking cudaMemcpy also acts as
// the synchronization point for any outstanding kernels on this data.
void matrix::print()
{
putchar('\n');
real* tmp = (real*)malloc(sizeof(real)*m*n);
cudaMemcpy(tmp, data, sizeof(real)*m*n, cudaMemcpyDeviceToHost);
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
printf("\t%8g", tmp[i*n + j]);
}
putchar('\n');
}
free(tmp);
}
void matrix::foutput(FILE * f)
{
fwrite(&m, sizeof(int), 1, f);
fwrite(&n, sizeof(int), 1, f);
real* tmp = (real*)malloc(sizeof(real)*m*n);
cudaMemcpy(tmp, data, sizeof(real)*m*n, cudaMemcpyDeviceToHost);
fwrite(tmp, sizeof(real), m*n, f);
free(tmp);
}
// Load a matrix from a binary file written by foutput(): two ints (m, n)
// followed by m*n reals; replaces the current device buffer.
// NOTE(review): fread return values are unchecked -- a short or corrupt
// file leaves m, n, or the buffer partially initialized.
void matrix::fload(FILE * f)
{
fread(&m, sizeof(int), 1, f);
fread(&n, sizeof(int), 1, f);
real* tmp = (real*)malloc(sizeof(real)*m*n);
fread(tmp, sizeof(real), m*n, f);
if (data != NULL)
cudaFree(data);
cudaMalloc(&data, sizeof(real)*m*n);
cudaMemcpy(data, tmp, sizeof(real)*m*n, cudaMemcpyHostToDevice);
free(tmp);
}
matrix::~matrix()
{
if(this->data != NULL)
cudaFree(this->data);
this->data = NULL;
m = n = 0;
}
// Matrix product: res = a * b with a (a.m x a.n) and b (b.m x b.n).
// res is reallocated to (a.m x b.n) and filled by the product kernel.
// NOTE(review): res must not alias a or b -- its old buffer is freed first.
void mul(matrix & a, matrix & b, matrix & res)
{
assert(a.n == b.m);
if (res.data != NULL)
cudaFree(res.data);
res.m = a.m;
res.n = b.n;
cudaMalloc(&res.data, sizeof(real)*a.m*b.n);
int numBlocks = (a.m*b.n + block_size - 1) / block_size;
product << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m, a.n, b.n);
}
// Scalar product: res = a * b. Copies a into the (re)allocated buffer of
// res, then scales it in place with the multiply(real*, real, int) kernel.
void mul(matrix & a, real & b, matrix & res)
{
    res.m = a.m;
    res.n = a.n;
    if (res.data != NULL)
        cudaFree(res.data);   // BUG FIX: was free() on a device pointer
    cudaMalloc(&res.data, res.m*res.n*sizeof(real));
    // BUG FIX: the old launch passed (real*, real, real*, int), which matches
    // no multiply overload in this file; copy on-device, then scale in place.
    cudaMemcpy(res.data, a.data, res.m*res.n*sizeof(real), cudaMemcpyDeviceToDevice);
    int numBlocks = (a.n*a.m + block_size - 1) / block_size;
    // was a hard-coded 256-thread block; use block_size like every other launch
    multiply << <numBlocks, block_size >> >(res.data, b, a.m*a.n);
}
__global__ void greater_than(real* x, real y, real* z, int n)
{
    // Element-wise comparison against a scalar: z[k] = 1.0f where x[k] > y,
    // 0.0f otherwise.
    const int step = blockDim.x * gridDim.x;
    int k = blockDim.x * blockIdx.x + threadIdx.x;
    while (k < n) {
        z[k] = (x[k] > y) ? 1.0f : 0.0f;
        k += step;
    }
}
matrix operator>(matrix &a, real b)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
greater_than << <numBlocks,block_size >> >(a.data, b, res.data, a.m*a.n);
return res;
}
__global__ void greater_than(real* x, real* y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] > y[i] ? 1.0f : 0.0f;
}
matrix operator>(matrix &a, matrix &b)
{
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
greater_than << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m*a.n);
return res;
}
__global__ void greater_equal(real* x, real y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] >= y ? 1.0f : 0.0f;
}
matrix operator>=(matrix &a, real b)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
greater_equal << <numBlocks, block_size >> >(a.data, b, res.data, a.m*a.n);
return res;
}
__global__ void greater_equal(real* x, real* y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] >= y[i] ? 1.0f : 0.0f;
}
matrix operator>=(matrix &a, matrix &b)
{
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
greater_equal << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m*a.n);
return res;
}
__global__ void equal(real* x, real y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] == y ? 1.0f : 0.0f;
}
matrix operator==(matrix &a, real b)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
equal << <numBlocks, block_size >> >(a.data, b, res.data, a.m*a.n);
return res;
}
__global__ void equal(real* x, real* y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] == y[i] ? 1.0f : 0.0f;
}
matrix operator==(matrix &a, matrix &b)
{
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
equal << <numBlocks, block_size>> >(a.data, b.data, res.data, a.m*a.n);
return res;
}
__global__ void maximum(real* x, real y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] > y ? x[i] : y;
}
__global__ void maximum(real* x, real* y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] > y[i]?x[i] : y[i];
}
matrix max(real a, matrix &b)
{
matrix res(b.m, b.n);
int numBlocks = (b.n*b.m + block_size - 1) / block_size;
maximum << <numBlocks, block_size>> >(b.data, a, res.data, b.m*b.n);
return res;
}
matrix max(matrix &a, real b)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
maximum << <numBlocks, block_size>> >(a.data, b, res.data, a.m*a.n);
return res;
}
matrix max(matrix &a, matrix &b)
{
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
maximum << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m*a.n);
return res;
}
int size(matrix &a, int d)
{
    // Matlab-style dimension query: d == 1 returns the row count,
    // d == 2 returns the column count.
    assert(d == 1 || d == 2);
    return (d == 1) ? a.m : a.n;
}
// Matlab-style size(): returns a 1x2 device matrix holding (rows, cols)
// as reals; consumed by zeros(matrix&) / ones(matrix&).
matrix size(matrix & a)
{
matrix res(1, 2);
real tmp[2] = {(real)a.m, (real)a.n};
cudaMemcpy(res.data, tmp, 2 * sizeof(real), cudaMemcpyHostToDevice);
return res;
}
__global__ void sum_kernel(real* x, real* y, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < m; i += stride) {
y[i] = 0;
for (int j = 0; j < n; j++) {
y[i] += x[i*n + j];
}
}
}
// Row sums: returns an (a.m x 1) column vector where entry i holds the sum
// of row i of a (one thread per row, see sum_kernel).
matrix sum(matrix & a)
{
matrix res(a.m, 1);
int numBlocks = (a.m + block_size - 1) / block_size;
sum_kernel<<<numBlocks, block_size>>>(a.data, res.data, a.m, a.n);
return res;
}
// Serial single-thread reduction over all m*n elements of x (launched
// <<<1,1>>> by sum_all).
__global__ void sum_all_kernel(real* x, real* y, int m, int n)
{
    // BUG FIX: accumulate locally and write once. The original did
    // `*y += x[i]` into freshly cudaMalloc'd memory that was never zeroed,
    // so the result included whatever garbage *y happened to contain.
    real acc = 0;
    for (int i = 0; i < m*n; i++) {
        acc += x[i];
    }
    *y = acc;
}
// Sum of all elements of a, computed serially by a single GPU thread
// (<<<1,1>>> launch); the blocking cudaMemcpy synchronizes before the
// result is read back.
real sum_all(matrix& a)
{
real* y, res;
cudaMalloc(&y, sizeof(real));
sum_all_kernel<<<1,1>>>(a.data, y, a.m, a.n);
cudaMemcpy(&res, y, sizeof(real), cudaMemcpyDeviceToHost);
cudaFree(y);
return res;
}
// Serial single-thread sum of squares over all m*n elements of x
// (launched <<<1,1>>> by norm2; the host takes the sqrt).
__global__ void norm2_kernel(real* x, real* y, int m, int n)
{
    // BUG FIX: accumulate locally and write once. The original did
    // `*y += ...` into uninitialized device memory (norm2 never zeroes y),
    // corrupting the norm with whatever value *y held before the launch.
    real acc = 0;
    for (int i = 0; i < m*n; i++) {
        acc += x[i] * x[i];
    }
    *y = acc;
}
// Euclidean (Frobenius) norm of a: sqrt of the sum of squared elements,
// accumulated serially by a single GPU thread.
real norm2(matrix & a)
{
real* y, res;
cudaMalloc(&y,sizeof(real));
norm2_kernel<<<1,1>>>(a.data, y, a.m, a.n);
cudaMemcpy(&res, y, sizeof(real), cudaMemcpyDeviceToHost);
cudaFree(y);
return sqrt(res);
}
__global__ void mean_kernel(real* x, real* y, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < m; i += stride) {
y[i] = 0;
for (int j = 0; j < n; j++) {
y[i] += x[i*n + j];
}
y[i] /= n;
}
}
matrix mean(matrix & a)
{
matrix res(a.m, 1);
int numBlocks = (a.m + block_size - 1) / block_size;
mean_kernel << <numBlocks, block_size >> >(a.data, res.data, a.m, a.n);
return res;
}
__global__ void set(real* x, real y, int n)
{
    // Fill: write the scalar y into each of the n entries of x.
    const int step = blockDim.x * gridDim.x;
    for (int k = blockDim.x * blockIdx.x + threadIdx.x; k < n; k += step)
        x[k] = y;
}
matrix zeros(int n)
{
matrix res(n);
int numBlocks = (n*n + block_size - 1) / block_size;
set << <numBlocks, block_size >> >(res.data, 0.0f, n*n);
return res;
}
// zeros(size(x)) idiom: `a` is expected to be a 1x2 size-matrix (as built
// by size(matrix&)) whose two device entries are the row and column counts.
matrix zeros(matrix & a)
{
real tmp[2];
cudaMemcpy(tmp, a.data, 2*sizeof(real), cudaMemcpyDeviceToHost);
int m = tmp[0];
int n = tmp[1];
matrix res(m, n);
int numBlocks = (m*n + block_size - 1) / block_size;
set << <numBlocks, block_size >> >(res.data, 0.0f, m*n);
return res;
}
matrix zeros(int m, int n)
{
matrix res(m, n);
int numBlocks = (m*n + block_size - 1) / block_size;
set << <numBlocks, block_size >> >(res.data, 0.0f, m*n);
return res;
}
matrix ones(int n)
{
matrix res(n);
int numBlocks = (n*n + block_size - 1) / block_size;
set << <numBlocks, block_size >> >(res.data, 1.0f, n*n);
return res;
}
matrix ones(int m, int n)
{
matrix res(m, n);
int numBlocks = (m*n + block_size - 1) / block_size;
set << <numBlocks, block_size >> >(res.data, 1.0f, m*n);
return res;
}
matrix ones(matrix & a)
{
real tmp[2];
cudaMemcpy(tmp, a.data, 2*sizeof(real), cudaMemcpyDeviceToHost);
int m = tmp[0];
int n = tmp[1];
matrix res(m, n);
int numBlocks = (m*n + block_size - 1) / block_size;
set << <numBlocks, block_size >> >(res.data, 1.0f, m*n);
return res;
}
__global__ void square_root(real* x, real* y, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = gridDim.x*blockDim.x;
for (int i = index; i < n; i += stride)
y[i] = sqrt((double)x[i]);
}
matrix sqrt(matrix &a)
{
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
square_root << <numBlocks, block_size>> >(a.data, res.data, a.m*a.n);
return res;
}
__global__ void multiply(real* x, real* y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] * y[i];
}
matrix dot_mul(matrix &a, matrix &b)
{
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
multiply << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m*a.n);
return res;
}
__global__ void divide(real* x, real* y, real* z, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
z[i] = x[i] / y[i];
}
matrix dot_div(matrix &a, matrix &b)
{
assert(a.m == b.m && a.n == b.n);
matrix res(a.m, a.n);
int numBlocks = (a.n*a.m + block_size - 1) / block_size;
divide << <numBlocks, block_size >> >(a.data, b.data, res.data, a.m*a.n);
return res;
}
// Per-sample loss and gradient, one thread per column (sample) of X (m x n).
// For sample p with 1-based label y[p]: every non-target row whose score
// comes within margin 1 of the target row's score increments dx for that
// row, decrements dx for the target row, and adds the violation to loss[p]
// (multiclass hinge / SVM-style -- presumed from the formula; confirm with caller).
// NOTE(review): dx is incremented, so it must be zeroed first (get_dx does).
__global__ void get_dx_kernel(real* dx, real* X, real* y, real* loss, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int p = index; p < n; p+= stride) {
loss[p] = 0;
for (int i = 0; i < m; i++) {
int k = y[p] - 1;
if (i != k && X[i*n + p] + 1 > X[k*n + p]) {
dx[i*n + p]++;
dx[k*n + p]--;
loss[p] += X[i*n + p] + 1 - X[k*n + p];
}
}
}
}
// Rebuilds dx with the shape of X, zero-fills it, runs get_dx_kernel, and
// returns the total loss summed over all samples.
// NOTE(review): numBlocks is sized for dx.n threads only; both launched
// kernels use grid-stride loops, so all elements are still covered.
real get_dx(matrix& dx, matrix& X, matrix & y)
{
if (dx.data != NULL)
cudaFree(dx.data);
dx.m = X.m;
dx.n = X.n;
matrix loss(1, dx.n);
cudaMalloc(&dx.data, sizeof(real)*dx.m*dx.n);
int numBlocks = (dx.n + block_size - 1) / block_size;
set << <numBlocks, block_size >> >(dx.data, 0, dx.m*dx.n);
get_dx_kernel<<<numBlocks, block_size>>>(dx.data, X.data, y.data, loss.data, dx.m, dx.n);
return sum(loss)(1);
}
__global__ void get_dW_kernel(real* dW, real* dx, real* Y, int m, int n, int b)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int k = index; k < m*n; k += stride) {
int i = k / n;
int j = k%n;
dW[k] = 0;
for (int h = 0; h < b; h++)
dW[k] += dx[i*b + h] * Y[j*b + h];
}
}
void get_dW(matrix & dW, matrix & dx, matrix & Y)
{
dW.m = dx.m;
dW.n = Y.m;
if (dW.data != NULL)
cudaFree(dW.data);
cudaMalloc(&dW.data, sizeof(real)*dW.m*dW.n);
int numBlocks = (dW.n*dW.m + block_size - 1) / block_size;
get_dW_kernel<<<numBlocks,block_size>>>(dW.data, dx.data, Y.data, dW.m, dW.n, Y.n);
}
// Backward pass through W with a gate on Y: dx_next = (W^T * dx) where
// Y > 0, and 0 where Y <= 0. One grid-stride thread per element of the
// (n x b) output.
__global__ void update_dX1_kernel(real* dx,real* dx_next, real* W, real* Y, int m, int n , int b)
{
    int index = blockDim.x*blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int k = index; k < n*b; k += stride) {
        int i = k / b;    // output row
        int j = k%b;      // sample (column) index
        dx_next[k] = 0;
        // BUG FIX: was `break`, which abandoned every remaining element of
        // this thread's grid-stride range and left dx_next entries
        // uninitialized; the gate only applies to the current element.
        if (Y[k] <= 0)
            continue;
        for (int h = 0; h < m; h++)
            dx_next[k] += W[h*n + i] * dx[h*b + j];
    }
}
// Backpropagate dx through the weights W with the gate matrix Y, replacing
// dx in place: dx <- (W^T * dx) zeroed where Y <= 0, reshaped to (Y.m x Y.n).
void update_dX1(matrix & W, matrix & dx, matrix & Y)
{
real* dx_next;
cudaMalloc(&dx_next, sizeof(real)*W.n*dx.n);
assert(dx.m == W.m && dx.n == Y.n && Y.m == W.n);
int numBlocks = (W.n*dx.n + block_size - 1) / block_size;
update_dX1_kernel<<<numBlocks,block_size>>>(dx.data, dx_next, W.data, Y.data, W.m, W.n, dx.n);
cudaFree(dx.data);
dx.data = dx_next;
dx.m = Y.m;
dx.n = Y.n;
}
__global__ void update_dX2_kernel(real* dx, real* gamma, real* sigma, int m, int n)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int k = index; k < m*n; k += stride) {
int i = k / n;
dx[k] *= gamma[i] / sigma[i];
}
}
void update_dX2(matrix& dx, matrix& gamma, matrix& sigma)
{
int numBlocks = (dx.n*dx.m + block_size - 1) / block_size;
update_dX2_kernel << <numBlocks, block_size >> >(dx.data, gamma.data, sigma.data, dx.m, dx.n);
}
// m x n matrix of samples drawn on the host from the file-level generator
// pair (normal, mt) and copied to the device.
matrix randn(int m, int n)
{
matrix res(m, n);
real* tmp = (real*)malloc(sizeof(real)*m*n);
for (int i = 0; i < m*n; i++)
tmp[i] = normal(mt);
cudaMemcpy(res.data, tmp, sizeof(real)*m*n, cudaMemcpyHostToDevice);
free(tmp);
return res;
}
matrix randn(int n)
{
matrix res(n);
real* tmp = (real*)malloc(sizeof(real)*n*n);
for (int i = 0; i < n*n; i++)
tmp[i] = normal(mt);
cudaMemcpy(res.data, tmp, sizeof(real)*n*n, cudaMemcpyHostToDevice);
free(tmp);
return res;
} |
11,827 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#define numThreads 1024
#define MAX_CHAR_PER_LINE 128
#define NONE 0
#define FIRST 1
#define LAST 2
#define BOTH 3
/* Read a delimited text file of numbers into a row-major double array.
 * On return, *numCoords holds the number of data lines (rows) and
 * *numObjs the number of attributes per line (columns) -- note the
 * parameter names are misleading relative to how main() uses them.
 * Returns NULL on open failure or when no data lines are found.
 * NOTE(review): `line` and `infile` are leaked on the second early return. */
double* file_read(char *filename,int *numCoords,int *numObjs)
{
double *objects;
int i, j, len;
//ssize_t numBytesRead;
int done=0;
FILE *infile;
char *line, *ret;
int lineLen;
//don't skip lines or attributes for this project
int lines_to_skip=0;
int attr_to_skip=0;
if ((infile = fopen(filename, "r")) == NULL) {
fprintf(stderr, "Error: no such file (%s)\n", filename);
return NULL;
}
/* first find the number of objects */
lineLen = MAX_CHAR_PER_LINE;
line = (char*) malloc(lineLen);
assert(line != NULL);
(*numCoords) = 0;
while (fgets(line, lineLen, infile) != NULL) {
/* check each line to find the max line length */
while (strlen(line) == lineLen-1) {
/* this line read is not complete: rewind and re-read with a bigger buffer */
len = strlen(line);
fseek(infile, -len, SEEK_CUR);
/* increase lineLen */
lineLen += MAX_CHAR_PER_LINE;
line = (char*) realloc(line, lineLen);
assert(line != NULL);
ret = fgets(line, lineLen, infile);
assert(ret != NULL);
}
/* a line counts as an object only if it contains a token */
if (strtok(line, " \t\n") != 0)
(*numCoords)++;
}
(*numCoords)-=lines_to_skip;
if((*numCoords)<=0)
{
fprintf(stderr, "Error: No objects found\n");
return NULL;
}
rewind(infile);
/*find the number of attributes*/
(*numObjs)=0;
fgets(line, lineLen, infile);
char * pch;
pch=strtok(line, ",;");
while (pch != NULL )
{
pch = strtok (NULL, ",;");
(*numObjs)++;
}
if(attr_to_skip!=NONE)
{
(*numObjs)--;
if(attr_to_skip==BOTH)
(*numObjs)--;
}
rewind(infile);
/* allocate space for objects and read all objects */
len = (*numCoords) * (*numObjs);
objects = (double*)malloc( len * sizeof(double));
assert(objects != NULL);
/* read all objects */
for(i=0;i<lines_to_skip;i++)
fgets(line, lineLen, infile);
i=0;
j=0;
while (fgets(line, lineLen, infile) != NULL)
{
pch=strtok(line, ",;");
while (pch != NULL && j<(*numObjs))
{
/* optionally drop the first attribute of each line (FIRST/BOTH modes) */
if(attr_to_skip%2==1 && j==0 && done==0)
{
done=1;
pch = strtok (NULL, ",;");
continue;
}
objects[i*(*numObjs)+j]=atof(pch);
pch = strtok (NULL, ",;");
j++;
}
i++;
j=0;
done=0;
}
assert(i == *numCoords);
fclose(infile);
free(line);
return objects;
}
// Despite the name, this computes the arithmetic mean of each row of the
// rows x cols matrix M_d into w_d (one thread per row).
__global__ void medval(double *M_d,double *w_d,int rows, int cols)
{
int row = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.0;
if (row < rows){
for (int i=0;i<cols;i++){
sum += M_d[row*cols + i];
}
w_d[row] = sum/cols;
}
}
// z = (M - w*1^T) * x: multiply the row-mean-centered matrix by vector x,
// one thread per output row (w_d holds the per-row means from medval).
__global__ void MatrixVecMul(double* M_d, double* x_d, double* z_d, double* w_d, int rows, int cols){
int row = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.0;
if (row < rows){
for (int i=0;i<cols;i++){
sum += (M_d[row*cols + i] - w_d[row])*x_d[i];
}
z_d[row] = sum;
}
}
// k = (M - w*1^T)^T * z: transposed multiply by the centered matrix, one
// thread per output entry. Each thread also folds the square of its entry
// into *norm_d via atomicAdd, so *norm_d must be zeroed before the launch.
// NOTE(review): x_d is accepted but never read in this kernel.
__global__ void TMatrixVecMul(double *M_d, double* x_d, double* z_d, double* k_d, double* w_d, int rows, int cols, float* norm_d){
int row = blockIdx.x*blockDim.x + threadIdx.x;
double sum = 0.0;
if (row < cols){
for (int i=0;i<rows;i++){
sum += (M_d[i*cols + row] - w_d[i])*z_d[i];
}
k_d[row] = sum;
atomicAdd(norm_d,k_d[row]*k_d[row]);
}
}
__global__ void DivideNorm(double* k_d, float* norm, int cols){
    // Normalize the candidate vector: divide each entry by sqrt(*norm).
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= cols) return;
    k_d[i] = k_d[i] / sqrt(*norm);
}
__global__ void CalculateEps(double* k_d, double* x_d, double* e_d, float* eps, int cols){
    // Per-entry difference between the new and previous iterate; squared
    // differences are folded into *eps with a global atomic (zero it first).
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= cols) return;
    const double d = k_d[i] - x_d[i];
    e_d[i] = d;
    atomicAdd(eps, d * d);
}
/* Power iteration for the dominant right singular direction of the
 * row-mean-centered data matrix read from argv[1]: repeatedly apply
 * M_c^T (M_c x), normalize, and stop when the iterate change drops below
 * 1e-6 (at most 100 rounds). Kernel time is accumulated with CUDA events
 * and the resulting vector is written to results_simple.csv.
 * NOTE(review): z_h is allocated and freed but never used; argv[1] is not
 * validated before use. */
int main(int argc, char **argv){
cudaSetDevice(0);
int rows,cols;
double* M_h = file_read(argv[1],&rows,&cols);
int size = rows*cols*sizeof(double);
double* x_h = (double*)malloc(cols*sizeof(double));
double* z_h = (double*)malloc(rows*sizeof(double));
double* k_h = (double*)malloc(cols*sizeof(double));
float* e = (float*)malloc(sizeof(float));
float* norm_h = (float*)malloc(sizeof(float));
float* norm_d;
float* eps;
double* e_d;
double* M_d;
double* x_d;
double* k_d;
double* z_d;
double* w_d;
// start vector of all ones
for (int i=0;i<cols;i++) x_h[i] = 1;
cudaMalloc((void**) &M_d, size);
cudaMalloc((void**) &x_d, cols*sizeof(double));
cudaMalloc((void**) &z_d, rows*sizeof(double));
cudaMalloc((void**) &k_d, cols*sizeof(double));
cudaMalloc((void**) &e_d, cols*sizeof(double));
cudaMalloc((void**) &w_d, rows*sizeof(double));
cudaMalloc((void**) &norm_d, sizeof(float));
cudaMalloc((void**) &eps, sizeof(float));
cudaMemcpy(M_d, M_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(x_d, x_h, cols*sizeof(double), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsedTime;
cudaEventRecord(start);
// one-time pass: per-row means used for centering in later kernels
medval<<<(rows - 1)/numThreads + 1,numThreads>>>(M_d,w_d,rows,cols);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
for (int i=0;i<100;i++){
// reset the device-side norm and epsilon accumulators each iteration
*norm_h = 0;
*e = 0;
cudaMemcpy(norm_d, norm_h, sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(eps, e, sizeof(float), cudaMemcpyHostToDevice);
if (i !=0){
// feed the previous normalized vector back in as the new iterate
for (int k=0;k<cols;k++) x_h[k] = k_h[k];
cudaMemcpy(x_d, x_h, cols*sizeof(double), cudaMemcpyHostToDevice);
}
cudaEventRecord(start);
MatrixVecMul<<<(rows - 1)/numThreads + 1,numThreads>>>(M_d,x_d,z_d,w_d,rows,cols);
TMatrixVecMul<<<(cols - 1)/numThreads + 1,numThreads>>>(M_d,x_d,z_d,k_d, w_d, rows,cols,norm_d);
DivideNorm<<<(cols - 1)/numThreads + 1,numThreads>>>(k_d, norm_d, cols);
CalculateEps<<<(cols - 1)/numThreads + 1,numThreads>>>(k_d, x_d, e_d, eps, cols);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
elapsedTime += milliseconds;
cudaMemcpy(k_h,k_d,cols*sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(e, eps, sizeof(float), cudaMemcpyDeviceToHost);
// converged: iterate moved less than 1e-6 in Euclidean norm
if (sqrt(*e) < 0.000001) break;
}
printf("Elapsed Time: %f ms\n", elapsedTime);
FILE *f = fopen("results_simple.csv", "w");
for (int i=0;i<cols;i++) fprintf(f,"%.7f%s", k_h[i],(i<cols-1)?",":"");
fprintf(f,"\n");
fclose(f);
cudaFree(M_d);
cudaFree(x_d);
cudaFree(z_d);
cudaFree(k_d);
cudaFree(w_d);
cudaFree(norm_d);
cudaFree(e_d);
cudaFree(eps);
free(M_h);
free(x_h);
free(z_h);
free(k_h);
free(norm_h);
free(e);
return 0;
}
|
11,828 | #include <cuda.h>
#include <stdio.h>
// Print a small subset of a device's properties. The odd "% d" in the
// format strings is printf's space flag plus %d, so the integers do print.
void printDevProp ( cudaDeviceProp devp )
{
printf ( " No . of multiprocessors : % d \n ",devp.multiProcessorCount ) ; // 24
printf ( " Size of warp % d \n " , devp.warpSize ) ; // 32
return ;
}
// Enumerate every CUDA device on the system and print its properties.
int main ()
{
int devCount ;
cudaGetDeviceCount(& devCount);
for (int i = 0; i < devCount;++i)
{
cudaDeviceProp devp ;
cudaGetDeviceProperties(&devp ,i);
printDevProp(devp) ;
}
return 0;
}
11,829 | extern "C"
{
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
__global__ void vadd(const int n, const double *a, const double *b, double *c)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
} |
11,830 | #include <stdio.h>
#include <cfloat>
const double voltsConstantMin = 0;
const double voltsConstantMax = 300; //0-300V peak
const double omegaConstantMin = 2.0*M_PI*40;
const double omegaConstantMax = 2.0*M_PI*70; //40-70Hz
const double phiConstant = 2.0*M_PI; //0-2PI radians
// Abort with file/line context whenever a CUDA runtime call fails; wrap
// calls with the HANDLE_ERROR macro below.
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// One differential-evolution step: one thread = one agent. Dynamic shared
// memory holds blockDim.x agents, each stored as N normalized [0,1]
// parameters followed by one fitness slot. Block results land in
// bestScoreAndParameters_b (one best agent per block).
__global__ void runDEagent(const unsigned long int S, const unsigned long int G, const double F, const double R, const size_t N,
double *bestScoreAndParameters_b, double *signalData_d, size_t signalLength, double *randomVector_d, const size_t randomLength,const size_t rate,
int currentGen, double *bestAgent)
{
    extern __shared__ double agents_local[];
    size_t randomOffset = (gridDim.x*blockDim.x*currentGen*N+(blockIdx.x*N*gridDim.x+threadIdx.x*N))%randomLength;
    if(blockIdx.x==0&&threadIdx.x==0) printf("%lu/%lu\n",randomOffset,randomLength);
    // Agent (0,0) is seeded with the current global best; all others random.
    if(blockIdx.x==0&&threadIdx.x==0)
        for (int i = 0; i < N+1; ++i) {
            agents_local[i] = bestAgent[i];
        }
    else
        for (int i = 0; i < N; ++i)
            agents_local[threadIdx.x*(N+1)+i] = randomVector_d[++randomOffset%randomLength];
    __syncthreads();
    double *child;
    child = (double *)malloc(sizeof(double)*N);
    if(currentGen!=0 && (blockIdx.x!=0||threadIdx.x!=0))
    {
        // Reproduction: best/1 mutation from two distinct block-local parents
        double *parents[3];
        parents[0] = bestAgent;
        parents[1] = 0; parents[2] = 0;
        unsigned int randomParentIdx;
        for (int i = 1; i < 3; ++i)
        {
            do
            {
                randomParentIdx = floor(blockDim.x*randomVector_d[++randomOffset%randomLength]);
            } while(&agents_local[randomParentIdx*(N+1)] == parents[0] ||
                    &agents_local[randomParentIdx*(N+1)] == parents[1] ||
                    &agents_local[randomParentIdx*(N+1)] == parents[2]);
            parents[i] = &agents_local[randomParentIdx*(N+1)];
        }
        for (int i = 0; i < N; ++i) {
            double val = bestAgent[i];
            val += (F*(parents[1][i]-parents[2][i]));
            if(val<0.0) val=0.0;          // keep parameters normalized to [0,1]
            else if(val>1.0) val=1.0;
            child[i] = val;
        }
        // Crossover: the delta-th gene always comes from the base agent
        unsigned int delta = floor(N*randomVector_d[++randomOffset%randomLength]);
        for (int i = 0; i < N; ++i)
            agents_local[threadIdx.x*(N+1)+i] =
                (delta != i && randomVector_d[++randomOffset%randomLength]>R)?child[i]:bestAgent[i];
    }
    // BUG FIX: in-kernel malloc must be paired with in-kernel free; the
    // original leaked one child buffer per thread per generation.
    free(child);
    // Fitness: sum of squared residuals of the candidate sine against the signal
    double volts = voltsConstantMin+agents_local[threadIdx.x*(N+1)]*(voltsConstantMax - voltsConstantMin);
    double omega = omegaConstantMin+agents_local[threadIdx.x*(N+1)+1]*(omegaConstantMax - omegaConstantMin);
    double phi = agents_local[threadIdx.x*(N+1)+2]*phiConstant;
    double t,diff,accum=0.0;
    for(size_t pos = 0; pos < signalLength; pos++)
    {
        t=(double)pos/(double)rate;
        diff = volts*sin(
            omega*t+
            phi
            )-signalData_d[pos];
        accum += pow(diff,2);
    }
    agents_local[threadIdx.x*(N+1)+N] = accum;
    __syncthreads(); //Wait for all threads of the block to end
    //Calculate best agent of the block: serial scan by thread 0
    if(threadIdx.x==0)
    {
        double *bestAgentOfBlock = &agents_local[0];
        for (int i = 1; i < blockDim.x; ++i)
            if(agents_local[i*(N+1)+N] < bestAgentOfBlock[N])
                bestAgentOfBlock = &agents_local[i*(N+1)];
        for (int i = 0; i < N+1; ++i)
            bestScoreAndParameters_b[blockIdx.x*(N+1)+i] = bestAgentOfBlock[i];
    }
    __syncthreads(); //Wait for all threads of the block to end
}
// Host driver for the differential-evolution fit: S agents, one per thread,
// 32 threads per block, run for G+1 generations. The global best agent
// lives in managed memory so both host and kernel can read it.
extern "C" void runDE(double *signalData, size_t signalLength, double *randomVector, const size_t randomLength,
const unsigned long int S, const unsigned long int G, const double F, const double R, const size_t N, const double epsilon, const size_t rate)
{
    int nBlocks = (int)ceil((double)S/32);
    int nTpb = 32;
    double *signalData_d;
    HANDLE_ERROR( cudaMalloc((void **)&signalData_d, signalLength*sizeof(double)) );
    HANDLE_ERROR( cudaMemcpy( signalData_d, signalData, signalLength*sizeof(double), cudaMemcpyHostToDevice) );
    double *randomVector_d;
    HANDLE_ERROR( cudaMalloc((void **)&randomVector_d, randomLength*sizeof(double)) );
    HANDLE_ERROR( cudaMemcpy( randomVector_d, randomVector, randomLength*sizeof(double), cudaMemcpyHostToDevice) );
    double *bestScoreAndParameters_b;
    HANDLE_ERROR( cudaMallocManaged(&bestScoreAndParameters_b,nBlocks*(N+1)*sizeof(double)) );
    // Report device capabilities before launching
    struct cudaDeviceProp prop;
    int cudaDevice;
    HANDLE_ERROR( cudaGetDevice(&cudaDevice) );
    HANDLE_ERROR( cudaGetDeviceProperties(&prop, cudaDevice) );
    printf("Device used: %s\nmaxThreadsPerBlock: %d\nmaxGridSize: %dx%dx%d\n",
        prop.name, prop.maxThreadsPerBlock, prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    double *bestAgent;
    HANDLE_ERROR( cudaMallocManaged(&bestAgent,(N+1)*sizeof(double)) );
    bestAgent[N]=DBL_MAX;
    for (int currentGen = 0; currentGen <= G; ++currentGen)
    {
        // (removed leftover debug trap that printed "Warning!" at generation 28)
        printf("Generation: %d\n",currentGen);
        runDEagent<<< nBlocks, nTpb, nTpb*(N+1)*sizeof(double)>>>(S,G,F,R,N,bestScoreAndParameters_b,signalData_d,signalLength,randomVector_d,randomLength,rate,currentGen, bestAgent);
        cudaDeviceSynchronize(); //Wait for all blocks to finish
        //Get best of them from bestScoreAndParameters (serial mode)
        for (int b = 0; b < nBlocks; ++b)
            if(bestScoreAndParameters_b[b*(N+1)+N] < bestAgent[N])
                for (int i = 0; i < N+1; ++i) bestAgent[i] = bestScoreAndParameters_b[b*(N+1)+i];
        double volts = voltsConstantMin+bestAgent[0]*(voltsConstantMax - voltsConstantMin);
        double omega = omegaConstantMin+bestAgent[1]*(omegaConstantMax - omegaConstantMin);
        double phi = bestAgent[2]*phiConstant;
        printf("Best agent in generation %d. %lf, %lf, %lf, score: %lf\n",
            currentGen, volts, omega, phi, bestAgent[N]);
    }
    HANDLE_ERROR( cudaGetLastError() );
    HANDLE_ERROR( cudaFree(signalData_d) );
    HANDLE_ERROR( cudaFree(bestScoreAndParameters_b) );
    // BUG FIX: release the remaining buffers that were leaked until
    // cudaDeviceReset tore the context down.
    HANDLE_ERROR( cudaFree(randomVector_d) );
    HANDLE_ERROR( cudaFree(bestAgent) );
    cudaDeviceReset();
    return;
}
|
11,831 | #include <bits/stdc++.h>
#include <cuda.h>
using namespace std;
__global__ void MatrixAddKernel(float *da, float *db, float* dc, int n, int m) {
    // One thread per element of the flattened n x m matrices: dc = da + db.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n * m)
        return;
    dc[idx] = da[idx] + db[idx];
}
// Add two n x m matrices on the device: c = a + b (host wrapper).
void MatrixAdd(float *a, float *b, float *c, int n, int m) {
    float *da, *db, *dc;
    // BUG FIX: was n*m*sizeof(sizeof(float)) == n*m*sizeof(size_t), which
    // over-sized the buffers and made the memcpys read past the end of the
    // n*m-float host arrays.
    int size = n*m * sizeof(float);
    // memory allocation
    cudaMalloc((void**)&da, size);
    cudaMalloc((void**)&db, size);
    cudaMalloc((void**)&dc, size);
    // transfer memory from host to device
    cudaMemcpy(da, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, size, cudaMemcpyHostToDevice);
    // calling cuda kernel to add, one thread per element
    int blockSize = 128;
    // BUG FIX: the grid was sized for n threads only, so most of the n*m
    // elements of c were never computed.
    int gridSize = (n*m + blockSize - 1) / blockSize;
    MatrixAddKernel<<<gridSize, blockSize>>>(da, db, dc, n, m);
    // transfer memory from device to host (blocking copy syncs with the kernel)
    cudaMemcpy(c, dc, size, cudaMemcpyDeviceToHost);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return;
}
int main() {
int n, m;
cout << "Enter n: ";
cin >> n;
cout << "Enter m: ";
cin >> m;
// allocating memories
float *a, *b, *c;
a = (float*)malloc(n*m * sizeof(float));
b = (float*)malloc(n*m * sizeof(float));
c = (float*)malloc(n*m * sizeof(float));
int lower = 10, upper = 20;
for(int i=0; i<n*m; i++) {
int a1 = (rand() % (upper - lower + 1)) + lower;
int b1 = (rand() % (upper - lower + 1)) + lower;
a[i] = a1;
b[i] = b1;
}
cout << "A is : \n";
int t = 0;
for(int i=0; i<n; i++) {
for(int j=0; j<m; j++) {
cout << a[t] << " ";
t++;
}
cout << "\n";
}
t = 0;
cout << "\nB is : \n";
for(int i=0; i<n; i++) {
for(int j=0; j<m; j++) {
cout << b[t] << " ";
t++;
}
cout << "\n";
}
cout << "\nAfter adding...\n";
MatrixAdd(a, b, c, n, m);
t = 0;
cout << "C is : \n";
for(int i=0; i<n; i++) {
for(int j=0; j<m; j++) {
cout << c[t] << " ";
t++;
}
cout << "\n";
}
return 0;
}
|
11,832 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <cuda.h>
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//#include "common.h"
////#include "cuda_common.cuh"
//
//#define HISTOGRAM_BUCKET_SIZE 10
//
////---------------------------------------------------KERNELS AND CPU IMPLEMENTATIONS-----------------------------//
//
////in this kernel we expect grid with only single block
//__global__ void histogram_gpu_v01(int* input, int*output, int input_size)
//{
// atomicAdd(&(output[input[threadIdx.x]]),1);
//}
//
//// in this kernel we expect 1D grid with multiple 1D blocks
//// reduce the global memory writes by introducing shared memory intermideate store
//__global__ void histogram_gpu_v02(int* input, int*output, int input_size)
//{
// __shared__ int block_output[HISTOGRAM_BUCKET_SIZE];
//
// int gid = blockIdx.x*blockDim.x + threadIdx.x;
//
// atomicAdd(&(block_output[input[gid]]), 1);
// __syncthreads();
//
// if (threadIdx.x < HISTOGRAM_BUCKET_SIZE)
// {
// atomicAdd(&(output[threadIdx.x]), block_output[threadIdx.x]);
// }
//}
//
////crating thread for to represent each element in the array may be inefficient
////so change the kernel so that single threads handles multiple elements
//__global__ void histogram_gpu_v03(int* input, int*output, int input_size)
//{
// //to be implements
//}
//
//void histogram_cpu(int * input, int* output, int input_size)
//{
// for (int i = 0; i < input_size; i++)
// {
// output[input[i]]++;
// }
//}
//
//
////---------------------------------------------------RUNING FUNCTIONS--------------------------------------------//
//
//void run_histogram_cpu(int input_size, int histogram_buckets)
//{
// int * input, *output;
// int input_byte_size = sizeof(int)*input_size;
// int histogram_bucket_byte_size = sizeof(int)*histogram_buckets;
//
// input = (int*)malloc(input_byte_size);
// output = (int*)malloc(histogram_bucket_byte_size);
// memset(output, 0, histogram_bucket_byte_size);
//
// initialize(input, input_size);
// printf("Printing input array \n");
// print_array(input,input_size);
//
// histogram_cpu(input,output,input_size);
// printf("Printing histogram array \n");
// print_array(output, histogram_buckets);
//
// free(output);
// free(input);
//}
//
//void run_histogram_gpu(int input_size, int histogram_buckets)
//{
// int * h_input, *h_ref;
// int input_byte_size = sizeof(int)*input_size;
// int histogram_bucket_byte_size = sizeof(int)*histogram_buckets;
//
// h_input = (int*)malloc(input_byte_size);
// h_ref = (int*)malloc(histogram_bucket_byte_size);;
//
// initialize(h_input, input_size);
//
// int * d_input, *d_output;
// cudaMalloc((int**)&d_input, input_byte_size);
// cudaMalloc((int**)&d_output,histogram_bucket_byte_size);
//
// dim3 grid(4);
// dim3 block(input_size/grid.x);
//
// cudaMemset(d_output, 0, histogram_bucket_byte_size);
// cudaMemcpy(d_input, h_input, input_byte_size, cudaMemcpyHostToDevice);
//
// histogram_gpu_v02 << <grid,block >> > (d_input, d_output, input_size);
// cudaDeviceSynchronize();
//
// cudaMemcpy(h_ref, d_output, histogram_bucket_byte_size, cudaMemcpyDeviceToHost);
//
// print_array(h_ref,histogram_buckets);
//
// cudaFree(d_output);
// cudaFree(d_input);
//
// free(h_ref);
// free(h_input);
//}
//
////int main()
////{
//// printf("--------------------RUNNING HISTOGRAM EXAMPLE------------------------- \n");
//// int input_size = 1024;
//// int histogram_buckets = 10;
//// run_histogram_gpu(input_size,histogram_buckets);
////
//// system("pause");
//// return 0;
////} |
11,833 | #include "includes.h"
// Element-wise, keep in d_ip_v whichever of the two inputs has the larger
// magnitude: d_ip_v[X] = the one of {d_ip_v[X], d_ip_ir[X]} with bigger |.|.
// One thread per element, guarded for the grid tail (X < len).
__global__ void dwt_compare(float *d_ip_v, float *d_ip_ir, int len) {
    const int X = blockIdx.x * blockDim.x + threadIdx.x;
    if (X < len)
    {
        // FIX: fabsf instead of abs — abs on float relies on CUDA's C++
        // overload and silently truncates to int if compiled as host C;
        // fabsf is the unambiguous single-precision absolute value.
        d_ip_v[X] = (fabsf(d_ip_v[X]) > fabsf(d_ip_ir[X])) ? d_ip_v[X] : d_ip_ir[X];
    }
}
11,834 | #include <stdio.h>
#include <stdlib.h>
#define N (2048*2048)
#define block_Size 256
// CUDA kernel. Each thread takes care of one element of c
// Dot-product kernel: each thread multiplies one pair of elements into shared
// memory; then thread 0 of every block serially sums the block's products and
// atomically adds the partial sum into the global accumulator *c.
// NOTE(review): there is no `index < N` guard, so this relies on the launch
// covering exactly N threads (true in main because N is a multiple of
// block_Size) — confirm before reusing with other sizes.
__global__ void vecDot1(int *a, int *b, unsigned long long int *c)
{
__shared__ int temp[block_Size];
// Get our global thread ID
int index = blockIdx.x*blockDim.x+threadIdx.x;
// Create a temporary vector accessable by all threads in the block
temp[threadIdx.x] = a[index] * b[index];
// Thread barrier
__syncthreads();
// Let thread 0 in all blocks add up the temp vector
// (serial per-block sum; a tree reduction would be faster but this is simple)
if( threadIdx.x == 0) {
unsigned long long int sum = 0;
for( int i = 0; i<block_Size; i++)
sum += (unsigned long long int)temp[i];
// Add result to the global sum
atomicAdd(c, sum);
}
}
// Host driver: computes the dot product of a = [0..N) and b = all ones on the
// GPU and checks it against the closed form N*(N-1)/2.
int main( int argc, char* argv[] )
{
// Host vectors
int *h_a, *h_b;
unsigned long long int *h_c;
// Device vectors
int *d_a, *d_b;
unsigned long long int *d_c;
// Size, in bytes, of each vector
int size = N*sizeof(int);
// Allocate memory for each vector on host
h_a = (int*)malloc(size);
h_b = (int*)malloc(size);
// Ok, so this is the global sum, a vector of length 1!
h_c = (unsigned long long int*)malloc(sizeof(unsigned long long int));
// Allocate memory for each vector on GPU (no error checks — NOTE(review))
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, sizeof(unsigned long long int));
int i;
// Initialize vectors on host: a = 0..N-1, b = all ones, so a.b = N(N-1)/2
for( i = 0; i < N; i++ ) {
h_a[i] = i;
h_b[i] = 1;
}
*h_c = 0;
// Copy host vectors to device (the d_c copy also zeroes the accumulator)
cudaMemcpy( d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, size, cudaMemcpyHostToDevice);
cudaMemcpy( d_c, h_c, sizeof(unsigned long long int), cudaMemcpyHostToDevice);
// Execute the kernel (ceil-div grid; with N = 2048*2048 it divides exactly)
int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
vecDot1<<<grid_Size, block_Size>>>(d_a, d_b, d_c);
// Copy array back to host (blocking copy also synchronizes with the kernel)
cudaMemcpy( h_c, d_c, sizeof(unsigned long long int), cudaMemcpyDeviceToHost );
// Display the result
printf("A.B = %llu\n", *h_c);
// The cast binds to N only, so N*(N-1) is computed in 64 bits (no overflow)
printf("N*(N-1)/2 = %llu\n", (unsigned long long int) N*(N-1)/2);
// Release device memory
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
// Release host memory
free(h_a); free(h_b); free(h_c);
return 0;
}
|
11,835 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float* var_19,float* var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41,float var_42,float var_43,float var_44,float var_45,float var_46,float var_47) {
float tmp_1 = (+1.7017E-35f - (var_2 - sinf(var_3 / var_4)));
comp += tmp_1 * var_5 / (var_6 + var_7 - floorf(cosf(var_8 / -1.8038E-20f)));
comp += var_9 / (var_10 * -1.7062E35f + var_11 / var_12 * var_13);
comp = var_14 + (-1.0880E-36f * var_15 - (var_16 / var_17 * var_18));
for (int i=0; i < var_1; ++i) {
var_19[i] = +1.1136E-44f;
comp += var_19[i] + fmodf((+1.3711E35f - tanhf(-1.7783E-30f * var_21)), -1.8809E35f * +1.5638E-43f / (+1.7277E27f + (-1.7914E-35f / -1.1059E-43f)));
var_20[i] = (+1.5688E23f - var_22 / logf(powf(fmodf(+0.0f, (-1.0794E-35f * logf((var_23 * (var_24 / ldexpf(+1.1459E-3f / var_25 - +0.0f, 2)))))), var_26 / (-1.0921E34f * +1.3476E-37f + ceilf((+1.1791E34f * atan2f(cosf(var_27 + var_28), +1.4517E27f)))))));
comp = var_20[i] * (+1.4247E-22f * atanf((-0.0f * +1.7139E-43f)));
}
if (comp == atanf(-1.6008E1f)) {
float tmp_2 = (+1.1075E35f - atan2f(cosf((+0.0f / asinf(-1.3512E23f + var_29))), var_30 - log10f((+1.2994E-36f * -1.8890E-12f))));
comp += tmp_2 + var_31 / (var_32 - +1.1577E-41f);
comp = var_33 / +1.8852E35f - (var_34 / var_35 * +1.9336E-43f - var_36);
}
if (comp >= var_37 + (var_38 + (+1.4578E-41f / (-1.0898E-37f * var_39)))) {
float tmp_3 = +1.4485E3f;
float tmp_4 = ldexpf(expf(var_40 - -1.3678E-44f + var_41 + var_42), 2);
float tmp_5 = (+0.0f * +1.6230E-44f);
comp += tmp_5 / tmp_4 + tmp_3 * -1.0934E35f + var_43 * (+1.6037E34f * expf(-1.5607E-37f * var_44 + var_45 * var_46 + var_47));
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float* tmp_20 = initPointer( atof(argv[20]) );
float* tmp_21 = initPointer( atof(argv[21]) );
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
float tmp_36 = atof(argv[36]);
float tmp_37 = atof(argv[37]);
float tmp_38 = atof(argv[38]);
float tmp_39 = atof(argv[39]);
float tmp_40 = atof(argv[40]);
float tmp_41 = atof(argv[41]);
float tmp_42 = atof(argv[42]);
float tmp_43 = atof(argv[43]);
float tmp_44 = atof(argv[44]);
float tmp_45 = atof(argv[45]);
float tmp_46 = atof(argv[46]);
float tmp_47 = atof(argv[47]);
float tmp_48 = atof(argv[48]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40,tmp_41,tmp_42,tmp_43,tmp_44,tmp_45,tmp_46,tmp_47,tmp_48);
cudaDeviceSynchronize();
return 0;
}
|
11,836 | #include "includes.h"
// Sets *pinned_return to 1 if any in-range element of `input` is NaN or
// infinite. One thread per element; threads that see a finite value write
// nothing, so the caller must pre-initialize *pinned_return to 0.
__global__ void is_nan_or_inf_kernel(float *input, size_t size, int *pinned_return)
{
    const size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    const float v = input[idx];
    if (!isfinite(v))   // false exactly for NaN and +/-inf
        *pinned_return = 1;
}
11,837 | #include <stdio.h>
#include <cuda.h>
// Fill `vector`: odd global ids store their own index, even ids store
// vectorsize * vectorsize. No bounds guard — the launch in main covers
// exactly N == vectorsize threads.
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
    const unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    const bool isOdd = (id % 2u) != 0u;
    vector[id] = isOdd ? id : vectorsize * vectorsize;
}
#define BLOCKSIZE 10
#define N BLOCKSIZE
// Host driver: fills an N-element device array with dkernel and prints it.
int main(int nn, char *str[]) {
unsigned *vector, *hvector;
// device buffer (cudaMalloc return unchecked — NOTE(review): add a check)
cudaMalloc(&vector, N * sizeof(unsigned));
hvector = (unsigned *)malloc(N * sizeof(unsigned));
// ceil-div grid; with N == BLOCKSIZE this is a single block
unsigned nblocks = ceil((float)N / BLOCKSIZE);
dkernel<<<nblocks, BLOCKSIZE>>>(vector, N);
// blocking copy also synchronizes with the kernel launch above
cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
for (unsigned ii = 0; ii < N; ++ii) {
printf("%4d ", hvector[ii]);
}
printf("\n");
// NOTE(review): vector/hvector are never freed — harmless at exit, but untidy
return 0;
}
|
11,838 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <string.h>
#include <ctype.h>
#include <limits.h>
#define INPUT_FILE "inp.txt"
#define NUM_BUCKETS 10
#define Q2A_OUT_FILE "q2a.txt"
#define Q2B_OUT_FILE "q2b.txt"
#define Q2C_OUT_FILE "q2c.txt"
typedef struct vector {
int *elements;
int capacity;
int size;
}vector;
void int_vector_init(vector *vector);
int int_vector_add(vector* vector, int element);
void int_vector_free(vector *vector);
int chopString(char *buf, size_t size);
void bucketize(vector *a, int *b);
int findMin(vector* vector);
void prefixSum(vector *a, int *b);
// Histogram via tree reduction held in GLOBAL memory.
// phaseOne == true : each thread buckets its input value (value/100) into its
//   own NUM_BUCKETS-wide slice of d_intermediate (pre-zeroed by the host),
//   then the block tree-reduces the slices and thread 0 writes the block's
//   bucket counts to d_out[blockIdx.x*NUM_BUCKETS ..].
// phaseOne == false: d_intermediate already holds per-block partial
//   histograms from a previous launch; only the reduction/copy-out runs
//   (d_in is not read in this phase).
// Since tid < s <= blockDim.x/2, every read stays inside the block's own
// slice, so __syncthreads() is a sufficient barrier for the reduction.
// NOTE(review): the phase-one load d_in[myId] is unguarded — confirm the
// launch never creates threads with myId >= size.
__global__ void global_reduce_kernel(int * d_out, int * d_in, int * d_intermediate, int size, bool phaseOne)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// do reduction in global mem
if(phaseOne)
{
int index = d_in[myId] / 100;
//printf("block dim: %d\n", blockDim.x);
d_intermediate[myId*NUM_BUCKETS + index] = 1;
//printf("index: %d value: %d\n", myId*NUM_BUCKETS + index, d_in[myId] );
__syncthreads(); //every thread computes its bucket for number
}
//then combine using reduce
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if(tid < s && myId + s < size)
{
for (int i = 0; i < NUM_BUCKETS; i++)
{
d_intermediate[myId*NUM_BUCKETS + i] += d_intermediate[(myId + s)* NUM_BUCKETS + i];
}
}
__syncthreads();
}
//copy intermediate to block output
if (tid == 0)
{
for(int i = 0; i < NUM_BUCKETS; i++)
{
d_out[blockIdx.x*NUM_BUCKETS + i] = d_intermediate[myId*NUM_BUCKETS + i];
}
}
}
// Two-phase histogram reduction like global_reduce_kernel, but the per-thread
// NUM_BUCKETS-wide slices live in dynamic SHARED memory; the launch supplies
// blockDim.x * NUM_BUCKETS * sizeof(int) as the third <<<>>> argument.
// NOTE(review): the loads from d_in are unguarded — confirm the launch never
// creates threads with myId >= size in phase one.
__global__ void shmem_reduce_kernel(int * d_out, const int * d_in, int size, bool phaseOne)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ int sdata[];//let's use sdata like we use d_intermediate above
//__shared__ int b[10] = {0};
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
if(phaseOne)
{
// phase 1: seed with a one-hot histogram of this thread's value (value/100)
int index = d_in[myId] / 100;
for(int i = 0; i < NUM_BUCKETS; i++)
{
if(index == i)
{
sdata[tid*NUM_BUCKETS + i] = 1;
}
else
{
sdata[tid*NUM_BUCKETS + i] = 0;
}
}
}
else
{
// phase 2: seed with the per-block partial histograms produced by phase 1
for(int i = 0; i < NUM_BUCKETS; i++)
{
sdata[tid*NUM_BUCKETS + i] = d_in[myId*NUM_BUCKETS + i];
}
}
__syncthreads();
// make sure entire block is loaded!
// do reduction in shared mem (tid < s keeps reads inside this block's buffer)
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if(tid < s && myId + s < size)
{
for (int i = 0; i < NUM_BUCKETS; i++)
{
sdata[tid*NUM_BUCKETS + i] += sdata[(tid + s)* NUM_BUCKETS + i];
}
}
__syncthreads();
}
// only thread 0 writes result for this block back to global mem
// (note the +=: d_out must be zeroed by the host before the first launch)
if (tid == 0)
{
for(int i = 0; i < NUM_BUCKETS; i++)
{
d_out[blockIdx.x*NUM_BUCKETS + i] += sdata[tid * NUM_BUCKETS + i];
}
}
}
// Hillis–Steele style inclusive prefix sum of d_in into d_out.
// NOTE(review): d_out[myId - d] is read and d_out[myId] written in the same
// step with no double buffering, and __syncthreads() only synchronizes
// threads of one block — with the <<<10, 1>>> launch used by run_c every
// block has a single thread, so there is no synchronization between the 10
// workers at all. Correct results are not guaranteed; verify on target
// hardware or rewrite with double buffering before relying on this.
__global__ void parallel_prefix_kernel(int * d_out, const int * d_in, int size) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int d = 1;
d_out[myId] = d_in[myId];
while(d < size)
{
if(myId+1 > d && myId < size)
{
d_out[myId] += d_out[myId - d];
}
d *= 2;
__syncthreads();
}
}
// Two-stage launch of global_reduce_kernel: stage 1 reduces the raw values
// into one partial histogram per block (written to b_intermediate); stage 2
// combines those partials into b using a single block whose thread count is
// rounded up to a power of two (extra threads are masked inside the kernel
// by the `myId + s < size` guard).
void reduce_a(int * b, int * b_intermediate, int * d_intermediate, int * d_in,
int size)
{
// assumes that size is not greater than maxThreadsPerBlock^2
// and that size is a multiple of maxThreadsPerBlock
const int maxThreadsPerBlock = 1024; //increased from 512 to 1024 to handle 1024^2 values
int threads = maxThreadsPerBlock;
int blocks = (size + (maxThreadsPerBlock - 1)) / maxThreadsPerBlock;
//printf("blocks: %d, size: %d\n", blocks, size);
global_reduce_kernel<<<blocks, threads>>>(b_intermediate, d_in, d_intermediate, size, true);
int newSize = blocks;
int powerOfTwo = 1;
while (powerOfTwo < blocks)
{
powerOfTwo *= 2;
}
threads = powerOfTwo; // launch one thread for each block in prev step
blocks = 1;
global_reduce_kernel<<<blocks, threads>>>(b, d_in, b_intermediate, newSize, false);
}
// Shared-memory variant of reduce_a: two-stage launch of shmem_reduce_kernel.
// Stage 1 writes one partial histogram per block into d_intermediate; stage 2
// combines the partials into b with a single block. The third launch argument
// sizes the dynamic shared buffer: NUM_BUCKETS ints per thread.
void reduce_b(int * b, int * d_intermediate, int * d_in,
int size)
{
// assumes that size is not greater than maxThreadsPerBlock^2
// and that size is a multiple of maxThreadsPerBlock
const int maxThreadsPerBlock = 1024; //increased from 512 to 1024 to handle 1024^2 values
int threads = maxThreadsPerBlock;
int blocks = (size + (maxThreadsPerBlock - 1)) / maxThreadsPerBlock;
shmem_reduce_kernel<<<blocks, threads, NUM_BUCKETS * threads * sizeof(int)>>>(d_intermediate, d_in, size, true);
int newSize = blocks;
int powerOfTwo = 1;
while (powerOfTwo < blocks)
{
powerOfTwo *= 2;
}
threads = powerOfTwo; // launch one thread for each block in prev step
blocks = 1;
shmem_reduce_kernel<<<blocks, threads, threads* NUM_BUCKETS * sizeof(int)>>>(b, d_intermediate, newSize, false);
}
// Launch the prefix-sum kernel over `size` bucket counts: ten single-thread
// blocks, one per bucket (see parallel_prefix_kernel for its caveats).
void run_c(int *b, int * d_in, int size) {
    const dim3 grid(10);
    const dim3 block(1);
    parallel_prefix_kernel<<<grid, block>>>(b, d_in, size);
}
// Write the NUM_BUCKETS counts to `path` as a single comma-separated line.
// (The three output sections of main were identical except for the file name;
// this also fixes the q2b section printing the wrong name in its error path.)
static void writeBuckets(const char *path, const int *counts)
{
    FILE *outFp = fopen(path, "w");
    if (outFp == NULL)
    {
        printf("can't open file %s to write\n", path);
        return;
    }
    fprintf(outFp, "%d", counts[0]);
    for (int i = 1; i < NUM_BUCKETS; i++)
    {
        fprintf(outFp, ", %d", counts[i]);
    }
    fprintf(outFp, "\n");
    fclose(outFp);
}

// Driver: reads integers from inp.txt, histograms them (value/100 -> 10
// buckets) with a global-memory reduce (q2a.txt) and a shared-memory reduce
// (q2b.txt), then prefix-sums the bucket counts (q2c.txt).
int main(int argc, char **argv)
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp devProps;
    if (cudaGetDeviceProperties(&devProps, dev) == 0)
    {
    }
    FILE *fp;
    if ((fp = fopen(INPUT_FILE, "r")) == 0)
    {
        printf("%s cannot be found\n", INPUT_FILE);
        exit(-1);
    }
    char separators[] = " ,";
    char buf[100];
    char *token;
    int offset = 0;
    vector *a = (vector *)malloc(sizeof(vector));
    int_vector_init(a);
    // Tokenize the file; chopString keeps a number that was split across two
    // reads intact by carrying its digits over to the next fgets call.
    while (fgets(buf + offset, sizeof buf - offset, fp) != NULL)
    {
        offset = chopString(buf, sizeof buf);
        int indexOfLastNum = sizeof buf - offset - 1; // -1 to not copy '\0'
        token = strtok(buf, separators);
        while (token != NULL)
        {
            int num = atoi(token);
            int_vector_add(a, num);
            token = strtok(NULL, separators);
        }
        memcpy(buf, &buf[indexOfLastNum], offset);
    }
    fclose(fp); // FIX: the input file was never closed
    int b[10] = {0};
    bucketize(a, b); // CPU reference histogram (kept for debugging)
    // FIX: capture the element count now — the original freed `a` below and
    // then read a->size in the reduce_a/reduce_b calls (use-after-free).
    const int num_elements = a->size;
    const int ARRAY_BYTES = sizeof(int) * num_elements;
    // declare GPU memory pointers
    int *d_in, *d_intermediate, *b_intermediate, *a_out, *b_out, *c_out;
    // allocate GPU memory
    cudaMalloc((void **)&d_in, ARRAY_BYTES);
    cudaMalloc((void **)&d_intermediate, ARRAY_BYTES * NUM_BUCKETS); // overallocated
    cudaMalloc((void **)&b_intermediate, ARRAY_BYTES * NUM_BUCKETS);
    cudaMalloc((void **)&a_out, sizeof(int) * NUM_BUCKETS);
    cudaMalloc((void **)&b_out, sizeof(int) * NUM_BUCKETS);
    cudaMalloc((void **)&c_out, sizeof(int) * NUM_BUCKETS);
    // the kernels accumulate into these buffers, so zero them first
    cudaMemset(b_out, 0, sizeof(int) * NUM_BUCKETS);
    cudaMemset(a_out, 0, sizeof(int) * NUM_BUCKETS);
    cudaMemset(b_intermediate, 0, ARRAY_BYTES * NUM_BUCKETS);
    cudaMemset(d_intermediate, 0, ARRAY_BYTES * NUM_BUCKETS);
    // transfer the input array to the GPU
    cudaMemcpy(d_in, a->elements, ARRAY_BYTES, cudaMemcpyHostToDevice);
    int_vector_free(a); // safe now: only num_elements is used below
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // launch the kernels, timing each stage with events
    reduce_a(a_out, b_intermediate, d_intermediate, d_in, num_elements);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventRecord(start, 0);
    reduce_b(b_out, d_intermediate, d_in, num_elements);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventRecord(start, 0);
    run_c(c_out, b_out, NUM_BUCKETS);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop); // duration of run_c only
    int h_a_out[NUM_BUCKETS];
    int h_b_out[NUM_BUCKETS];
    int h_c_out[NUM_BUCKETS];
    // copy back data from GPU
    cudaMemcpy(h_a_out, a_out, sizeof(int) * NUM_BUCKETS, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_b_out, b_out, sizeof(int) * NUM_BUCKETS, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_c_out, c_out, sizeof(int) * NUM_BUCKETS, cudaMemcpyDeviceToHost);
    writeBuckets(Q2A_OUT_FILE, h_a_out);
    writeBuckets(Q2B_OUT_FILE, h_b_out);
    // debug: CPU prefix sum over the GPU bucket counts (result unused)
    vector *wrapper = (vector *)malloc(sizeof(vector));
    int_vector_init(wrapper);
    for (int i = 0; i < NUM_BUCKETS; i++)
    {
        int_vector_add(wrapper, h_b_out[i]);
    }
    int cpu_prefix_out[10] = {0};
    prefixSum(wrapper, cpu_prefix_out);
    int_vector_free(wrapper); // FIX: was leaked in the original
    writeBuckets(Q2C_OUT_FILE, h_c_out);
    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_intermediate);
    cudaFree(b_intermediate);
    cudaFree(a_out);
    cudaFree(b_out);
    cudaFree(c_out);
    return 0;
}
// Initialize `vec` with room for one element and size 0. No-op on NULL.
void int_vector_init(vector *vec) {
    if (vec == NULL)
        return;
    vec->elements = (int *)malloc(sizeof(int));
    vec->capacity = 1;
    vec->size = 0;
}
// Append `element`, doubling the capacity when full.
// Returns 1 on success, 0 if realloc failed (the vector is left unchanged).
int int_vector_add(vector *vec, int element) {
    if (vec->size + 1 == vec->capacity)
    {
        int *grown = (int *)realloc(vec->elements, vec->capacity * 2 * sizeof(int));
        if (grown == NULL)
        {
            return 0;
        }
        vec->capacity *= 2;
        vec->elements = grown;
    }
    vec->elements[vec->size] = element;
    vec->size += 1;
    return 1;
}
// Release the vector's element storage and the vector itself.
// FIX: no-op on NULL (the original dereferenced a NULL vector), consistent
// with int_vector_init's NULL check.
void int_vector_free(vector *vec){
    if (vec == NULL)
    {
        return;
    }
    free(vec->elements);
    free(vec);
}
//input: a array of int; output: b array of 10 int
void bucketize(vector *a, int *b) {
for(int i = 0; i < a->size; i++)
{
int index = a->elements[i] / 100;
b[index] += 1;
}
}
//goal is a running sum of a
//b must have 10 elements
// Inclusive prefix (running) sum of a's elements into b.
// b must have room for a->size entries (10 in this program).
void prefixSum(vector *a, int *b) {
    // FIX: the original allocated and int_vector_init'ed a `firstPass`
    // vector that was never used or freed (memory leak) — removed.
    int *elements = a->elements;
    int size = a->size;
    // FIX: guard the empty case — the original unconditionally read
    // elements[0].
    if (size <= 0)
        return;
    b[0] = elements[0];
    for (int i = 1; i < size; i++)
    {
        b[i] = b[i - 1] + elements[i];
    }
}
//assumes vector size >= 1
// Smallest element of `vec`; INT_MAX when vec is NULL or empty.
int findMin(vector *vec) {
    if (vec == NULL)
        return INT_MAX;
    int best = INT_MAX;
    const int *arr = vec->elements;
    const int n = vec->size;
    for (int i = 0; i < n; i++)
    {
        if (arr[i] < best)
            best = arr[i];
    }
    return best;
}
//returns offset - difference between size and index of last number and offset
int chopString(char *buf, size_t size){
int offset = 0;
int indexOfLastNum = size-2;
if(isdigit(buf[size-2]))
{
int index = size-2;
while(isdigit(buf[index]) && index > 0)
{
index--;
}
buf[index] = '\0';
indexOfLastNum = index+1;
offset = size - indexOfLastNum -1;//-1 to not copy '\0'
} else
{
offset = 0;
}
return offset;
}
|
11,839 | #include "merge_sort_cpu.cuh"
// Recursive top-down merge sort (ascending) of arr[0..length) into out.
// arr is left untouched; out must hold `length` floats.
void mergeSortAscCpu(float* arr, int length, float* out)
{
    // FIX: the original read arr[0] even for length <= 0 — an out-of-bounds
    // access on an empty input.
    if (length <= 0) {
        return;
    }
    if (length == 1) {
        out[0] = arr[0];
        return;
    }
    // split into halves
    int length_left = length / 2;
    int length_right = length - length_left;
    float* leftPart = new float[length_left];
    float* rightPart = new float[length_right];
    for (int i = 0; i < length_left; i++)
        leftPart[i] = arr[i];
    for (int i = 0; i < length_right; i++)
        rightPart[i] = arr[length_left + i];
    // sort each half, then merge directly into out (out does not alias the
    // temporaries, so the original's extra out_temp copy is unnecessary)
    float* out_left = new float[length_left];
    float* out_right = new float[length_right];
    mergeSortAscCpu(leftPart, length_left, out_left);
    mergeSortAscCpu(rightPart, length_right, out_right);
    mergeArraysAscCpu(out_left, out_right, length_left, length_right, out);
    // FIX: all of these temporaries were leaked in the original
    delete[] leftPart;
    delete[] rightPart;
    delete[] out_left;
    delete[] out_right;
}
// Merge two ascending arrays into `out` (stable: ties take the left element
// first). out must hold length_left + length_right floats.
void mergeArraysAscCpu(float* arr_left, float* arr_right, int length_left, int length_right, float* out)
{
    int li = 0, ri = 0, w = 0;
    // take the smaller head element until one side runs out
    while (li < length_left && ri < length_right)
        out[w++] = (arr_left[li] <= arr_right[ri]) ? arr_left[li++] : arr_right[ri++];
    // drain whichever side still has elements (at most one loop runs)
    while (li < length_left)
        out[w++] = arr_left[li++];
    while (ri < length_right)
        out[w++] = arr_right[ri++];
}
11,840 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
// a struct
typedef struct
{
int size;
char *_string;
} string_t;
__global__ void string_append(string_t*, string_t*, string_t*);
// Host driver: concatenates "Hello, " and "world!" on the device into a
// third managed string and prints it from the host.
int main(void)
{
string_t *str1, *str2, *str3;
// size1 excludes the '\0' (strlen) while size2 includes it (sizeof), so the
// concatenated buffer below ends up NUL-terminated for printf.
size_t size1 = strlen("Hello, ");
size_t size2 = sizeof("world!");
// allocate memory for str1, str2, and str3
// and for their string members.
// (cudaMallocManaged return values unchecked — NOTE(review): add checks)
cudaMallocManaged(&str1, sizeof(string_t));
cudaMallocManaged(&(str1->_string), size1*sizeof(char));
cudaMallocManaged(&str2, sizeof(string_t));
cudaMallocManaged(&(str2->_string), size2*sizeof(char));
cudaMallocManaged(&str3, sizeof(string_t));
cudaMallocManaged(&(str3->_string), (size1+size2) * sizeof(char));
// the CPU and the GPU can directly access memory allocated to str's
//In the following 4 lines the Host is accessing
// the memory allocated via cudaMallocManaged().
str1->size = size1;
memcpy(str1->_string, "Hello, ", size1);
str2->size = size2;
memcpy(str2->_string, "world!", size2);
// Since the kernel has been declared/defined with
// "__global__" keyword we are sure it will run on the device.
// So the device will be accessing memory allocated via cudaMallocManaged().
string_append<<<1, 1>>>(str1, str2, str3);
// the device and the host should be synchronized
// before we call printf in the host.
cudaDeviceSynchronize();
printf("%s\n", str3->_string);
exit(EXIT_SUCCESS);
}
// a kernel to append 2 strings in a 3rd string.
// Runs single-threaded (<<<1,1>>> in main). Device-side memcpy copies
// s1->size bytes of s1 then s2->size bytes of s2 into s3->_string; in main
// s2->size includes the terminating '\0' (it came from sizeof), so the
// result is NUL-terminated.
__global__ void string_append(string_t *s1, string_t *s2, string_t *s3)
{
memcpy(s3->_string, s1->_string, s1->size);
memcpy(s3->_string+(s1->size), s2->_string, s2->size);
}
|
11,841 | #include "solve_jacobi.cuh"
//#include <stdio.h>
// One Jacobi iteration, single-block version: thread i computes
// x_next[i] = (b[i] - sum_{j != i} A[i][j] * x[j]) / A[i][i].
// Assumes a one-block launch whose blockDim.x covers every row.
__global__ void solve1(float* dx, float* dA, float* db, float* dnextX, int size) {
    const int row = threadIdx.x;
    const float* rowA = dA + row * size;
    float acc = 0.0f;
    for (int col = 0; col < size; ++col) {
        if (col != row)
            acc += rowA[col] * dx[col];
    }
    dnextX[row] = (db[row] - acc) / rowA[row];
}
// One Jacobi iteration, multi-block version: the global thread index selects
// the row. No bounds guard — the launch must cover exactly `size` threads.
__global__ void solve2(float* dx, float* dA, float* db, float* dnextX, int size) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const float* rowA = dA + row * size;
    float acc = 0.0f;
    for (int col = 0; col < size; ++col) {
        if (col != row)
            acc += rowA[col] * dx[col];
    }
    dnextX[row] = (db[row] - acc) / rowA[row];
}
// One Jacobi iteration with the x vector staged in dynamic shared memory
// (the launch must pass size * sizeof(float) as the third <<<>>> argument;
// each block keeps its own full copy of x).
__global__ void solve3(float* dx, float* dA, float* db, float* dnextX, int size) {
extern __shared__ float shared_dx[];
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
//write to shared memory
// block-stride copy of dx into shared memory
for(int i=threadIdx.x; i < size; i = i+blockDim.x)
{
shared_dx[i] = dx[i];
}
__syncthreads();
float sum = 0;
for (int j = 0; j < size; j++) {
if (tidx != j) sum += dA[tidx*size + j] * shared_dx[j];
}
dnextX[tidx] = (db[tidx] - sum) / dA[tidx*size + tidx];
}
// loop unrolling 2
// Same as solve3 (shared-memory Jacobi step) with the accumulation loop
// manually unrolled by a factor of 2.
// NOTE(review): this assumes size is even — for odd sizes the j+1 step reads
// one element past the row. Confirm callers only pass even sizes.
__global__ void solve4(float* dx, float* dA, float* db, float* dnextX, int size) {
extern __shared__ float shared_dx[];
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
//write to shared memory
for(int i=threadIdx.x; i < size; i = i+blockDim.x)
{
shared_dx[i] = dx[i];
}
__syncthreads();
float sum = 0;
for (int j = 0; j < size; j = j+2) {
if (tidx != j) sum += dA[tidx*size + j] * shared_dx[j];
if (tidx != j+1) sum += dA[tidx*size + j+1] * shared_dx[j+1];
}
dnextX[tidx] = (db[tidx] - sum) / dA[tidx*size + tidx];
}
// Unfinished variant: declares the dynamic shared buffer but computes nothing.
__global__ void solve5(float* dx, float* dA, float* db, float* dnextX, int size) {
extern __shared__ float shared_dx[];
}
extern "C" {
// Measures, per block, the clock() delta across an otherwise empty kernel;
// thread 0 writes the elapsed counter value to clockOut[blockIdx.x].
// `volatile` keeps the first clock() read from being optimized away.
__global__ void Empty(volatile unsigned int* const clockOut)
{
volatile unsigned int clockValue = clock();
if(threadIdx.x == 0)
clockOut[blockIdx.x] = clock() - clockValue;
}
} // extern "C"
|
11,843 | #include "includes.h"
// For instrument b_id (one block per instrument, guarded by b_id < N_inst),
// shift the mid price by the instrument's alpha:
//   shifted_prc[b_id][i] = (1 + alphas[b_id][i]) * mid[i]
// Threads block-stride over the batch dimension.
__global__ void cudaGetShiftedMidPrice(int N_inst, int batch_size, float *alphas, float *mid, float *shifted_prc){
    int b_sz = blockDim.x, b_id = blockIdx.x, t_id = threadIdx.x;
    if(b_id < N_inst){
        for(int i=t_id; i<batch_size; i += b_sz){
            // FIX: 1.0f instead of 1. — the double literal promoted the whole
            // expression to double-precision arithmetic in a float kernel.
            shifted_prc[b_id * batch_size + i] = (1.0f + alphas[b_id * batch_size + i]) * mid[i];
        }
    }
}
11,844 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include "device_launch_parameters.h"
// Stub kernel: intended to average-filter the N-element array `a`;
// body not yet written.
__global__ void avg_fn(int *a, int N)
{
}
// Host entry point for the (unimplemented) average filter.
extern void average_filter(int *a, int N)
{
}
11,845 | /*
============================================================================
Mantas Miežinas, IFF-2
LD4b
============================================================================
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <fstream>
#include <iomanip>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
//Konstantos
const char DataFile[] = "MiezinasM_L4.txt";
const int MAX_THREADS = 5;
const int MAX_ARRAY_SIZE = 5;
//Struktura gamintojo modeliams saugoti
struct model {
char name[75];
int quantity;
double price;
};
//Konteinerine struktura saugoti modeliams
struct manufacturer {
char name[15];
int quantity;
model models[MAX_ARRAY_SIZE];
};
//Funkciju prototipai
void ReadFile(string filename, thrust::host_vector<manufacturer> &AllModels);
void PrintTable(thrust::host_vector<manufacturer> printOut);
void PrintResults(thrust::host_vector<model> printOut);
void Plus(thrust::device_vector<manufacturer> &manu, thrust::device_vector<model> &resultsArray);
int main() {
thrust::host_vector<manufacturer> AllModels(MAX_THREADS);
thrust::host_vector<model> results(MAX_ARRAY_SIZE);
ReadFile(DataFile, AllModels);
PrintTable(AllModels);
//Nusinuliname rezultatu masyvo elementus
for (int i = 0; i < MAX_ARRAY_SIZE; i++){
model data;
strcpy(data.name, "");
data.price = 0.0;
data.quantity = 0;
results[i] = data;
}
//Perkeliame duomenis is RAM i VRAM
thrust::device_vector<manufacturer> manu = AllModels;
thrust::device_vector<model> resultsArray = results;
//Iskvieciame sumuojancia funkcija
Plus(manu, resultsArray);
cudaDeviceSynchronize();
//Persikeliame rezultatus is VRAM(GPU) i RAM(CPU)
results = resultsArray;
//Atspausdiname rezultatus
PrintResults(results);
system("Pause");
return 0;
}
/*
============================================================================
ReadFile
Reads the input data file into a host vector of `manufacturer` records,
returned through the AllModels reference parameter (CPU memory).
Format per manufacturer: name count, then `count` lines of
model-name quantity price.
============================================================================
*/
void ReadFile(string filename, thrust::host_vector<manufacturer> &AllModels) {
    string title;
    int count;
    int j = 0;
    ifstream fin(filename);
    if (!fin) {
        cerr << "Couldn't open file!\n";
        return;
    }
    // FIX: loop on extraction success instead of !fin.eof() — the eof-based
    // loop ran one extra iteration after the last record, storing a stale
    // duplicate past the end of the data. Also bounded by the vector size so
    // an oversized file cannot index past AllModels. (An unused local
    // `model models[MAX_ARRAY_SIZE]` was removed as well.)
    while (j < (int)AllModels.size() && (fin >> title >> count)) {
        strcpy(AllModels[j].name, title.c_str());
        AllModels[j].quantity = count;
        for (int i = 0; i < count; i++) {
            model modelis;
            fin >> modelis.name >> modelis.quantity >> modelis.price;
            if (i < MAX_ARRAY_SIZE)       // keep the stream in sync but
                AllModels[j].models[i] = modelis; // never overflow the array
        }
        j++;
    }
    fin.close();
}
/*
============================================================================
PrintTable
Atspausdina pradinius duomenis lentelemis
============================================================================
*/
void PrintTable(thrust::host_vector<manufacturer> printOut){
cout << "-----------------------------------------------------------------------------\n";
for (manufacturer & manu : printOut){
cout << right << setw(35) << manu.name << "\n";
cout << "-----------------------------------------------------------------------------\n";
cout << left << setw(63) << "Modelio Pavadinimas"
<< setw(8) << "Kiekis"
<< setw(5) << "Kaina" << "\n";
cout << "-----------------------------------------------------------------------------\n";
for (int i = 0; i < manu.quantity; i++){
model forPrinting = manu.models[i];
cout << left << setw(3) << to_string(i + 1) + ")"
<< setw(60) << forPrinting.name
<< setw(8) << forPrinting.quantity
<< setw(5) << setprecision(4) << forPrinting.price
<< "\n";
}
cout << "-----------------------------------------------------------------------------\n";
}
}
/*
============================================================================
PrintResults
Atspausdina rezultatu vektoriu lenteleje
============================================================================
*/
void PrintResults(thrust::host_vector<model> printOut) {
cout << "-----------------------------------------------------------------------------\n";
cout << left << setw(63) << "Modelio Pavadinimas"
<< setw(8) << "Kiekis"
<< setw(5) << "Kaina" << "\n";
cout << "-----------------------------------------------------------------------------\n";
for (int i = 0; i < MAX_ARRAY_SIZE; i++) {
model forPrinting = printOut[i];
cout << left << setw(3) << to_string(i + 1) + ")"
<< setw(60) << forPrinting.name
<< setw(8) << forPrinting.quantity
<< setw(5) << setprecision(4) << forPrinting.price
<< "\n";
}
}
/*
============================================================================
Plus
Accumulates the per-manufacturer model arrays into the results vector,
returned by reference. Device vectors are used, so the storage lives in GPU
memory (each operator[] access is a host<->device transfer).
For result slot i this sums models[i] across all manufacturers: names are
concatenated with strcat, prices and quantities are added.
NOTE(review): the loop bounds look swapped — i ranges over manu.size() but
indexes resultsArray[i] / models[i], while j ranges over resultsArray.size()
but indexes manu[j]. The net result is correct only because MAX_THREADS ==
MAX_ARRAY_SIZE == 5 here; confirm before changing either constant.
============================================================================
*/
void Plus(thrust::device_vector<manufacturer> &manu, thrust::device_vector<model> &resultsArray) {
for (int i = 0; i < manu.size(); i++) {
for (int j = 0; j < resultsArray.size(); j++) {
model data = resultsArray[i];
manufacturer addData = manu[j];
strcat(data.name, addData.models[i].name);
data.price += addData.models[i].price;
data.quantity += addData.models[i].quantity;
resultsArray[i] = data;
}
}
}
11,846 | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#define NUMTHREADSPERBLOCK 16
/*** function declarations ***/
void PRINT_MAT(int P, int M, double * matr);
// save matrix to file
void save_gnuplot( double *M, size_t dim );
// evolve Jacobi
__global__ void evolve( double * matrix, double *matrix_new, int dimension );
// return the elapsed time
double seconds( void );
/*** end function declaration ***/
// Jacobi solver driver: reads dim/iterations/peek coordinates from argv,
// runs `iterations` sweeps of the evolve kernel on a (dim+2)^2 grid with
// fixed boundary values, then reports timing and the peeked element.
int main(int argc, char* argv[]){
    // timing variables
    double t_start, t_end, increment;
    // indexes for loops
    size_t i, j, it;
    // host matrices plus the device pair (device pointers swap every sweep)
    double *matrix, *matrix_new, *tmp_matrix;
    double *d_matrix, *d_matrix_new;
    int dimension = 0, iterations = 0, row_peek = 0, col_peek = 0;
    size_t byte_dimension = 0;
    // check on input parameters
    if(argc != 5) {
        fprintf(stderr,"\nwrong number of arguments. Usage: ./a.out dim it n m\n");
        return 1;
    }
    dimension = atoi(argv[1]);
    iterations = atoi(argv[2]);
    row_peek = atoi(argv[3]);
    col_peek = atoi(argv[4]);
    // FIX: these variables are int — printing them with %zu is undefined
    // behavior (wrong-sized varargs); use %d.
    printf("matrix size = %d\n", dimension);
    printf("number of iterations = %d\n", iterations);
    printf("element for checking = Mat[%d,%d]\n", row_peek, col_peek);
    if((row_peek > dimension) || (col_peek > dimension)){
        fprintf(stderr, "Cannot Peek a matrix element outside of the matrix dimension\n");
        fprintf(stderr, "Arguments n and m must be smaller than %d\n", dimension);
        return 1;
    }
    // FIX: sizeof(double*) was used instead of sizeof(double) — they happen
    // to coincide on 64-bit platforms, but the element type is double.
    byte_dimension = sizeof(double) * ( dimension + 2 ) * ( dimension + 2 );
    matrix = ( double* )malloc( byte_dimension );
    matrix_new = ( double* )malloc( byte_dimension );
    memset( matrix, 0, byte_dimension );
    memset( matrix_new, 0, byte_dimension );
    cudaMalloc((void**) &d_matrix, byte_dimension ); //allocating space for d_matrix
    cudaMalloc((void**) &d_matrix_new, byte_dimension );
    // fill initial values: interior cells start at 0.5
    for( i = 1; i <= dimension; ++i ){
        for( j = 1; j <= dimension; ++j ){
            matrix[ ( i * ( dimension + 2 ) ) + j ] = 0.5;
        }
    }
    // set up borders: linear gradient along the left column and bottom row
    increment = 100.0 / ( dimension + 1 );
    for( i=1; i <= dimension+1; ++i ){
        matrix[ i * ( dimension + 2 ) ] = i * increment;
        matrix[ ( ( dimension + 1 ) * ( dimension + 2 ) ) + ( dimension + 1 - i ) ] = i * increment;
        matrix_new[ i * ( dimension + 2 ) ] = i * increment;
        matrix_new[ ( ( dimension + 1 ) * ( dimension + 2 ) ) + ( dimension + 1 - i ) ] = i * increment;
    }
    // start algorithm
    t_start = seconds();
    cudaMemcpy( d_matrix, matrix, byte_dimension, cudaMemcpyHostToDevice );
    cudaMemcpy( d_matrix_new, matrix_new, byte_dimension, cudaMemcpyHostToDevice );
    dim3 gridDim( (dimension+NUMTHREADSPERBLOCK)/NUMTHREADSPERBLOCK, (dimension+NUMTHREADSPERBLOCK)/NUMTHREADSPERBLOCK );
    dim3 blockDim(NUMTHREADSPERBLOCK , NUMTHREADSPERBLOCK);
    for( it = 0; it < iterations; ++it ){
        evolve<<< gridDim, blockDim >>>( d_matrix, d_matrix_new, dimension );
        // swap the pointers: "new" becomes "current" for the next sweep
        tmp_matrix = d_matrix;
        d_matrix = d_matrix_new;
        d_matrix_new = tmp_matrix;
    }
    // blocking copy also synchronizes with the queued kernels
    cudaMemcpy( matrix, d_matrix, byte_dimension, cudaMemcpyDeviceToHost );
    t_end = seconds();
    printf( "\nelapsed time = %f seconds\n", t_end - t_start );
    printf( "\nmatrix[%d,%d] = %f\n", row_peek, col_peek, matrix[ ( row_peek + 1 ) * ( dimension + 2 ) + ( col_peek + 1 ) ] );
    save_gnuplot( matrix, dimension );
    free( matrix );
    free( matrix_new );
    cudaFree(d_matrix);
    cudaFree(d_matrix_new);
    return 0;
}
// Print a P x M row-major matrix, one row per output line.
void PRINT_MAT(int P, int M, double * matr){
    for (int row = 0; row < P; ++row) {
        for (int col = 0; col < M; ++col)
            printf("%0.1f ", matr[col + row * M]);
        printf("\n");
    }
}
// One Jacobi step: each thread updates one interior point of matrix_new as
// the average of its four neighbours in matrix. The grid is
// (dimension+2)^2 with a one-cell boundary frame that is never written.
__global__ void evolve( double * matrix, double * matrix_new, int dimension ){
    const int row    = threadIdx.x + (blockIdx.x * blockDim.x);
    const int col    = threadIdx.y + (blockIdx.y * blockDim.y);
    const int stride = dimension + 2;
    // only interior points (1..dimension in each direction) are updated
    if (row > 0 && row <= dimension && col > 0 && col <= dimension) {
        matrix_new[row * stride + col] = ( 0.25 ) *
            ( matrix[(row - 1) * stride + col]
            + matrix[row * stride + (col + 1)]
            + matrix[(row + 1) * stride + col]
            + matrix[row * stride + (col - 1)] );
    }
}
// Dump the full (dimension+2)^2 grid (boundaries included) to
// "solution.dat" as "x y value" triplets for gnuplot.
void save_gnuplot( double *M, size_t dimension ){
    size_t i , j;
    const double h = 0.1;   // spacing used only for the plot coordinates
    FILE *file;
    file = fopen( "solution.dat", "w" );
    // BUGFIX: report failure instead of dereferencing a NULL FILE*
    if( file == NULL ){
        fprintf( stderr, "save_gnuplot: cannot open solution.dat for writing\n" );
        return;
    }
    for( i = 0; i < dimension + 2; ++i )
        for( j = 0; j < dimension + 2; ++j )
            fprintf(file, "%f\t%f\t%f\n", h * j, -h * i, M[ ( i * ( dimension + 2 ) ) + j ] );
    fclose( file );
}
// A Simple timer for measuring the walltime
// Wall-clock time in seconds since the epoch, microsecond resolution.
double seconds(){
    struct timeval now;
    gettimeofday( &now, (struct timezone *)0 );
    return (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
}
|
11,847 | #include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
// CPU reference multiply: C(hA x wB) = A(hA x wA) * B(wA x wB),
// all matrices stored row-major.
void Mul(float *A, float *B, int hA, int wA, int wB, float *C)
{
    for (int row = 0; row < hA; row++) {
        for (int col = 0; col < wB; col++) {
            C[row * wB + col] = 0.0;
            for (int k = 0; k < wA; k++)
                C[row * wB + col] += A[row * wA + k] * B[k * wB + col];
        }
    }
}
// Naive GPU matrix multiply, one thread per C element:
// thread x indexes rows of A (i < hA), thread y indexes columns of B (j < wB).
// NOTE(review): accumulates with +=, so C must be zero-initialized before
// launch (main memcpy's a zeroed host buffer into C_G) — keep that invariant.
__global__ void MulGpu(float *A, float *B, float *C, int hA, int wA, int wB)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
// bounds guard: the launch grid may be larger than hA x wB
if(i < hA && j < wB) for(int k = 0; k < wA ; k++) C[i*wB+j] += A[i*wA+k]*B[k*wB+j];
}
// Fill the hM x wM row-major matrix: k on the diagonal, -1/wM elsewhere.
void init_matrix(float *M, int hM, int wM, float k)
{
    for (int r = 0; r < hM; r++)
        for (int c = 0; c < wM; c++)
            M[r * wM + c] = (r == c) ? k * 1.0f : -1.0f / (float)(wM);
}
// Print an hM x wM row-major matrix, one row per line, fixed width.
void print_matrix(float *M, int hM, int wM)
{
    for (int r = 0; r < hM; r++) {
        for (int c = 0; c < wM; c++)
            printf("%4.1f ", M[r * wM + c]);
        printf("\n");
    }
}
/*
 * Recompute A*B on the CPU and compare against C element-wise.
 * Returns 1 when every element matches within 1e-5, 0 otherwise
 * (the first mismatch is reported on stdout).
 */
int diff(float *A, float *B, int hA, int wA, int wB, float *C)
{
    int size_C = wB * hA;
    float *C_cpu = (float*)malloc(size_C*sizeof(float));
    // guard against allocation failure instead of dereferencing NULL
    if (C_cpu == NULL)
        return 0;
    int i,j,k;
    for (i=0; i<hA; i++)
        for (j=0; j<wB; j++){
            C_cpu[i*wB+j] = 0.0;
            for (k=0; k<wA; k++){
                C_cpu[i*wB+j] += A[i*wA+k]*B[k*wB+j];
            }
        }
    for (i=0; i<hA; i++)
        for (j=0; j<wB; j++)
            if (fabsf(C_cpu[i*wB+j]-C[i*wB+j])>1e-5)
            {
                printf("[%i,%i]: %f!=%f\n", i, j, C_cpu[i*wB+j], C[i*wB+j]);
                free(C_cpu);   // BUGFIX: was leaked on the early return
                return(0);
            }
    free(C_cpu);               // BUGFIX: was leaked on the success path
    return(1);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Program main: C = A * B on the GPU, verified against the CPU reference.
// Usage: ./exec hA hB/wA wB
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
    float *A, *B, *C;        // host matrices
    float *A_G, *B_G, *C_G;  // device matrices
    int hA, wA, hB, wB;
    int i;
    setbuf(stdout, NULL);
    if (argc!=4){
        printf("./exec hA hB/WA wB\n");
        exit(-1);
    }
    hA = atoi(argv[1]);
    hB = wA = atoi(argv[2]);   // inner dimension shared by A and B
    wB = atoi(argv[3]);
    // Init A and B, malloc C
    int size_A = wA * hA;
    A = (float*)malloc(size_A*sizeof(float));
    init_matrix(A, hA, wA, 1.0);
    int size_B = wB * hB;
    B = (float*)malloc(size_B*sizeof(float));
    init_matrix(B, hB, wB, 2.0);
    int size_C = wB * hA;
    C = (float*)malloc(size_C*sizeof(float));
    // C must start at zero: the kernel accumulates with +=
    for (i = 0; i < (hA*wB); i++) {
        C[i] = 0.0;
    }
    cudaMalloc((void**) &A_G, sizeof (float)*hA*wA);
    cudaMalloc((void**) &B_G, sizeof (float)*hB*wB);
    cudaMalloc((void**) &C_G, sizeof (float)*hA*wB);
    cudaMemcpy(A_G,A,sizeof (float)*hA*wA,cudaMemcpyHostToDevice);
    cudaMemcpy(B_G,B,sizeof (float)*hB*wB,cudaMemcpyHostToDevice);
    cudaMemcpy(C_G,C,sizeof (float)*hA*wB,cudaMemcpyHostToDevice);
    dim3 nThreads_per_block(32,32);
    // BUGFIX: one thread per C element. MulGpu maps x->rows (hA) and
    // y->columns (wB), so the grid is ceil(hA/32) x ceil(wB/32); the old
    // (hA*wB/32)^2 grid was wildly oversized and could exceed launch limits.
    dim3 nBlocks((hA + 31)/32, (wB + 31)/32);
    MulGpu<<<nBlocks,nThreads_per_block>>>(A_G, B_G, C_G, hA, wA, wB);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(C,C_G,sizeof (float)*hA*wB,cudaMemcpyDeviceToHost);
    if (!diff(A, B, hA, wA, wB, C))
        printf("ERROR=GPU.vs.CPU matrix mult differs\n");
    // BUGFIX: release host and device memory (was leaked)
    free(A); free(B); free(C);
    cudaFree(A_G); cudaFree(B_G); cudaFree(C_G);
    // BUGFIX: return 0 on success (returning 1 signals failure to the shell)
    return 0;
}
|
11,848 | #include "stdio.h"
#define N 4
#define M 4
#define thx 2
#define thy 2
// Element-wise sum c = a + b over an M x N grid of ints; thread x indexes
// the first dimension, thread y the second.
// NOTE(review): the flat index uses N as the stride for i (ind = i*N + j)
// while j ranges over M; this is only consistent when M == N, as here
// (both are 4) — confirm before reusing with rectangular grids.
__global__ void add( int *a, int *b, int *c )
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int ind = i * N + j;
c[ind] = a[ind] + b[ind];
}
// Allocate three M x N managed arrays, run the 2D add kernel, print the
// result grid and release the allocations.
int main() {
    int *a, *b, *c;
    const size_t bytes = M * N * sizeof(int);
    cudaMallocManaged(&a, bytes);
    cudaMallocManaged(&b, bytes);
    cudaMallocManaged(&c, bytes);
    // inputs: a[k] = k and b[k] = 2k, so c[k] comes back as 3k
    for (int k = 0; k < M * N; k++) {
        a[k] = k;
        b[k] = 2 * k;
    }
    dim3 blocks(N / thx, M / thy);
    dim3 threads(thx, thy);
    add<<< blocks, threads >>>(a, b, c);
    // managed memory: synchronize before the host reads c
    cudaDeviceSynchronize();
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < M; col++)
            printf("%d ", c[row * N + col]);
        printf("\n");
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
}
|
11,849 | #include "includes.h"
#define number_type unsigned long long
const int block_size = 1024; // 2**10 threads
const int thread_size = 32768 * 2 * 2; // 2**15 max elements per thread always keep even number
const number_type max_chunk_size = pow(2, 31) + pow(2, 30); // 2**31 items cause reduce ram use else failed allocations, always keep even number
cudaError_t find_primes_cuda(number_type n, number_type r);
void set_one(char* dev_arr, unsigned int size);
template <typename T>
void reset(T* dev_arr, size_t count);
template <typename T>
T* device(size_t count);
template <typename T>
T* host(size_t count);
void confirmCudaNoError();
void cudaWait();
template <typename T>
T* to_host(const T* dev_ptr, size_t count, T* host_ptr = nullptr);
template <typename T>
T* to_device(const T* host_ptr, size_t count, T* dev_ptr = nullptr);
//__global__ void markNonPrimeKernel(char* dev_chunk, number_type* min_primes, number_type currentValue, number_type currentValueSqr,
// const number_type startValue, const number_type endValue, const int thread_size)
//{
// const auto myThreadId = blockIdx.x * block_size + threadIdx.x;
// const auto myStartValue = startValue + myThreadId * thread_size;
// auto myEndValue = myStartValue + thread_size;
// Single-threaded scan for the next still-marked candidate after
// `currentValue`; the found index is written to *d_ans.
// Only thread 0 does any work, so launch with one block.
// NOTE(review): dev_chunk is indexed directly with the value i, so the
// chunk is assumed to start at value 0 — startValue is accepted but never
// used; confirm against the (commented-out) marking kernel above.
// NOTE(review): when no flag is found before endValue, *d_ans is left
// unmodified — callers must pre-initialize it to a sentinel.
__global__ void getNextPrimeFast(char* dev_chunk, number_type currentValue, const number_type startValue, const number_type endValue, number_type* d_ans)
{
auto threadId = threadIdx.x;
if (threadId == 0)
{
// linear scan: dev_chunk[i] == 1 marks a candidate still considered prime
for (auto i = currentValue + 1; i < endValue; i++)
{
auto number = dev_chunk[i];
if (number == 1)
{
*d_ans = i;
return;
}
}
}
}
11,850 | #include <stdio.h>
#define ERRORCHECK()\
cudaDeviceSynchronize();\
cudaError_t error = cudaGetLastError();\
if(error != cudaSuccess)\
{\
printf("CUDA error: %s\n", cudaGetErrorString(error));\
exit(-1);\
}\
// Fill a pitched 2D allocation with the constant 17.3.
// NOTE(review): the loops do not use threadIdx/blockIdx, so every launched
// thread writes the entire width x height array redundantly — this mirrors
// the cudaMallocPitch example in the CUDA Programming Guide; correct but
// not parallelized.
__global__ void MyKernel(float* devPtr, size_t pitch, int width, int height)
{
for(int r=0; r<height; ++r){
// rows are `pitch` bytes apart, so advance via a char* before indexing
float* row = (float*)((char*)devPtr + r * pitch);
for (int c = 0; c < width; ++c){
row[c] = 17.3;
}
}
}
// Allocate a pitched 64x64 float array, fill it on the device, then clean up.
int main(void)
{
    int width = 64, height = 64;
    float* devPtr;
    size_t pitch;
    cudaMallocPitch(&devPtr, &pitch, width * sizeof(float), height);
    MyKernel<<<100, 512>>>(devPtr, pitch, width, height);
    // BUGFIX: the launch is asynchronous — wait for it before exiting so
    // errors are not silently dropped at process teardown
    cudaDeviceSynchronize();
    // BUGFIX: release the device allocation (was leaked)
    cudaFree(devPtr);
    return 0;
}
|
11,851 | #include <cuda.h>
#include <stdio.h>
#include <stdint.h>
// For comparisons
//#include "seqScan.c"
#define CLONES 8
#define REPS 8
// block size in ELEMENTS!
#define BLOCK_SIZE (CLONES*REPS*64)
#define N 4096*CLONES*REPS*64
/* ------------------------------------------------------------------------
Unrolled in-place(shared memory) Scan without syncs (32 threads, 64 elts).
Needs 2*64 elements of shared memory storage (512bytes).
(shared mem is 49152 bytes, but you share it with other blocks on an MP)
--------------------------------------------------------------------- */
// Warp-synchronous inclusive scan: each thread owns the element pair
// (2*tid, 2*tid+1); 64-element segments are scanned in place in shared
// memory with NO barriers between the doubling steps.
// NOTE(review): correctness relies on implicit lockstep execution inside a
// warp; on Volta+ (independent thread scheduling) this pattern needs
// __syncwarp() between dependent steps — verify on the target architecture.
//   i      - which of the REPS sequential scans this is (selects maxs slots)
//   input  - source elements in global memory
//   output - destination for the scanned values
//   s_data - shared-memory scratch
//   maxs   - receives each 64-element segment's total (its last element)
__device__ void skl_scan(int i,
float* input,
float *output,
float *s_data, // The shared memory
float *maxs) {
int tid = threadIdx.x;
int tids = tid << 1;
// Load data from global memory into shared memory (in two separate load ops)
s_data[tid*2] = input[tid*2];
s_data[tid*2+1] = input[tid*2+1];
// __syncthreads();
// doubling steps at distances 1,2,4,8,16,32; the bit masks select the
// source partial sum and the destination slot for each thread's pair
s_data[tids | 1] += s_data[tids];
s_data[(tids | 3) - (tid & 1)] += s_data[tids & 0xFFFFFFFC | 1];
s_data[(tids | 7) - (tid & 3)] += s_data[tids & 0xFFFFFFF8 | 3];
s_data[(tids | 15) - (tid & 7)] += s_data[tids & 0xFFFFFFF0 | 7];
s_data[(tids | 31) - (tid & 15)] += s_data[tids & 0xFFFFFFE0 | 15];
s_data[(tids | 63) - (tid & 31)] += s_data[tids & 0xFFFFFFC0 | 31];
// NO Interleaved SYNCS here.
//__syncthreads();
// write the scanned pair back to global memory
output[tid*2] = s_data[tid*2];
output[tid*2+1] = s_data[tid*2+1];
//__syncthreads();
// one lane per warp records its segment's total (element 63 of the segment)
if(tid % 32 == 0)
maxs[(i<<3)+(tid>>5)] = s_data[(tid << 1) | 0x3F];
//maxs[i*CLONES+(tid / 32)] = s_data[(tid / 32)*64 + 63];
// (i<<3)+(tid>>5) ((tid>>5)<<6) + 63
// (tid << 1) | 0x3F)
}
/* ------------------------------------------------------------------------
The Scan kernel (Thousand(s) of elements. NO SYNCS AT ALL)
--------------------------------------------------------------------- */
/* ------------------------------------------------------------------------
   Block-level scan of BLOCK_SIZE elements: REPS sequential 512-element
   warp-scans, then a scan over the segment totals (maxs), then each
   element is offset by the total of the segments before it.
--------------------------------------------------------------------- */
__global__ void kernel(float* input0,
float* output0,
float* maxout){
// shared data: 512 floats of scan scratch followed by the segment totals
extern __shared__ float s_data[];
float *maxs = &s_data[512];
// sequentially scan REPS consecutive 512-element slices of this block's data
for (int i = 0; i < REPS; i ++) {
skl_scan(i,
input0+(blockIdx.x*BLOCK_SIZE)+(i*512),
output0+(blockIdx.x*BLOCK_SIZE)+(i*512),
s_data,maxs);
}
// all segment totals must be visible before they are scanned below
__syncthreads();
// scan the totals array with a single warp; v discards its "total" output
float v; //discard this value.
if (threadIdx.x < 32)
skl_scan(0,maxs,maxs,(float *)s_data,&v);
__syncthreads();
// add each element's preceding-segments total; the first 64 elements of the
// block have no predecessor, hence the j==0 / threadIdx.x<64 exclusion
for (int j = 0; j < REPS; j ++) {
if (j != 0 || threadIdx.x >= 64)
output0[(blockIdx.x*BLOCK_SIZE)+(j*256)+threadIdx.x] += maxs[(((j*256)+threadIdx.x) / 64)-1];
output0[(blockIdx.x*BLOCK_SIZE)+(j*256)+threadIdx.x+2048] += maxs[(((j*256)+threadIdx.x+2048) /64)-1];
}
// debug: expose the scanned totals of block 0 (all blocks race on maxout)
if (threadIdx.x < 32) {
maxout[threadIdx.x] = maxs[threadIdx.x];
maxout[threadIdx.x+32] = maxs[threadIdx.x+32];
}
}
/* ------------------------------------------------------------------------
MAIN
--------------------------------------------------------------------- */
/* ------------------------------------------------------------------------
   MAIN: fill N elements with 1.0, run the block-scan kernel, print the
   first results and the per-warp totals, and report the kernel time.
--------------------------------------------------------------------- */
int main(void) {
    float *v;     // host input
    float *r;     // host output
    float m[64];  // host copy of the debug totals
    float *dv;
    float *dr;
    float *dm;
    v = (float*)malloc(sizeof(float) * N);
    r = (float*)malloc(sizeof(float) * N);
    memset(m,0,64*sizeof(float));
    for (int i = 0; i < N; i ++) {
        v[i] = 1.0;
        r[i] = 7.0;   // r is fully overwritten by the copy-back below
    }
    cudaMalloc((void**)&dv,N*sizeof(float));
    cudaMalloc((void**)&dr,N*sizeof(float));
    cudaMalloc((void**)&dm,64*sizeof(float));
    cudaMemcpy(dv,v,N*sizeof(float),cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    // shared memory: 512 floats of scan scratch + 64 floats of totals
    kernel<<<4096,256,(512+64)*(sizeof(float))>>>(dv,dr,dm);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(r,dr,N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(m,dm,64*sizeof(float),cudaMemcpyDeviceToHost);
    for (int i = 0; i < 4096 /*N*/; i ++) {
        printf("%f ",r[i]);
    }
    printf("\n ------ \n");
    for (int i = 0; i < 64; i ++) {
        printf("%f ",m[i]);
    }
    printf("Elapsed time: %f\n", elapsedTime);
    // BUGFIX: release the events and all host/device memory (was leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dv);
    cudaFree(dr);
    cudaFree(dm);
    free(v);
    free(r);
    return 0;
}
|
11,852 | extern "C"
// One thread per stored quad: score how many of the four query terms match
// the quad, where -1 acts as a wildcard that always contributes a match.
__global__ void query(int size, int subjectQuery, int predicateQuery, int objectQuery, int contextQuery, int *subjects, int *predicates, int *objects, int *contexts, int *result)
{
    // global thread index = quad index
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // never touch memory past the end of the arrays
    if (i < size) {
        // each term adds 1 for an exact match plus 1 when the query term is
        // the wildcard (-1), exactly as the original accumulator did
        int score = 0;
        score += (subjectQuery   == subjects[i])   ? 1 : 0;
        score += (subjectQuery   == -1)            ? 1 : 0;
        score += (predicateQuery == predicates[i]) ? 1 : 0;
        score += (predicateQuery == -1)            ? 1 : 0;
        score += (objectQuery    == objects[i])    ? 1 : 0;
        score += (objectQuery    == -1)            ? 1 : 0;
        score += (contextQuery   == contexts[i])   ? 1 : 0;
        score += (contextQuery   == -1)            ? 1 : 0;
        result[i] = score;
    }
}
11,853 | #include<stdio.h>
#include<cuda.h>
#include <stdlib.h>
#include <iostream>
#include <time.h>
#include <math.h>
#define N 100000
using namespace std;
static const long BLK_SIZE =1000 ;
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// Odd-even transposition sort attempt over the device array c
// (*count = number of elements; l = number of phase pairs).
// NOTE(review): several correctness problems to confirm/fix before reuse:
//  - the "even" and "odd" branches execute identical code, so every thread
//    compares (tid, tid+1) each pass instead of alternating pair parity;
//  - __syncthreads() sits inside a divergent branch, which is undefined
//    behavior;
//  - only threadIdx.x is used, so with multiple blocks (N=100000 over
//    1000-thread blocks) every block operates on the same [0, blockDim)
//    window and the rest of the array is never sorted;
//  - the last thread of the window reads c[threadIdx.x+1] past its range.
__global__ void sort(int *c,int *count)
{
int l;
if(*count%2==0)
l=*count/2;
else
l=(*count/2)+1;
for(int i=0;i<l;i++)
{
if(threadIdx.x%2==0) //even phase
{
if(c[threadIdx.x]>c[threadIdx.x+1])
{
int temp=c[threadIdx.x];
c[threadIdx.x]=c[threadIdx.x+1];
c[threadIdx.x+1]=temp;
}
__syncthreads();
}
else //odd phase
{
if(c[threadIdx.x]>c[threadIdx.x+1])
{
int temp=c[threadIdx.x];
c[threadIdx.x]=c[threadIdx.x+1];
c[threadIdx.x+1]=temp;
}
__syncthreads();
}
}
}
// Exchange the two ints pointed to by xp and yp.
void swap(int *xp, int *yp)
{
    const int saved = *xp;
    *xp = *yp;
    *yp = saved;
}
// An optimized version of Bubble Sort
// Sort arr[0..n-1] ascending in place using bubble sort with an early-exit
// optimization (stop once a full pass performs no swaps).
// BUGFIX: this function was an empty stub while main duplicated the same
// logic inline; it now actually sorts.
void bubbleSort(int arr[], int n)
{
    for (int i = 0; i < n - 1; i++)
    {
        bool swapped = false;
        for (int j = 0; j < n - i - 1; j++)
        {
            if (arr[j] > arr[j+1])
            {
                // swap inline so the function is self-contained
                int t = arr[j];
                arr[j] = arr[j+1];
                arr[j+1] = t;
                swapped = true;
            }
        }
        if (!swapped)   // already sorted — no pass changed anything
            break;
    }
}
// Fill an array with random values, "sort" it on the GPU, then sort the
// original on the CPU with bubble sort, timing both.
int main()
{
    // BUGFIX: two N=100000 int arrays (~800 KB) overflowed typical stack
    // limits; allocate them on the heap instead
    int *a = (int*)malloc(N * sizeof(int));
    int *b = (int*)malloc(N * sizeof(int));
    if (a == NULL || b == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (int i = 0; i < N; i++) {
        a[i] = (float) rand() / (float) RAND_MAX * 100;
    }
    printf("ORIGINAL ARRAY : \n");
    for(int i=0;i<N;i++)
    {
        printf("%d ",a[i]);
    }
    int *c,*count;
    int k=N;
    cudaMalloc((void**)&c,sizeof(int)*N);
    cudaMalloc((void**)&count,sizeof(int));
    cudaMemcpy(c,a,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(count,&k,sizeof(int),cudaMemcpyHostToDevice);
    // time the kernel launch with events
    cudaEvent_t start, stop;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    float elapsedTime;
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
    sort<<< ceil(N/(float)BLK_SIZE),BLK_SIZE >>>(c,count);
    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
    // BUGFIX: cudaThreadSynchronize() is deprecated
    CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(cudaGetLastError()); // Check if an error occurred in device code
    CUDA_CHECK_RETURN(cudaEventDestroy(start));
    CUDA_CHECK_RETURN(cudaEventDestroy(stop));
    cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";
    cout << "Copying results back to host .... "<<endl;
    cudaMemcpy(b,c,sizeof(int)*N,cudaMemcpyDeviceToHost);
    printf("\nSORTED ARRAY : \n");
    for(int i=0;i<N;i++)
    {
        printf("%d ",b[i]);
    }
    // CPU reference: optimized bubble sort with early exit
    clock_t st, ed;
    st = clock();
    int i, j;
    bool swapped;
    for (i = 0; i < N-1; i++)
    {
        swapped = false;
        for (j = 0; j < N-i-1; j++)
        {
            if (a[j] > a[j+1])
            {
                swap(&a[j], &a[j+1]);
                swapped = true;
            }
        }
        // if no two elements were swapped by the inner loop, the array is sorted
        if (swapped == false)
            break;
    }
    printf("\n");
    printf("BYCPU");
    printf("\n");
    for(int i=0;i<N;i++)
    {
        printf("%d ",a[i]);
    }
    ed = clock() - st;
    cout << "Elapsed time on host: " << ((float) ed) / CLOCKS_PER_SEC * 1000
         << " ms" << endl;
    // BUGFIX: release device and host memory (was leaked)
    cudaFree(c);
    cudaFree(count);
    free(a);
    free(b);
    return 0;
}
|
11,854 | #include "includes.h"
// Reduce re[0..blockDim.x) to the minimum non-negative value and publish it
// in *s. Intended for a single-block launch.
__global__ void testKernel(int *s, const int *re){
    __shared__ int temp[1];
    int i = threadIdx.x;
    // BUGFIX: shared memory is uninitialized — seed the running minimum
    // before any thread compares against it
    if (i == 0)
        temp[0] = 0x7fffffff;   // INT_MAX
    __syncthreads();
    // BUGFIX: the original unsynchronized read-compare-write raced between
    // threads; atomicMin makes the reduction deterministic
    if (re[i] > -1)
        atomicMin(&temp[0], re[i]);
    __syncthreads();
    // BUGFIX: a single thread publishes the result instead of every thread
    // racing on *s
    if (i == 0)
        *s = temp[0];
}
11,855 | /*
Author: Jason He
Version: 1.0 20210605 Serial version.
Version: 2.0 20210607 CUDA version.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
/*
This program solve the Stokes Flow problem- 36.1.2 in the textbook.
Inputs:
N, number of grid points in each direction.
mu, double precision viscosity.
P, double precision pressure drop.
w, double precision relaxation parameter.
tol, double precision error tolerance.
K, maximum number of iterations.
Outputs:
print arguments
output final grid value into "StokesU.out", "StokesV.out" and "StokesP.out"
print run time and output it to runtime.dat
E.g.
$ ./stokes 128 1 1 0.4 1e-9 100000
*/
//declare global variables
const int threadsPerBlock = 1024;//for NVIDIA TESLA K20c maximum is 1024
/*the findmaxres kernel demand that threadsPerBlock should be a power of 2.
Also in the three update kernels, each block compare and store the residuals into
the global array dev_res[threadsPerBlock], which only work with maximum
threadsPerBlock=1024, since in this case only one block runs at a time.*/
__device__ __constant__ int dev_N;
__device__ __constant__ double dev_mu;
__device__ __constant__ double dev_P;
__device__ __constant__ double dev_w;
__device__ __constant__ double dev_dx;
//this kernel set values of 1d length-n double array to 0
// Zero-fill the first n entries of a device array of doubles.
__global__ void init(double* a, int n){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        a[idx] = 0.;
}
//the three update functions update the grid values and return the maximum residual
// Red-black SOR update of the u grid, stored row-major as dev_N rows of
// (dev_N-1) values. Only cells whose (row+col) parity equals `color` are
// updated, so two launches cover the whole grid.
// The first/last rows (j==0, j==dev_N-1) use the inlet pressure dev_P /
// outlet pressure 0 in the gradient; the first/last columns drop the
// missing neighbour from the stencil.
// Each thread folds |residual| into dev_res[threadIdx.x]; per the comment
// at the top of the file this per-slot max only works because a single
// block's worth of slots is used.
__global__ void updateU(double* dev_u, double* dev_p, double* dev_res, int color){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i < dev_N*(dev_N-1)){
int j = i/(dev_N-1);
int k = i%(dev_N-1);
if((j+k)%2==color){
double r;
// flattened indices of the four u neighbours and the two pressure cells
int jp = i + dev_N-1;//(j+1)*(dev_N-1) + k;
int jm = i - dev_N+1;//(j-1)*(dev_N-1) + k;
int kp = i + 1;//j*(dev_N-1) + k+1;
int km = i - 1;//j*(dev_N-1) + k-1;
int pi = j*(dev_N-1) + k;
int pm = pi - dev_N+1;//(j-1)*(dev_N-1) + k;
// first row: inlet side, pressure gradient uses dev_P
if(j==0){
if(k==0){
r = dev_mu*(dev_u[jp]+dev_u[kp]-4*dev_u[i])
-dev_dx*2*(dev_p[pi]-dev_P);
}
else if(k<dev_N-2){
r = dev_mu*(dev_u[jp]+dev_u[kp]+dev_u[km]-3*dev_u[i])
-dev_dx*2*(dev_p[pi]-dev_P);
}
else{
r = dev_mu*(dev_u[jp]+dev_u[km]-4*dev_u[i])
-dev_dx*2*(dev_p[pi]-dev_P);
}
}
// interior rows: full stencil, centered pressure difference
else if(j<dev_N-1){
if(k==0){
r = dev_mu*(dev_u[jp]+dev_u[jm]+dev_u[kp]-5*dev_u[i])
-dev_dx*(dev_p[pi]-dev_p[pm]);
}
else if(k<dev_N-2){
r = dev_mu*(dev_u[jp]+dev_u[jm]+dev_u[kp]+dev_u[km]-4*dev_u[i])
-dev_dx*(dev_p[pi]-dev_p[pm]);
}
else{
r = dev_mu*(dev_u[jp]+dev_u[jm]+dev_u[km]-5*dev_u[i])
-dev_dx*(dev_p[pi]-dev_p[pm]);
}
}
// last row: outlet side, pressure gradient uses 0
else{
if(k==0){
r = dev_mu*(dev_u[jm]+dev_u[kp]-4*dev_u[i])
-dev_dx*2*(0-dev_p[pm]);
}
else if(k<dev_N-2){
r = dev_mu*(dev_u[jm]+dev_u[kp]+dev_u[km]-3*dev_u[i])
-dev_dx*2*(0-dev_p[pm]);
}
else{
r = dev_mu*(dev_u[jm]+dev_u[km]-4*dev_u[i])
-dev_dx*2*(0-dev_p[pm]);
}
}
// over-relaxed update and residual tracking
dev_u[i] += dev_w*r;
dev_res[threadIdx.x] = max(fabs(r), dev_res[threadIdx.x]);
}
}
}
// Red-black SOR update of the v grid, stored row-major as (dev_N-1) rows of
// dev_N values. The first and last columns (k==0, k==dev_N-1) are fixed
// boundary values and are skipped; the first/last rows drop the missing
// vertical neighbour from the stencil.
// As in updateU, |residual| is folded into dev_res[threadIdx.x].
__global__ void updateV(double* dev_v, double* dev_p, double* dev_res, int color){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i < (dev_N-1)*dev_N){
int k = i%dev_N;
if(0<k && k<dev_N-1){
int j = i/dev_N;
if((j+k)%2==color){
double r;
// flattened indices of the four v neighbours and the two pressure cells
int jp = i + dev_N;//(j+1)*dev_N + k;
int jm = i - dev_N;//(j-1)*dev_N + k;
int kp = i + 1;//j*dev_N + k+1;
int km = i - 1;//j*dev_N + k-1;
int pi = j*(dev_N-1) + k;
int pm = pi - 1;//j*(dev_N-1) + k-1;
if(j==0){
r = dev_mu*(dev_v[kp]+dev_v[km]+dev_v[jp]-3*dev_v[i])
-dev_dx*(dev_p[pi]-dev_p[pm]);
}
else if(j<dev_N-2){
r = dev_mu*(dev_v[kp]+dev_v[km]+dev_v[jp]+dev_v[jm]-4*dev_v[i])
-dev_dx*(dev_p[pi]-dev_p[pm]);
}
else{
r = dev_mu*(dev_v[kp]+dev_v[km]+dev_v[jm]-3*dev_v[i])
-dev_dx*(dev_p[pi]-dev_p[pm]);
}
// over-relaxed update and residual tracking
dev_v[i] += dev_w*r;
dev_res[threadIdx.x] = max(fabs(r), dev_res[threadIdx.x]);
}
}
}
}
// Pressure update: the residual is the negative discrete divergence of the
// velocity field at each of the (dev_N-1)^2 pressure cells. Unlike the
// velocity updates, every cell is updated in a single launch (no coloring).
__global__ void updateP(double* dev_u, double* dev_v, double* dev_p, double* dev_res){
int i = threadIdx.x + blockIdx.x*blockDim.x;
if(i < (dev_N-1)*(dev_N-1)){
int j = i/(dev_N-1);
int k = i%(dev_N-1);
// the four velocity samples surrounding pressure cell (j,k)
int up = (j+1)*(dev_N-1) + k;
int um = j*(dev_N-1) + k;
int vp = j*dev_N + k+1;
int vm = j*dev_N + k;
double r;
r = -(dev_u[up]-dev_u[um])-(dev_v[vp]-dev_v[vm]);
dev_p[i] += dev_w*r;//update all p's
// NOTE(review): like the other kernels this relies on dev_res having one
// slot per threadIdx.x, i.e. effectively single-block residual tracking
dev_res[threadIdx.x] = max(fabs(r), dev_res[threadIdx.x]);//not comfortable with this
}
}
//this kernel find the max value
// One tree-reduction step for the residual maximum: threads whose index is
// a multiple of 2*i fold in the value i slots away. The host launches this
// repeatedly with i = 1, 2, 4, ... so the overall max ends up in dev_res[0].
__global__ void findmaxres(double* dev_res, int i){
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx % (2 * i) == 0)
        dev_res[idx] = max(dev_res[idx], dev_res[idx + i]);
}
// Solve the Stokes flow problem by red-black SOR on staggered u/v/p grids.
// Usage: ./stokes N mu P w tol K
int main(int argc, char* argv[]){
    // prefer a device with at least 13 multiprocessors
    cudaDeviceProp prop;
    int dev;
    memset(&prop, 0, sizeof(cudaDeviceProp));
    prop.multiProcessorCount = 13;
    cudaChooseDevice(&dev, &prop);
    cudaSetDevice(dev);
    // BUGFIX: validate the argument count before indexing argv
    if(argc < 7){
        fprintf(stderr, "Usage: %s N mu P w tol K\n", argv[0]);
        return 1;
    }
    // load input parameters
    int argi = 0;
    int N = atoi(argv[++argi]);      // grid points per direction
    double mu = atof(argv[++argi]);  // viscosity
    double P = atof(argv[++argi]);   // pressure drop
    double w = atof(argv[++argi]);   // relaxation parameter
    double tol = atof(argv[++argi]); // residual tolerance
    int K = atoi(argv[++argi]);      // maximum iterations
    printf( "N = %d\nmu = %lf\nP = %lf\nw = %lf\ntau = %e\nK = %d\n",
            N, mu, P, w, tol, K);
    // grid spacing for the iteration equations
    double dx = 1./(N-1);
    // enough threads to cover the largest grid (u: N*(N-1) points)
    const int blocksPerGrid = (N*(N-1) + threadsPerBlock - 1)/threadsPerBlock;
    printf("BlocksPerGrid = %d\nThreadsPerBlock = %d\n", blocksPerGrid, threadsPerBlock);
    // copy parameters into constant memory
    // BUGFIX: N and dev_N are ints — copying sizeof(double) (8 bytes) read
    // past the 4-byte host variable and overran the 4-byte device symbol
    cudaMemcpyToSymbol(dev_N, &N, sizeof(int));
    cudaMemcpyToSymbol(dev_mu, &mu, sizeof(double));
    cudaMemcpyToSymbol(dev_P, &P, sizeof(double));
    cudaMemcpyToSymbol(dev_w, &w, sizeof(double));
    cudaMemcpyToSymbol(dev_dx, &dx, sizeof(double));
    // allocate and zero the staggered grids on the device
    double *dev_u, *dev_v, *dev_p;
    cudaMalloc((void**)&dev_u, N*(N-1)*sizeof(double));
    cudaMalloc((void**)&dev_v, (N-1)*N*sizeof(double));
    cudaMalloc((void**)&dev_p, (N-1)*(N-1)*sizeof(double));
    init<<<blocksPerGrid, threadsPerBlock>>>(dev_u, N*(N-1));
    init<<<blocksPerGrid, threadsPerBlock>>>(dev_v, (N-1)*N);
    init<<<blocksPerGrid, threadsPerBlock>>>(dev_p, (N-1)*(N-1));
    // per-thread-slot residual maxima, reduced by findmaxres each sweep
    double *dev_res;
    cudaMalloc((void**)&dev_res, threadsPerBlock*sizeof(double));
    int iter=0;        // iterations performed
    double maxres=1.0; // maximum residual over the u, v and p updates
    float runtime;
    clock_t t;
    t = clock();
    // main loop: colored velocity sweeps, pressure sweep, residual reduction
    while(iter<K && maxres>tol){
        init<<<1, threadsPerBlock>>>(dev_res, threadsPerBlock);
        updateU<<<blocksPerGrid, threadsPerBlock>>>(dev_u, dev_p, dev_res, 0);
        updateU<<<blocksPerGrid, threadsPerBlock>>>(dev_u, dev_p, dev_res, 1);
        updateV<<<blocksPerGrid, threadsPerBlock>>>(dev_v, dev_p, dev_res, 0);
        updateV<<<blocksPerGrid, threadsPerBlock>>>(dev_v, dev_p, dev_res, 1);
        updateP<<<blocksPerGrid, threadsPerBlock>>>(dev_u, dev_v, dev_p, dev_res);
        // tree-reduce dev_res so the overall max lands in dev_res[0]
        for(int i=1; i<threadsPerBlock; i*=2)
            findmaxres<<<1, threadsPerBlock>>>(dev_res, i);
        cudaMemcpy(&maxres, dev_res, sizeof(double), cudaMemcpyDeviceToHost);
        iter++;
    }
    runtime = (float)(clock()-t)/CLOCKS_PER_SEC;
    FILE* tfile = fopen("runtime.dat", "a");
    fprintf(tfile, "%d %f\n", N, runtime/iter);
    fclose(tfile);
    printf("Runtime = %f seconds\nOne iteration runtime = %f seconds\n",
           runtime, runtime/iter);
    // print iterations and residuals
    printf("number of iterations = %d\nresidual = %e\n", iter, maxres);
    // copy results back and dump the raw binary grids
    double *u = (double*)malloc(N*(N-1)*sizeof(double));
    double *v = (double*)malloc((N-1)*N*sizeof(double));
    double *p = (double*)malloc((N-1)*(N-1)*sizeof(double));
    cudaMemcpy(u, dev_u, N*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(v, dev_v, (N-1)*N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(p, dev_p, (N-1)*(N-1)*sizeof(double), cudaMemcpyDeviceToHost);
    FILE* ufile = fopen("stokesU.out","w");
    fwrite(u, sizeof(double), N*(N-1), ufile);
    fclose(ufile);
    FILE* vfile = fopen("stokesV.out","w");
    fwrite(v, sizeof(double), (N-1)*N, vfile);
    fclose(vfile);
    FILE* pfile = fopen("stokesP.out","w");
    fwrite(p, sizeof(double), (N-1)*(N-1), pfile);
    fclose(pfile);
    // free memories
    free(u);
    free(v);
    free(p);
    cudaFree(dev_u);
    cudaFree(dev_v);
    cudaFree(dev_p);
    cudaFree(dev_res);
    return 0;
}
11,856 | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <stdio.h>
// Managed Base Class -- inherit from this to automatically
// allocate objects in Unified Memory
class Managed
{
public:
// allocate from unified memory so derived objects are reachable from both
// host and device
void *operator new(size_t len) {
void *ptr;
cudaMallocManaged(&ptr, len);
// drain outstanding device work before the host constructs into ptr
cudaDeviceSynchronize();
return ptr;
}
void operator delete(void *ptr) {
// drain pending kernels that might still dereference the object
cudaDeviceSynchronize();
cudaFree(ptr);
}
};
// String Class for Managed Memory
class String : public Managed
{
int length;   // bytes in the string, excluding the NUL terminator
char *data;   // unified-memory buffer (cudaMallocManaged), length+1 bytes
public:
String() : length(0), data(0) {}
// Constructor for C-string initializer
String(const char *s) : length(0), data(0) {
_realloc(strlen(s));
strcpy(data, s);
}
// Copy constructor: deep-copies into a fresh managed buffer
String(const String& s) : length(0), data(0) {
_realloc(s.length);
strcpy(data, s.data);
}
~String() { cudaFree(data); }
// Assignment operator (from a C string only)
// NOTE(review): no String& operator=(const String&) is defined, so the
// implicitly generated one would shallow-copy `data` and lead to a double
// cudaFree — confirm String-to-String assignment is never used.
String& operator=(const char* s) {
_realloc(strlen(s));
strcpy(data, s);
return *this;
}
// Element access (from host or device)
__host__ __device__
char& operator[](int pos) { return data[pos]; }
// C-string access
__host__ __device__
const char* c_str() const { return data; }
private:
// free any previous buffer and allocate len+1 bytes of unified memory
void _realloc(int len) {
cudaFree(data);
length = len;
cudaMallocManaged(&data, length+1);
}
};
// Unified-memory record: inheriting Managed places the object itself in
// managed memory, and String keeps its character buffer in managed memory,
// so both fields are reachable from host and device.
struct DataElement : public Managed
{
String name;
int value;
};
// Kernel receiving the managed object BY POINTER: mutations are made on the
// caller's object and are visible on the host after synchronization.
__global__
void Kernel_by_pointer(DataElement *elem) {
    DataElement &e = *elem;
    printf("On device by pointer: name=%s, value=%d\n", e.name.c_str(), e.value);
    e.name[0] = 'p';
    ++e.value;
}
// Kernel receiving the managed object BY REFERENCE: mutations land on the
// caller's object, just like the pointer variant.
__global__
void Kernel_by_ref(DataElement &elem) {
    const char *text = elem.name.c_str();
    printf("On device by ref: name=%s, value=%d\n", text, elem.value);
    elem.name[0] = 'r';
    ++elem.value;
}
// Kernel receiving the element BY VALUE: pass-by-value invokes String's
// copy constructor on the host, so the kernel mutates a temporary copy —
// the 'v' write and value++ here are not visible on the caller's object.
__global__
void Kernel_by_value(DataElement elem) {
printf("On device by value: name=%s, value=%d\n", elem.name.c_str(), elem.value);
elem.name[0] = 'v';
elem.value++;
}
// Host wrappers: each launches the matching single-thread kernel and blocks
// until it finishes so the host can safely read the managed object afterwards.
void launch_by_pointer(DataElement *elem) {
Kernel_by_pointer<<< 1, 1 >>>(elem);
cudaDeviceSynchronize();
}
// by-reference launch: device writes are visible to the caller's object
void launch_by_ref(DataElement &elem) {
Kernel_by_ref<<< 1, 1 >>>(elem);
cudaDeviceSynchronize();
}
// by-value launch: copies the DataElement (and its String) for the kernel
void launch_by_value(DataElement elem) {
Kernel_by_value<<< 1, 1 >>>(elem);
cudaDeviceSynchronize();
}
// Demonstrates how pointer / reference / value parameter passing interacts
// with unified memory: the first two let device mutations reach the host,
// the by-value launch mutates only a copy.
int main(void)
{
// allocated in unified memory via Managed::operator new
DataElement *e = new DataElement;
e->value = 10;
e->name = "hello";
launch_by_pointer(e);
printf("On host (after by-pointer): name=%s, value=%d\n", e->name.c_str(), e->value);
launch_by_ref(*e);
printf("On host (after by-ref): name=%s, value=%d\n", e->name.c_str(), e->value);
launch_by_value(*e);
printf("On host (after by-value): name=%s, value=%d\n", e->name.c_str(), e->value);
// NOTE(review): delete is left commented out — as written, the element and
// its String buffer are reclaimed only by cudaDeviceReset/process teardown;
// confirm whether this is intentional.
//delete e;
cudaDeviceReset();
}
|
11,857 | #include <stdlib.h>
#include <stdio.h>
#define TILE_DIM 32
// Tiled matrix multiply: C(cRows x cCols) = A(aRows x aCols) * B(bRows x bCols),
// all row-major. Launch with blockDim = (TILE_DIM, TILE_DIM) and a grid
// covering C; tiles outside a matrix are zero-padded so ragged edges work.
__global__ void tileMatMul(float* matA, float* matB, float* matC, int aRows,
                           int aCols,
                           int bRows, int bCols, int cRows, int cCols)
{
    int Row = blockIdx.y * TILE_DIM + threadIdx.y;
    int Col = blockIdx.x * TILE_DIM + threadIdx.x;
    __shared__ float sharedMatA[TILE_DIM][TILE_DIM];
    __shared__ float sharedMatB[TILE_DIM][TILE_DIM];
    float cResultValue = 0.0;
    // walk the tiles along A's columns / B's rows
    for (int i = 0; i < (aCols-1)/TILE_DIM+1; ++i)
    {
        // stage this thread's element of A's tile, zero outside the matrix
        if (Row < aRows && i*TILE_DIM+threadIdx.x < aCols)
            sharedMatA[threadIdx.y][threadIdx.x] = matA[Row*aCols + i*TILE_DIM+threadIdx.x];
        else
            sharedMatA[threadIdx.y][threadIdx.x] = 0.0;
        // stage this thread's element of B's tile
        // BUGFIX: the row bound for B is bRows, not cRows (they only
        // coincide when aRows == bRows, as in this demo's 512x512 case)
        if (Col < bCols && i*TILE_DIM+threadIdx.y < bRows)
            sharedMatB[threadIdx.y][threadIdx.x] = matB[(i*TILE_DIM+threadIdx.y)*bCols+Col];
        else
            sharedMatB[threadIdx.y][threadIdx.x] = 0.0;
        __syncthreads();   // both tiles fully staged before use
        for (int j = 0; j < TILE_DIM; ++j)
            cResultValue += sharedMatA[threadIdx.y][j] * sharedMatB[j][threadIdx.x];
        __syncthreads();   // done reading before the next iteration overwrites
    }
    if (Row < cRows && Col < cCols)
        matC[Row*cCols+Col] = cResultValue;
}
// Multiply two random 512x512 matrices on the GPU and print the first 100
// result elements.
int main()
{
    float *hMatA, *hMatB, *hMatC;
    float *dMatA, *dMatB, *dMatC;
    int aRows = 512;
    int aCols = 512;
    int bRows = 512;
    int bCols = 512;
    int cRows, cCols;
    hMatA = (float *) malloc(sizeof(float)*aRows*aCols);
    hMatB = (float *) malloc(sizeof(float)*bRows*bCols);
    // BUGFIX: initialize each matrix over its own extent; the original
    // single loop bounded by aRows*aCols only worked because A and B happen
    // to be the same size
    for (int i = 0; i < aRows*aCols; ++i)
        hMatA[i] = (float)rand()/(float)(RAND_MAX/1.0);
    for (int i = 0; i < bRows*bCols; ++i)
        hMatB[i] = (float)rand()/(float)(RAND_MAX/1.0);
    cRows = aRows;
    cCols = bCols;
    hMatC = (float *) malloc(sizeof(float)*cRows*cCols);
    cudaMalloc((void**)&dMatA, sizeof(float)*aRows*aCols);
    cudaMalloc((void**)&dMatB, sizeof(float)*bRows*bCols);
    cudaMalloc((void**)&dMatC, sizeof(float)*cRows*cCols);
    cudaMemcpy(dMatA, hMatA, sizeof(float)*aRows*aCols, cudaMemcpyHostToDevice);
    cudaMemcpy(dMatB, hMatB, sizeof(float)*bRows*bCols, cudaMemcpyHostToDevice);
    // one TILE_DIM x TILE_DIM block per output tile
    dim3 dimGrid((cCols - 1)/TILE_DIM+1, (cRows - 1)/TILE_DIM+1, 1);
    dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
    tileMatMul<<<dimGrid,dimBlock>>>(dMatA, dMatB, dMatC, aRows, aCols, bRows,
                                     bCols, cRows, cCols);
    // BUGFIX: cudaThreadSynchronize() is deprecated
    cudaDeviceSynchronize();
    cudaMemcpy(hMatC, dMatC, sizeof(float)*cRows*cCols, cudaMemcpyDeviceToHost);
    for (int q = 0; q < 100; ++q)
    {
        printf("Result matrix #%d: %f\n",q, hMatC[q]);
    }
    cudaFree(dMatA);
    cudaFree(dMatB);
    cudaFree(dMatC);
    free(hMatA);
    free(hMatB);
    free(hMatC);
    return 0;
}
|
11,858 | #include <iostream>
#include <chrono>
#include <thread>
// Adds 1.0f to each element n_ops times; the template parameter makes the
// trip count a compile-time constant so launches differ only in work volume.
template <unsigned n_ops>
__global__ void kernel(float* const ptr) {
    const unsigned idx = threadIdx.x + blockIdx.x * blockDim.x;
    float acc = ptr[idx];
    for (unsigned op = 0; op < n_ops; op++) {
        acc += 1.0f;
    }
    ptr[idx] = acc;
}
// Estimate kernel-launch overhead for a given grid size by timing two
// kernels that differ only in compile-time op count and extrapolating:
//   time/op         = (time_B - time_A) / (num_ops_B - num_ops_A)
//   launch overhead = time_A - time/op * num_ops_A
// NOTE(review): the %lu printf specifiers assume the chrono rep is unsigned
// long; std::chrono::nanoseconds::count() returns a signed integer on
// common platforms — confirm or add casts.
void measure_launch_overhead(const unsigned grid_size) {
constexpr unsigned block_size = 1024;
const auto array_length = block_size * grid_size;
// the two measured kernels differ by 2^20 add operations per element
constexpr unsigned num_ops_A = 1u << 15;
constexpr unsigned num_ops_B = num_ops_A + (1u << 20);
float *ha, *da_A, *da_B;
// pinned host staging buffer plus one device buffer per timed kernel
cudaMallocHost(&ha , sizeof(float) * array_length);
cudaMalloc    (&da_A, sizeof(float) * array_length);
cudaMalloc    (&da_B, sizeof(float) * array_length);
// init array
for (unsigned i = 0; i < grid_size * block_size; i++) {
ha[i] = static_cast<float>(i);
}
cudaMemcpy(da_A, ha, sizeof(float) * array_length, cudaMemcpyDefault);
cudaMemcpy(da_B, ha, sizeof(float) * array_length, cudaMemcpyDefault);
// measure elapsed time of A ops (wall time including launch + sync)
const auto start_A = std::chrono::high_resolution_clock::now();
kernel<num_ops_A><<<grid_size, block_size>>>(da_A);
cudaDeviceSynchronize();
const auto end_A = std::chrono::high_resolution_clock::now();
const auto time_A = std::chrono::duration_cast<std::chrono::nanoseconds>(end_A - start_A).count();
// Sleep 2000ms to cool GPU
using namespace std::chrono_literals;
std::this_thread::sleep_for(2000ms);
// measure elapsed time of B ops
const auto start_B = std::chrono::high_resolution_clock::now();
kernel<num_ops_B><<<grid_size, block_size>>>(da_B);
cudaDeviceSynchronize();
const auto end_B = std::chrono::high_resolution_clock::now();
const auto time_B = std::chrono::duration_cast<std::chrono::nanoseconds>(end_B - start_B).count();
const auto time_diff = time_B - time_A;
std::printf("[GridSize = %8u, BlockSize = %3u] ", grid_size, block_size);
std::printf("elapsed time A : %15lu [ns], elapsed time B : %15lu [ns], time/op : %e [ns], launch overhead : %7lu [ns]\n",
time_A,
time_B,
static_cast<double>(time_diff) / (num_ops_B - num_ops_A),
time_A - time_diff * num_ops_A / (num_ops_B - num_ops_A));
cudaFree    (da_A);
cudaFree    (da_B);
cudaFreeHost(ha);
}
// Sweep grid sizes 1, 2, 4, ..., 2^19 and report launch overhead for each.
int main() {
  for (unsigned shift = 0; shift < 20; shift++) {
    measure_launch_overhead(1u << shift);
  }
}
|
11,859 | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
}
// Compares two sd x fd sub-matrices element-wise (column-major, leading
// dimensions ld_a/ld_b, starting offsets offset_a/offset_b) and counts
// mismatches into eq_flag[0]. Launch with a 2D grid covering sd x fd.
__global__ void ge_equals_no_transp (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, int* eq_flag) {
    const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_0 < sd) && (gid_1 < fd);
    if (valid) {
        const int ia = offset_a + gid_0 + gid_1 * ld_a;
        const int ib = offset_b + gid_0 + gid_1 * ld_b;
        if (a[ia] != b[ib]) {
            // Many threads may detect a mismatch concurrently; the original
            // plain eq_flag[0]++ was a read-modify-write data race that
            // could silently lose counts. atomicAdd makes it exact.
            atomicAdd(eq_flag, 1);
        }
    }
}
11,860 | #include "includes.h"
// Writes ab[idx*N] = a[idx*N] * b[idx*N] for each thread, i.e. an
// element-wise product over a stride-N subset of the buffers.
// NOTE(review): despite the name there is no reduction here, so this is not
// a complete dot product — presumably a partial-product stage; verify
// against the caller.
__global__ void dotProdKernel(float *a, float *b, float *ab, int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int pos = idx * N;
    if (pos < N * N) {
        ab[pos] = a[pos] * b[pos];
    }
}
11,861 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Element-wise vector addition: C[i] = A[i] + B[i]. Intended for a
// single-block launch, so the thread index is the element index.
__global__ void vecadd(int *A, int *B, int *C)
{
    const int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
// Reads two integer vectors from stdin, adds them on the GPU, and prints
// the element-wise sums.
int main()
{
    int n;
    printf("Enter the no. of elements\n");
    // Validate input: the kernel is launched as a single block, so n is
    // limited by the 1024-thread block maximum; the original also ignored
    // scanf failure and would use an indeterminate n.
    if (scanf("%i", &n) != 1 || n < 1 || n > 1024)
    {
        printf("Invalid element count (expected 1..1024)\n");
        return 1;
    }
    int *hA = (int *)calloc(n, sizeof(int));
    int *hB = (int *)calloc(n, sizeof(int));
    int *hC = (int *)calloc(n, sizeof(int));
    printf("Enter array 1\n");
    for (int i = 0; i < n; i++)
    {
        scanf("%i", &hA[i]);
    }
    printf("Enter array 2\n");
    for (int i = 0; i < n; i++)
    {
        scanf("%i", &hB[i]);
    }
    int *dA, *dB, *dC;
    cudaMalloc(&dA, n * sizeof(int));
    cudaMalloc(&dB, n * sizeof(int));
    cudaMalloc(&dC, n * sizeof(int));
    cudaMemcpy(dA, hA, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, n * sizeof(int), cudaMemcpyHostToDevice);
    vecadd<<<1, n>>>(dA, dB, dC);
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        printf("%s\n", cudaGetErrorString(err));
    }
    // Copy into hC; the original reused hA, which worked but destroyed the
    // first input for no benefit.
    cudaMemcpy(hC, dC, n * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
    {
        printf("%i ", hC[i]);
    }
    printf("\n");
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    // Host buffers were leaked in the original.
    free(hA);
    free(hB);
    free(hC);
    return 0;
}
|
11,862 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
/*
To compile:
nvcc -o SantosLinear SantosLinear.cu
./SantosLinear
*/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{82.54,145.92},{79.13,125.54},{72.66,120.05},{65.59,100.38},
{78.27,136.17},{77.25,117.81},{65.84,107.17},{76.12,134.15},
{76.49,96.78},{47.94,64.98},{61.80,103.60},{84.45,111.95},
{50.00,92.64},{99.96,147.10},{ 7.92,40.72},{20.36,38.90},
{53.22,81.07},{30.32,73.57},{82.38,132.77},{60.41,98.29},
{ 3.74,35.90},{79.72,117.61},{21.55,71.41},{20.81,58.80},
{ 6.85,29.40},{48.20,54.46},{62.51,100.88},{ 2.89,40.58},
{79.90,122.81},{84.94,137.16},{88.95,160.50},{47.00,70.95},
{78.86,143.59},{62.79,104.83},{20.29,65.93},{83.78,123.76},
{15.71,41.14},{49.35,89.02},{43.99,95.64},{24.12,70.60},
{36.86,68.70},{ 3.49,23.95},{37.43,69.28},{70.55,95.36},
{53.58,79.61},{55.79,101.29},{16.39,58.31},{91.65,115.29},
{82.65,130.89},{48.78,105.63},{82.92,131.99},{64.86,110.09},
{64.75,93.89},{ 4.83,25.91},{ 0.59,30.17},{ 0.72,28.41},
{16.47,46.15},{40.42,70.11},{ 3.18,25.76},{59.02,113.49},
{32.52,65.72},{42.65,87.74},{70.58,124.61},{ 0.09,30.33},
{23.24,64.44},{75.05,125.90},{24.16,49.25},{37.75,76.66},
{81.47,137.19},{59.58,108.26},{59.56,114.79},{76.89,128.06},
{ 9.76,44.84},{18.39,51.23},{58.29,120.09},{80.81,119.03},
{77.93,132.71},{97.30,135.73},{69.14,92.52},{22.42,50.10},
{41.90,59.59},{33.67,54.58},{94.44,155.64},{50.51,92.26},
{50.57,70.49},{59.20,103.78},{57.09,110.74},{79.47,124.25},
{79.64,117.26},{42.89,89.97},{64.89,108.26},{65.02,107.66},
{99.94,163.00},{38.56,81.90},{95.17,147.58},{59.96,113.55},
{18.25,44.49},{10.60,19.67},{33.68,76.62},{96.42,149.59},
{25.47,40.35},{29.78,73.31},{32.59,62.29},{33.87,69.54},
{54.15,102.50},{14.19,44.72},{10.55,47.75},{82.20,111.38},
{15.58,43.53},{71.47,102.98},{15.31,46.36},{ 1.72,12.58},
{27.67,59.35},{83.63,147.60},{44.36,57.03},{17.88,46.07},
{38.27,66.44},{89.79,141.09},{54.56,84.84},{ 8.92,56.82},
{11.71,23.04},{69.04,119.32},{70.99,119.46},{ 8.12,35.45},
{26.97,62.04},{76.27,143.28},{27.19,52.66},{ 7.48,35.88},
{26.28,46.29},{15.86,49.31},{52.24,70.69},{72.77,129.40},
{ 3.20,30.89},{72.60,126.43},{59.68,96.52},{87.72,134.59},
{66.32,118.14},{63.10,110.15},{89.28,141.58},{57.83,98.06},
{39.40,92.51},{86.62,133.71},{37.79,59.83},{93.50,139.06},
{60.27,109.60},{31.32,78.75},{72.14,122.61},{ 6.94,38.40},
{28.59,73.61},{73.17,114.77},{32.10,60.83},{45.15,86.50},
{60.53,108.41},{31.27,57.00},{39.38,68.36},{73.19,135.10},
{46.73,78.33},{16.52,49.96},{34.43,60.80},{33.31,70.69},
{82.59,119.24},{62.32,109.02},{14.59,39.11},{85.50,148.34},
{ 4.70,15.17},{77.38,132.34},{38.08,79.30},{43.28,81.24},
{50.76,85.11},{ 6.10,44.28},{17.37,39.34},{53.23,102.51},
{46.97,84.96},{82.34,121.44},{82.82,135.65},{14.95,46.46},
{34.92,55.96},{76.50,121.39},{33.96,82.71},{33.81,75.71},
{85.97,145.58},{54.93,107.02},{35.84,74.14},{31.25,72.02},
{45.30,83.04},{36.90,78.48},{72.33,114.85},{ 9.26,45.11},
{38.16,79.84},{75.62,122.58},{46.35,80.75},{59.38,84.45},
{80.25,144.44},{74.87,129.35},{78.57,138.28},{67.79,109.42},
{56.63,89.32},{87.37,154.45},{24.78,67.81},{42.59,69.71},
{21.87,56.09},{45.40,84.68},{53.00,93.30},{ 9.96,34.66},
{70.49,107.10},{25.19,56.44},{28.27,56.57},{26.56,45.40},
{21.17,46.21},{72.95,125.45},{36.95,82.73},{34.87,63.67},
{26.84,78.06},{ 5.87,37.02},{81.40,114.31},{46.24,67.09},
{15.36,28.81},{73.75,120.90},{22.17,55.09},{29.65,60.96},
{50.70,75.55},{54.89,79.26},{80.60,142.14},{24.62,66.66},
{ 1.82,20.56},{68.34,107.73},{95.19,162.24},{ 8.77,39.79},
{98.68,154.81},{44.69,78.41},{58.55,84.50},{82.69,129.36},
{76.04,140.90},{20.81,36.16},{93.81,144.90},{69.62,111.04},
{68.75,99.34},{21.75,37.53},{49.72,80.49},{38.21,52.51},
{62.51,104.03},{ 9.06,32.07},{ 9.05,44.80},{88.63,142.62},
{42.32,101.24},{29.55,50.21},{24.88,52.81},{99.41,152.87},
{51.84,99.81},{46.45,98.38},{37.89,71.32},{97.06,160.47},
{23.74,70.83},{86.43,146.38},{23.93,43.60},{34.50,75.87},
{ 2.24,20.30},{77.93,138.50},{74.35,111.92},{89.22,141.37},
{49.81,90.81},{71.21,104.14},{20.64,43.69},{66.87,111.31},
{75.47,116.17},{24.57,57.12},{67.47,98.57},{85.39,133.18},
{45.12,88.99},{25.78,40.69},{ 8.75,44.48},{14.34,65.44},
{98.07,159.41},{64.11,102.03},{21.75,70.05},{92.78,151.41},
{71.47,122.64},{71.57,126.15},{67.73,116.35},{58.71,100.96},
{33.50,59.20},{18.59,66.75},{78.74,116.00},{28.51,78.22},
{ 4.87,44.86},{22.39,41.11},{33.53,62.91},{28.16,63.59},
{96.95,148.51},{98.43,144.64},{86.46,155.89},{81.98,135.72},
{99.49,153.80},{60.10,107.45},{54.65,95.78},{ 4.12,29.92},
{92.76,151.07},{19.06,47.87},{74.74,114.86},{56.77,76.20},
{61.28,107.17},{49.73,90.93},{31.62,83.42},{72.22,131.74},
{48.47,84.83},{89.84,124.97},{75.91,113.30},{ 6.48,11.00},
{70.50,117.74},{37.07,78.48},{52.08,100.48},{95.27,151.06},
{80.04,126.91},{99.00,150.15},{24.88,43.89},{13.90,31.65},
{11.88,38.92},{ 8.51,32.81},{69.81,117.15},{99.52,146.13},
{58.79,88.03},{ 4.60,15.68},{37.68,78.52},{39.45,68.65},
{ 1.93,29.81},{68.45,114.66},{63.34,117.33},{35.93,76.91},
{89.98,146.67},{39.61,76.72},{93.52,130.43},{92.88,138.22},
{46.35,111.10},{92.88,141.21},{70.32,114.14},{10.58,25.89},
{89.25,151.26},{90.81,130.61},{93.69,132.92},{46.99,79.45},
{91.32,138.72},{ 2.30,15.71},{ 2.25,30.66},{23.57,55.45},
{12.26,30.69},{ 5.06,36.12},{50.51,105.84},{86.01,132.02},
{76.83,122.78},{ 3.59,28.33},{44.53,85.91},{32.91,78.47},
{29.42,65.15},{ 7.35,22.34},{22.17,33.26},{80.33,138.66},
{16.18,47.52},{22.55,69.13},{59.30,103.83},{19.34,57.73},
{18.76,59.59},{80.12,121.38},{90.88,145.30},{11.62,49.49},
{85.83,143.38},{81.91,133.89},{37.85,75.59},{16.50,32.94},
{68.52,117.88},{52.16,95.21},{19.84,62.98},{60.73,117.12},
{63.90,110.93},{15.82,44.36},{93.24,151.85},{10.29,31.41},
{68.76,123.41},{27.76,67.89},{45.87,77.04},{55.60,96.42},
{50.26,92.70},{54.51,92.25},{20.19,46.43},{94.34,140.86},
{87.51,131.96},{ 2.09,34.05},{76.49,134.14},{76.85,126.34},
{69.79,108.18},{97.87,168.75},{37.12,55.39},{31.66,74.41},
{68.08,102.29},{37.65,69.58},{97.34,142.41},{53.81,100.86},
{54.75,100.43},{ 5.16,32.16},{69.87,119.91},{32.81,63.30},
{39.89,87.45},{73.14,127.54},{23.63,64.61},{14.92,60.68},
{98.83,154.62},{77.66,124.75},{93.03,155.98},{47.16,65.07},
{21.73,41.67},{58.06,101.88},{63.89,113.18},{77.97,123.52},
{29.66,47.24},{78.59,122.87},{38.72,61.11},{41.20,66.00},
{60.82,95.07},{ 4.12,23.58},{75.64,117.51},{87.26,145.07},
{76.26,139.41},{45.73,92.04},{65.68,114.39},{65.67,124.52},
{19.31,57.91},{36.43,64.22},{99.62,164.88},{50.16,71.90},
{49.70,99.36},{50.57,81.43},{53.69,78.82},{60.26,101.20},
{85.84,135.90},{95.23,161.49},{53.20,89.19},{91.36,139.23},
{29.80,61.21},{60.85,115.69},{ 2.43,26.13},{20.32,57.97},
{29.81,70.82},{36.83,76.87},{95.49,147.89},{27.20,70.77},
{ 9.06,37.56},{ 9.60,26.25},{34.71,97.78},{47.81,85.95},
{45.11,88.88},{91.68,170.70},{61.87,103.75},{21.35,45.49},
{30.52,75.30},{97.74,148.82},{51.47,95.07},{23.01,50.28},
{87.69,153.65},{28.36,68.77},{ 5.61,41.08},{81.55,112.05},
{50.63,84.20},{ 3.44,45.95},{24.80,59.44},{ 2.65,15.09},
{60.09,108.71},{30.23,52.63},{42.78,92.61},{38.52,77.43},
{72.51,118.84},{ 5.46,33.79},{78.26,131.71},{77.39,123.68},
{ 1.61,34.21},{96.99,160.15},{16.07,49.41},{51.26,94.71},
{22.64,69.05},{52.76,93.25},{31.89,69.73},{19.86,41.19},
{94.76,139.95},{71.59,127.65},{82.86,126.10},{58.49,90.54},
{23.55,47.67},{ 7.97,41.91},{20.69,48.99},{26.54,46.11},
{92.15,114.73},{69.56,91.47},{58.83,115.16},{58.00,101.91},
{40.28,78.91},{22.77,63.76},{54.72,105.40},{96.27,146.66},
{78.40,133.20},{55.11,76.68},{25.82,72.29},{30.15,74.90},
{55.41,101.10},{40.66,87.45},{31.31,74.39},{93.59,166.09},
{86.40,120.49},{63.78,119.02},{35.90,73.42},{57.83,90.27},
{26.87,61.77},{41.13,69.36},{71.17,149.94},{72.02,125.13},
{96.22,147.46},{15.95,43.65},{89.52,141.68},{86.25,148.39},
{76.12,114.34},{71.90,118.02},{82.84,120.60},{64.11,123.24},
{93.21,146.56},{59.80,103.56},{17.46,30.86},{25.21,43.25},
{27.59,70.25},{ 8.13,30.28},{75.12,107.43},{17.03,50.03},
{41.70,88.69},{59.19,96.81},{22.71,48.98},{84.24,106.94},
{ 9.31,38.99},{47.75,90.46},{22.58,36.20},{95.99,152.11},
{70.84,115.13},{32.29,65.95},{35.29,86.95},{13.37,40.42},
{ 3.35,28.85},{93.36,135.47},{97.42,160.14},{71.00,130.15},
{39.27,86.13},{56.61,107.53},{78.80,123.79},{27.36,65.64},
{ 8.86,23.57},{92.97,143.47},{13.43,28.97},{25.97,49.39},
{21.94,50.20},{98.63,143.14},{ 2.84,19.86},{99.43,160.03},
{53.53,87.12},{26.30,69.98},{67.88,99.69},{32.81,66.79},
{42.65,67.91},{67.81,114.43},{40.99,78.61},{80.92,124.44},
{17.94,49.13},{51.70,86.77},{24.22,53.84},{89.42,131.54},
{70.15,126.90},{17.95,53.76},{79.04,123.07},{29.71,61.50},
{97.49,162.89},{ 6.59,23.00},{ 5.53,46.81},{72.71,120.54},
{88.96,152.45},{ 1.21,34.60},{ 5.62,39.20},{35.97,65.63},
{25.35,66.22},{72.08,140.06},{33.71,74.29},{72.82,118.23},
{10.10,36.18},{87.18,133.56},{71.24,114.03},{93.31,135.47},
{ 4.66,28.49},{86.83,157.98},{57.43,88.19},{97.28,155.39},
{92.76,140.78},{61.73,96.02},{82.07,138.35},{62.98,109.31},
{77.80,121.62},{27.96,69.60},{52.92,95.27},{50.61,100.54},
{81.50,122.90},{37.04,79.85},{40.31,74.58},{44.25,79.22},
{10.57,40.48},{61.47,97.31},{76.09,132.85},{77.72,114.80},
{75.90,134.10},{68.91,125.40},{39.34,64.16},{89.29,137.58},
{ 4.12,20.66},{75.14,124.62},{14.70,55.99},{66.92,103.69},
{93.89,146.83},{80.78,133.10},{51.98,83.76},{45.63,91.19},
{56.25,106.96},{72.27,117.25},{88.32,143.02},{79.99,138.66},
{17.61,27.37},{61.53,103.11},{88.67,139.22},{ 8.05,25.50},
{59.61,107.69},{ 8.00,48.50},{30.58,61.71},{86.76,138.59},
{80.38,145.01},{65.92,120.62},{57.48,93.89},{72.43,109.43},
{84.72,128.45},{23.99,54.22},{51.99,69.88},{69.76,131.52},
{18.78,50.65},{76.92,133.27},{20.11,50.78},{17.90,60.00},
{97.90,151.89},{62.53,93.53},{93.81,148.57},{ 7.81,27.50},
{28.43,54.07},{42.17,59.85},{36.25,67.25},{ 1.87,25.11},
{44.57,85.62},{45.18,87.97},{14.69,44.65},{16.12,46.83},
{61.81,114.06},{38.62,78.90},{18.93,57.64},{41.74,69.45},
{89.52,129.88},{79.16,112.10},{82.76,132.21},{13.50,44.82},
{10.65,49.67},{68.52,94.26},{28.85,75.44},{78.66,128.63},
{10.27,45.30},{20.21,39.81},{85.99,130.98},{50.71,71.42},
{33.52,83.11},{42.02,91.22},{81.95,133.52},{42.00,88.45},
{93.28,144.04},{91.76,147.87},{54.37,118.42},{59.81,78.12},
{42.51,78.12},{14.71,44.62},{36.02,56.67},{94.96,148.53},
{99.98,135.67},{11.55,36.05},{24.24,63.03},{54.99,91.22},
{33.00,91.65},{75.16,144.36},{88.76,142.54},{49.98,87.35},
{35.86,61.05},{81.90,141.91},{31.71,62.45},{25.83,61.03},
{ 8.49,35.70},{75.69,128.30},{83.62,144.97},{26.60,50.01},
{13.92,32.44},{65.81,120.45},{92.74,150.96},{60.15,102.68},
{91.01,146.24},{34.35,60.83},{44.01,81.18},{65.31,106.48},
{68.15,123.97},{75.57,110.51},{65.55,105.87},{14.43,37.14},
{25.65,50.14},{89.48,144.31},{59.31,122.26},{25.78,79.82},
{72.31,122.34},{79.97,130.61},{23.82,70.49},{36.20,80.54},
{18.64,43.30},{16.24,56.67},{ 6.64,27.39},{97.93,155.98},
{20.37,57.08},{33.93,76.36},{49.37,94.44},{64.48,110.94},
{48.03,91.54},{21.97,61.20},{22.02,61.66},{50.13,95.39},
{48.92,105.19},{14.38,44.04},{77.64,102.92},{39.12,100.47},
{ 8.97,29.69},{66.94,114.75},{50.67,85.25},{38.79,82.09},
{48.57,74.49},{69.69,114.27},{81.95,129.60},{85.01,123.31},
{64.49,117.95},{42.62,87.45},{ 3.84,30.82},{17.33,55.56},
{66.24,94.77},{ 0.52,31.45},{59.14,116.42},{79.37,117.62},
{34.91,73.84},{29.41,63.56},{80.09,138.81},{53.34,90.21},
{40.53,91.91},{23.01,58.53},{32.58,75.48},{15.90,47.98},
{ 5.58,24.14},{59.06,115.93},{67.43,124.42},{58.44,95.71},
{79.45,122.61},{66.57,99.14},{21.15,53.24},{46.71,94.30},
{24.85,33.03},{38.22,76.27},{21.46,57.73},{19.01,54.31},
{37.58,84.83},{90.52,144.01},{ 0.07,23.50},{99.32,155.94},
{82.62,127.06},{ 0.87,29.38},{11.37,49.33},{58.23,90.59},
{ 3.74,37.56},{ 9.07,30.06},{51.29,94.47},{52.39,89.50},
{98.37,159.16},{33.17,75.79},{58.55,110.48},{29.41,75.36},
{ 2.85,17.86},{16.82,48.18},{ 7.50,37.87},{71.20,128.33},
{77.60,131.30},{41.49,75.43},{54.15,96.60},{95.67,160.35},
{40.88,72.40},{25.86,50.43},{44.74,96.51},{70.50,121.79},
{ 4.20,15.74},{ 0.56,20.95},{59.24,99.63},{94.90,162.08},
{22.88,41.76},{44.34,73.70},{32.90,64.78},{92.09,144.54},
{64.50,123.10},{90.90,141.98},{63.43,102.73},{56.02,88.85},
{69.47,129.39},{11.30,44.10},{ 0.89, 0.93},{39.55,74.94},
{17.58,44.82},{35.79,63.03},{40.69,96.20},{84.17,140.40},
{13.23,45.08},{51.02,107.55},{60.56,99.90},{52.85,95.61},
{72.32,109.05},{42.01,81.78},{52.49,73.29},{86.05,158.54},
{59.85,99.52},{45.49,68.20},{25.32,56.60},{78.55,104.86},
{56.57,98.69},{99.12,130.06},{ 1.07,-0.32},{53.02,87.68},
{66.98,116.46},{63.00,108.70},{88.99,134.64},{53.37,91.65},
{10.13,39.45},{97.07,149.59},{85.50,142.27},{30.08,73.05},
{ 4.47,38.92},{46.64,90.55},{84.84,132.88},{ 8.56,40.82},
{33.86,56.51},{43.73,82.73},{55.67,96.45},{ 9.49,31.08},
{69.54,123.51},{44.59,78.62},{40.18,88.61},{12.64,50.00},
{52.38,90.39},{41.19,105.10},{96.11,166.41},{27.74,44.96},
{36.79,71.66},{ 4.23,21.37},{27.72,50.67},{ 9.51,36.65},
{31.31,58.40},{88.10,128.07},{28.88,59.31},{66.09,104.18},
{89.33,142.92},{44.11,61.80},{ 1.31,29.48},{90.24,154.17},
{47.38,95.76},{28.07,55.21},{52.29,101.06},{63.14,101.40},
{78.90,128.11},{20.26,34.85},{43.31,89.48},{56.94,100.09},
{48.32,77.52},{88.96,146.43},{85.86,118.63},{ 2.05,19.48},
{88.81,139.76},{33.38,61.97},{86.46,153.33},{70.21,125.51},
{60.33,114.61},{40.10,86.16},{54.92,99.03},{17.74,48.48},
{29.13,81.66},{38.50,91.91},{49.94,86.08},{36.79,67.19},
{70.63,110.77},{50.80,84.89},{74.90,133.55},{28.34,62.10},
{ 3.02,11.89},{12.22,49.36},{98.31,158.21},{98.26,146.79},
{50.44,79.91},{76.85,114.15},{54.10,92.78},{44.38,77.54},
{68.34,124.52},{73.39,127.73},{93.62,143.96},{86.29,144.49},
{14.61,56.60},{80.83,115.46},{68.38,116.28},{88.28,137.04},
{77.87,126.99},{83.49,145.46},{22.35,62.51},{52.13,78.60},
{ 1.38,26.93},{44.54,90.02},{51.37,82.02},{72.02,127.76},
{22.76,46.46},{98.22,147.11},{22.21,62.33},{99.29,158.31},
{48.19,89.27},{92.79,127.22},{64.96,93.81},{ 3.57,16.73},
{81.04,132.42},{74.21,111.82},{ 9.79,37.31},{ 4.09, 8.48},
{ 8.96,20.36},{30.70,55.48},{ 9.79,34.22},{89.76,138.25},
{72.79,122.88},{43.20,87.57},{37.17,75.97},{14.43,36.14},
{15.57,40.86},{ 2.20,23.61},{ 2.33,33.59},{31.66,83.74},
{49.87,74.09},{42.59,72.01},{ 7.32,21.53},{90.68,141.21},
{18.81,48.04},{19.26,63.05},{33.76,56.80},{79.96,129.50},
{78.76,135.96},{93.37,124.64},{12.73,45.71},{19.49,65.10},
{35.44,71.79},{87.92,145.25},{33.32,75.63},{ 9.60,28.09},
{56.36,104.74},{35.20,61.94},{34.42,77.26},{76.27,118.70},
{79.67,116.01},{81.99,124.31},{13.83,51.17},{27.94,65.67},
{80.05,127.99},{11.82,46.14},{12.26,39.31},{13.69,38.26},
{24.48,56.22},{ 6.74,41.08},{39.05,69.81},{ 3.53,40.31},
{61.53,96.54},{43.41,77.91},{96.68,157.36},{63.05,100.78},
{43.16,74.41},{86.16,125.34},{93.62,147.26},{65.39,117.76},
{72.53,108.02},{ 6.72,37.90},{95.62,148.91},{52.28,108.74},
{73.36,125.22},{11.78,40.55},{ 9.98,32.67},{79.02,133.92},
{ 9.50,38.50},{15.23,41.28},{24.58,62.80},{64.41,109.13}
};
/* Squared vertical distance between the point (x, y) and the line
   y = m*x + c. */
double residual_error(double x, double y, double m, double c) {
  double diff = m * x + c - y;
  return diff * diff;
}
/* Root-mean-square residual of the global data[] set (n_data points)
   against the line y = m*x + c. */
double rms_error(double m, double c) {
  double total = 0;
  for (int idx = 0; idx < n_data; idx++) {
    total += residual_error(data[idx].x, data[idx].y, m, c);
  }
  return sqrt(total / n_data);
}
/* Stores finish - start in nanoseconds into *difference.
   Returns 0 when the interval is positive, 1 otherwise. */
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
  long long int secs  = finish->tv_sec - start->tv_sec;
  long long int nanos = finish->tv_nsec - start->tv_nsec;
  /* Borrow one second when the nanosecond field underflows. */
  if (nanos < 0)
  {
    secs -= 1;
    nanos += 1000000000;
  }
  *difference = secs * 1000000000 + nanos;
  return !(*difference > 0);
}
/* Device-side squared residual of (x, y) against the line y = m*x + c. */
__device__ double d_residual_error(double x, double y, double m, double c) {
  double diff = m * x + c - y;
  return diff * diff;
}
/* One thread per data point: writes that point's squared residual for the
   candidate line (*m, *c) into error_sum_arr[i]; the host sums the array.
   Must be launched with exactly one thread per element of d_data. */
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  error_sum_arr[idx] = d_residual_error(d_data[idx].x, d_data[idx].y, *m, *c);
}
/* Hill-climbing linear regression: from (bm, bc), evaluate the RMS error of
   the 8 neighbouring (m, c) points on the GPU, step to the best one, and
   stop when no neighbour improves. */
int main() {
  int i;
  double bm = 1.3;        /* current best gradient */
  double bc = 10;         /* current best intercept */
  double be;              /* RMS error of the current best line */
  double dm[8];           /* candidate gradients for this pass */
  double dc[8];           /* candidate intercepts for this pass */
  double e[8];            /* RMS error of each candidate */
  double step = 0.01;
  double best_error;      /* lowest candidate error within one pass */
  int best_error_i;
  int minimum_found = 0;
  /* Offsets of the 8 neighbouring candidate points around (bm, bc). */
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* The data set never changes, so one upload before the loop suffices
     (the original re-copied it on every pass). */
  error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
  if(error){
    fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
      cudaGetErrorString(error));
  }
  while(!minimum_found) {
    /* Reset the per-pass winner so a stale index from a previous pass can
       never be reported against this pass's dm/dc values. */
    best_error = 999999999;
    best_error_i = 0;
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      double h_error_sum_arr[1000];
      /* Must be zero-initialised: this is a fresh automatic variable on
         every iteration, so the original's "reset at the end of the loop"
         never carried over and the first accumulation read garbage. */
      double error_sum_total = 0;
      double error_sum_mean;
      /* 100 blocks x 10 threads = one thread per data point (n_data = 1000). */
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      /* cudaThreadSynchronize() is deprecated. */
      cudaDeviceSynchronize();
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
          cudaGetErrorString(error));
      }
      /* Add each per-point squared residual to the total. */
      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }
      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    printf("best m,c is %lf,%lf with error %lf in direction %d\n",
      dm[best_error_i], dc[best_error_i], best_error, best_error_i);
    if(best_error < be) {
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  /* Free memory for d_dm */
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  /* Free memory for d_dc */
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
    (time_elapsed/1.0e9));
  return 0;
}
|
11,863 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
// Ceil-divide: number of blocks of BLOCK_SIZE needed to cover SIZE elements.
int set_grid(int SIZE, int BLOCK_SIZE)
{
    int blocks = SIZE / BLOCK_SIZE;
    if (SIZE % BLOCK_SIZE != 0)
        blocks += 1;
    return blocks;
}
// One Hillis-Steele inclusive-scan step for integers:
// d_out[i] = d_in[i] + d_in[i - step], treating out-of-range left
// neighbours as 0.
__global__ void hs_kernel_global(int *d_out, int *d_in, int step, int SIZE) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Grid may overshoot the array; skip out-of-range threads.
    if (gid >= SIZE) return;
    const int left = (gid >= step) ? d_in[gid - step] : 0;
    d_out[gid] = d_in[gid] + left;
}
// Runs the full Hillis-Steele inclusive scan over d_in, leaving the result
// in d_out. A scratch buffer is used so d_in is never modified.
// SIZE: element count; BYTES: SIZE * sizeof(int); NUM_THREADS: block size.
void hs_kernel_wrapper(int * d_out, int * d_in, int SIZE, unsigned int BYTES, int NUM_THREADS) {
    // Ceil-divide: the original SIZE/NUM_THREADS + 1 launched one whole
    // spurious (empty) block whenever SIZE was an exact multiple.
    int NUM_BLOCKS = (SIZE + NUM_THREADS - 1) / NUM_THREADS;
    // Scratch copy so the caller's d_in stays untouched.
    int *d_intermediate;
    cudaMalloc((void **) &d_intermediate, BYTES);
    cudaMemcpy(d_intermediate, d_in, BYTES, cudaMemcpyDeviceToDevice);
    // For SIZE <= 1 the loop below never runs; seed d_out so it still holds
    // the (trivial) scan result instead of uninitialised memory.
    cudaMemcpy(d_out, d_in, BYTES, cudaMemcpyDeviceToDevice);
    // step doubles each pass; terminates after O(log2(SIZE)) iterations.
    for (int step = 1; step < SIZE; step <<= 1) {
        hs_kernel_global<<<NUM_BLOCKS, NUM_THREADS>>>(d_out, d_intermediate, step, SIZE);
        cudaMemcpy(d_intermediate, d_out, BYTES, cudaMemcpyDeviceToDevice);
    }
    cudaFree(d_intermediate);
}
/* -------- MAIN -------- */
// Benchmarks the scan wrapper over sizes 2^0 .. 2^29, writing per-size mean
// kernel time (ms, averaged over `times` runs) to par_scan.csv.
int main(int argc, char **argv)
{
    std::ofstream myfile;
    myfile.open ("par_scan.csv");
    const unsigned int times = 10;
    for (unsigned int rounds = 0; rounds < 30; rounds++)
    {
        int NUM_THREADS = 1 << 10;
        int SIZE = 1 << rounds;
        unsigned int BYTES = SIZE * sizeof(int);
        int * d_in;
        int * d_out;
        cudaMalloc(&d_in, BYTES);
        cudaMalloc(&d_out, BYTES);
        int * h_in = (int *)malloc(BYTES);
        int * h_out = (int *)malloc(BYTES);
        for (int i = 0; i < SIZE; i++) h_in[i] = 1;
        cudaMemcpy(d_in, h_in, BYTES, cudaMemcpyHostToDevice);
        // Time `times` full scans with CUDA events.
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        for (unsigned int i = 0; i < times; i++)
        {
            hs_kernel_wrapper(d_out, d_in, SIZE, BYTES, NUM_THREADS);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        elapsedTime = elapsedTime / ((float) times);
        printf("time!: %.5f\n", elapsedTime);
        cudaMemcpy(h_out, d_out, BYTES, cudaMemcpyDeviceToHost);
        myfile << elapsedTime << "," << std::endl;
        // Release per-round resources: the original leaked every buffer and
        // both events across all 30 rounds (several GB by the last rounds).
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaFree(d_in);
        cudaFree(d_out);
        free(h_in);
        free(h_out);
    }
    myfile.close();
    return 0;
}
|
11,864 | #include <iostream>
#include <vector>
#include <chrono>
#include <cassert>
/***
first argument is the approach
0: sequential, 1: reduce1, 2: reduce2, 3: reduce3, 4: reduce4, 5:reduce5
second argument is the number of elements
***/
// Final stage of the shared-memory tree reduction: lanes 0..31 fold the top
// 32 shared slots down into sdata[tid] with no __syncthreads().
// The `elements` tests skip strides larger than the problem size (needed
// when the whole reduction fits in a single small block).
// NOTE(review): correctness relies on the legacy "volatile + implicit warp
// synchrony" idiom. Under independent thread scheduling (Volta/SM70+) the
// lanes of a warp are not guaranteed to run in lockstep, so a __syncwarp()
// between steps (or cub::WarpReduce) is the safe modern form — confirm the
// target architecture before relying on this as written.
__device__ void warpReduce(volatile unsigned int* sdata, const unsigned int tid, const unsigned int elements){
if(elements > 32) sdata[tid] += sdata[tid + 32];
if(elements > 16) sdata[tid] += sdata[tid + 16];
if(elements > 8) sdata[tid] += sdata[tid + 8];
if(elements > 4) sdata[tid] += sdata[tid + 4];
if(elements > 2) sdata[tid] += sdata[tid + 2];
if(elements > 1) sdata[tid] += sdata[tid + 1];
}
// Reduction v5: add-during-load, sequential addressing, and an unrolled
// warp-level finish (no __syncthreads() once <= 32 lanes remain).
// Needs blockDim.x * sizeof(unsigned int) dynamic shared memory; the
// halving loop assumes a power-of-two blockDim.x.
__global__ void kernel5(unsigned int* d_in, unsigned int* d_out, const unsigned int elements){
    extern __shared__ unsigned int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * (blockDim.x * 2);
    sdata[tid] = 0;
    if(i < elements){
        // Multi-block pass: each thread also pre-adds the element one
        // block-width away. Guard that second load — the original read
        // past the end of d_in in the tail block.
        unsigned int j = i + blockDim.x;
        if(gridDim.x > 1) sdata[tid] = d_in[i] + ((j < elements) ? d_in[j] : 0u);
        else sdata[tid] = d_in[i];
    }
    __syncthreads();
    for(unsigned int s = blockDim.x / 2; s > 32; s >>= 1){
        if(tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if(tid < 32) warpReduce(sdata, tid, elements);
    if(tid == 0) d_out[blockIdx.x] = sdata[0];
}
// Reduction v4: add-during-load plus sequential addressing.
// Needs blockDim.x * sizeof(unsigned int) dynamic shared memory; the
// halving loop assumes a power-of-two blockDim.x.
__global__ void kernel4(unsigned int* d_in, unsigned int* d_out, const unsigned int elements){
    extern __shared__ unsigned int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * (blockDim.x * 2);
    sdata[tid] = 0;
    if(i < elements){
        // Guard the second (one block-width away) load — the original read
        // past the end of d_in in the tail block.
        unsigned int j = i + blockDim.x;
        if(gridDim.x > 1) sdata[tid] = d_in[i] + ((j < elements) ? d_in[j] : 0u);
        else sdata[tid] = d_in[i];
    }
    __syncthreads();
    for(unsigned int s = blockDim.x / 2; s > 0; s >>= 1){
        if(tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if(tid == 0) d_out[blockIdx.x] = sdata[0];
}
// Reduction v3: sequential addressing. Each block sums up to blockDim.x
// inputs in shared memory and writes its partial sum to d_out[blockIdx.x].
// Needs blockDim.x * sizeof(unsigned int) dynamic shared memory; the
// halving loop assumes a power-of-two blockDim.x (odd remainders would be
// dropped).
__global__ void kernel3(unsigned int* d_in, unsigned int* d_out, const unsigned int elements){
    extern __shared__ unsigned int sdata[];
    const unsigned int tid = threadIdx.x;
    const unsigned int gid = threadIdx.x + blockIdx.x * blockDim.x;
    // Out-of-range threads contribute the additive identity.
    sdata[tid] = (gid < elements) ? d_in[gid] : 0;
    __syncthreads();
    for(unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1){
        if(tid < stride) sdata[tid] += sdata[tid + stride];
        __syncthreads();
    }
    if(tid == 0) d_out[blockIdx.x] = sdata[0];
}
// Reduction v2: interleaved addressing via strided index (removes the
// modulo divergence of v1, but causes shared-memory bank conflicts).
// Needs blockDim.x * sizeof(unsigned int) dynamic shared memory.
__global__ void kernel2(unsigned int* d_in, unsigned int* d_out, const unsigned int elements){
    extern __shared__ unsigned int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    sdata[tid] = (i < elements) ? d_in[i] : 0;
    __syncthreads();
    for(unsigned int s = 1; s < blockDim.x; s*=2){
        unsigned int index = 2 * s * tid;
        // Guard on index + s (not just index): for a non-power-of-two
        // blockDim.x the original could read sdata past the end of the
        // shared array. For power-of-two blocks the behaviour is identical.
        if(index + s < blockDim.x) sdata[index] += sdata[index + s];
        __syncthreads();
    }
    if(tid == 0) d_out[blockIdx.x] = sdata[0];
}
// Reduction v1: interleaved addressing with a modulo test (highly divergent
// — kept as the baseline variant). Needs blockDim.x * sizeof(unsigned int)
// dynamic shared memory.
__global__ void kernel1(unsigned int* d_in, unsigned int* d_out, const unsigned int elements){
    extern __shared__ unsigned int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    sdata[tid] = (i < elements) ? d_in[i] : 0;
    __syncthreads();
    // The loop bound must be uniform across the block: the original's extra
    // exit condition (tid + s) < elements made some threads leave the loop
    // while others still hit __syncthreads() — undefined behaviour. The
    // bounds test now only guards the accumulation (against the shared
    // array, whose out-of-range slots were zero-filled above).
    for(unsigned int s = 1; s < blockDim.x; s*=2){
        if(tid % (2*s) == 0 && (tid + s) < blockDim.x) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if(tid == 0) d_out[blockIdx.x] = sdata[0];
}
// Iteratively reduces `elements` alternating 0/1 values on the GPU with the
// selected kernel until one partial sum remains, validates it against the
// known total (elements / 2), and prints the elapsed time.
// `elements` is passed by reference and is consumed (ends at <= 1).
void reduce_sum(const int approach, unsigned int& elements){
    unsigned int numBlocks = 0;
    int numThreads = 1024;
    unsigned int originalElements = elements;
    unsigned int size = elements * sizeof(unsigned int);
    std::vector<unsigned int> h_in;
    std::vector<unsigned int> h_out(elements, 0);
    for(unsigned int i = 0; i < elements; ++i) h_in.push_back(i%2);
    unsigned int gold_sum = elements/2;
    unsigned int* d_in;
    unsigned int* d_out;
    cudaMalloc(&d_in, size);
    cudaMalloc(&d_out, size);
    // Remember the original allocations: the loop below re-points d_in at
    // d_out, so the original's cudaFree(d_in)/cudaFree(d_out) freed the
    // same buffer twice and leaked the first one.
    unsigned int* d_alloc_in = d_in;
    unsigned int* d_alloc_out = d_out;
    cudaMemcpy(d_in, h_in.data(), size, cudaMemcpyHostToDevice);
    auto start = std::chrono::steady_clock::now();
    while(elements > 1){
        if(elements < numThreads){
            numBlocks = 1;
            numThreads = elements;
        }
        else numBlocks = (elements - 1) / numThreads + 1;
        if(approach == 1)
            kernel1<<<numBlocks, numThreads, numThreads * sizeof(unsigned int)>>>(d_in, d_out, elements);
        else if(approach == 2)
            kernel2<<<numBlocks, numThreads, numThreads * sizeof(unsigned int)>>>(d_in, d_out, elements);
        else if(approach == 3)
            kernel3<<<numBlocks, numThreads, numThreads * sizeof(unsigned int)>>>(d_in, d_out, elements);
        else if(approach == 4)
            kernel4<<<numBlocks, numThreads, numThreads * sizeof(unsigned int)>>>(d_in, d_out, elements);
        else
            kernel5<<<numBlocks, numThreads, numThreads * sizeof(unsigned int)>>>(d_in, d_out, elements);
        elements = numBlocks;
        // Add-during-load kernels consume two blocks' worth per output.
        if(approach >= 4) elements = elements / 2;
        d_in = d_out;
    }
    auto end = std::chrono::steady_clock::now();
    cudaMemcpy(h_out.data(), d_out, size, cudaMemcpyDeviceToHost);
    cudaFree(d_alloc_in);
    cudaFree(d_alloc_out);
    assert(h_out[0] == gold_sum);
    // The duration is cast to microseconds; the original printed it with an
    // "ms" label, which was simply the wrong unit.
    std::cout << "reduce " << approach << " took " << std::chrono::duration_cast<std::chrono::microseconds>(end-start).count();
    std::cout << " us to sum " << originalElements << " elements\n";
}
// CPU baseline: sums `elements` alternating 0/1 values sequentially,
// asserts the result equals elements / 2, and prints the elapsed time.
void reduce_sum_sequential(const unsigned int elements){
    std::vector<unsigned int> h_in;
    // sequential_sum must start at 0: the original accumulated into an
    // uninitialised variable (undefined behaviour).
    unsigned int gold_sum = 0, sequential_sum = 0;
    for(unsigned int i = 0; i < elements; ++i) h_in.push_back(i%2);
    gold_sum = elements / 2;
    auto start = std::chrono::steady_clock::now();
    for(auto i : h_in) sequential_sum += i;
    auto end = std::chrono::steady_clock::now();
    assert(gold_sum == sequential_sum);
    std::cout << "sequential reduce took " << std::chrono::duration_cast<std::chrono::microseconds>(end-start).count();
    std::cout << " ms to sum " << elements << " elements\n";
}
// argv[1]: approach (0 = sequential CPU, 1-5 = GPU kernels);
// argv[2]: number of elements.
int main(int argc, char* argv[]){
    // The original dereferenced argv[1]/argv[2] unconditionally and crashed
    // when run without arguments.
    if(argc < 3){
        std::cerr << "usage: " << argv[0] << " <approach 0-5> <elements>\n";
        return 1;
    }
    int approach = std::stoi(argv[1]);
    unsigned int elements = std::stoi(argv[2]);
    if(approach == 0) reduce_sum_sequential(elements);
    else reduce_sum(approach, elements);
    return 0;
}
|
11,865 | /*
A multithreaded C-program for MT19937.
Original single threaded C reference coded by Takuji Nishimurar
and Makoto Matsumoto, with initialization improved 2002/1/26.
Multithreaded C implementation coded by Eric Mills.
Before using, initialize the state by using mt19937gi(seed)
or mt19937gai(init_key, key_length) for the global memory versions or
mt19937si(seed) or mt19937sai(init_key, key_length) for all shared
memory versions.
Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
All rights reserved.
Multithreaded implementation Copyright (C) 2007, Eric Mills.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Any feedback is very welcome.
http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space)
*/
#define NVG80 /* For Nvidia G80 achitecture where mod is VERY slow */
#ifdef NVG80
#define mod(x, y) ((x) < (y) ? (x) : (x) - (y)) /* Short mod - known input range */
#else
#define mod(x, y) ((x) % (y))
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
#define N 624
#define M 397
#define INIT_MULT 1812433253 /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
#define ARRAY_SEED 19650218 /* Seed for initial setup before incorp array seed */
#define MATRIX_A 0x9908b0df /* Constant vector a */
#define UPPER_MASK 0x80000000 /* Most significant w-r bits */
#define LOWER_MASK 0x7fffffff /* Least significant r bits */
#define TEMPER1 0x9d2c5680
#define TEMPER2 0xefc60000
/* First a global memory implementation that uses 2 global reads and 1 global
* write per result and keeps only 2 words of state in permanent shared memory. */
#define MAX_THREADS 227 /* Set to minimise shared memory allocation (max blockDim.x) */
#define MAX_BLOCKS 256 /* Set to minimise global memory allocation (max gridDim.x) */
__shared__ int mtNext; /* Start of next block of seeds */
__shared__ uint mtNexti; /* Indirect on above to save global read cycle */
__device__ uint g_seeds[MAX_BLOCKS][N];
__constant__ uint mag01[2] = {0, MATRIX_A}; /* 2 way bus conflict for each read */
/* Init by single seed - single threaded as only used once */
/* Seed this block's global-memory MT19937 state from one 32-bit seed.
 * Thread 0 alone fills the 624-word state g_seeds[blockIdx.x] with the
 * standard Knuth-multiplier recurrence; every thread writes mtNext = 0
 * (same value, so the unsynchronised shared write is benign).
 * NOTE(review): no trailing __syncthreads() here — the generators
 * (mt19937g/mt19937gl) sync on entry, which appears to be the intended
 * contract; confirm before calling anything else between init and use. */
__device__ static void
mt19937gi(uint seed)
{
int i;
mtNext = 0; /* next state index to consume; shared, same value from all threads */
if (threadIdx.x == 0)
{
g_seeds[blockIdx.x][0] = mtNexti = seed;
for (i = 1; i < N; i++)
{
seed = (INIT_MULT * (seed ^ (seed >> 30)) + i); /* Knuth TAOCP Vol2 initializer */
g_seeds[blockIdx.x][i] = seed;
}
}
return;
}
/* Init by array - single threaded as only used once, opt to reduce global refs */
/* Seed this block's global-memory MT19937 state from a key array
 * (seeds[0..length-1]), mirroring the reference init_by_array().
 * Single-threaded (thread 0) since it runs once; mti/mtj cache the
 * current and previous state words to halve global-memory reads. */
__device__ static void
mt19937gai(uint* seeds, uint length)
{
mt19937gi(ARRAY_SEED); /* base state before the key is mixed in */
if (threadIdx.x == 0)
{
int i = 1;
int j = 0;
int k;
uint mti; /* g_seeds[i] */
uint mtj; /* g_seeds[i - 1] */
mti = g_seeds[blockIdx.x][0];
/* pass 1: non-linear mix of the key into the state */
for (k = N > length ? N : length; k != 0; k--)
{
mtj = mti;
mti = g_seeds[blockIdx.x][i];
mti = (mti ^ ((mtj ^ (mtj >> 30)) * 1664525)) + seeds[j] + j;
g_seeds[blockIdx.x][i] = mti;
if (++i >= N)
{
g_seeds[blockIdx.x][0] = mti;
i = 1;
}
if (++j >= length)
{
j = 0;
}
}
/* pass 2: second non-linear stir, independent of the key */
for (k = N - 1; k != 0; k--)
{
mtj = mti;
mti = g_seeds[blockIdx.x][i];
mti = (mti ^ ((mtj ^ (mtj >> 30)) * 1566083941)) - i;
g_seeds[blockIdx.x][i] = mti;
if (++i >= N)
{
g_seeds[blockIdx.x][0] = mti;
i = 1;
}
}
g_seeds[blockIdx.x][0] = mtNexti = 0x80000000; /* MSB is 1; assuring non-zero initial array */
}
return;
}
/* Return next MT random by increasing thread ID, Good for 1 - 227 threads.
* Note you should wind back MAX_THREADS to your max requirement
* to keep auto allocation of shared mem to a minimum.
* Best as a general purpose library routine. */
/* One MT19937 output per thread, by increasing thread ID; valid for
 * 1..227 (= N-M) threads per block. State lives in global memory; a
 * shared staging buffer seed[] holds the blockDim.x+1 consecutive state
 * words a warp-sized update needs. Barrier order is load-bearing:
 * sync #1 closes out the previous call, sync #2 publishes seed[]. */
__device__ static uint
mt19937g(void)
{
int kk;
uint y;
const int tid = threadIdx.x;
__shared__ uint seed[MAX_THREADS + 1];
kk = mod(mtNext + tid, N); /* this thread's state slot */
__syncthreads(); /* Finish with mtNext & g_seeds ready from last call & init */
seed[tid + 1] = g_seeds[blockIdx.x][mod(kk + 1, N)]; /* Sequential but not aligned */
if (tid == blockDim.x - 1)
{
mtNext = kk + 1; /* advance window for next call */
seed[0] = mtNexti;
mtNexti = seed[blockDim.x];
}
__syncthreads(); /* seed[] ready */
y = (seed[tid] & UPPER_MASK) | (seed[tid + 1] & LOWER_MASK);
y = g_seeds[blockIdx.x][kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1];
g_seeds[blockIdx.x][kk] = y; /* Does not overlap above reads */
y ^= (y >> 11); /* Tempering */
y ^= (y << 7) & TEMPER1;
y ^= (y << 15) & TEMPER2;
y ^= (y >> 18);
return y;
}
/* Generalised global memory version for any number of threads.
* Note only runs up to 227 at a time, rest loop and block till all done.
* Runs fractional warps at each end so not perfect utilisation.
* Uses 228 words of auto allocated shared mem. */
/* Global-memory MT19937 for an arbitrary blockDim.x: the block is
 * processed in windows of N-M (=227) threads; threads outside the
 * current window idle at the barriers. Each thread falls in exactly
 * one window (tid = threadIdx.x - jj lands in [0, N-M) once), so every
 * thread's y is assigned before tempering. Note y doubles as a
 * temporary for the window-size min() before being recomputed. */
__device__ static uint
mt19937gl(void)
{
int jj;
int kk;
uint y;
int tid; /* Offset thread ID */
__shared__ uint seed[N - M + 1];
kk = mod(mtNext + threadIdx.x, N); /* G80 limited to 512 threads */
__syncthreads(); /* Finish with mtNext & g_seeds set from init */
if (threadIdx.x == blockDim.x - 1)
{
mtNext = kk + 1; /* Modded next call */
}
jj = 0;
do
{
__syncthreads(); /* g_seeds set from last loop */
tid = threadIdx.x - jj;
if (0 <= tid && tid < N - M)
{
seed[tid + 1] = g_seeds[blockIdx.x][mod(kk + 1, N)]; /* Sequential but not aligned */
y = min(N - M, blockDim.x - jj); /* y reused as scratch: size of this window */
if (tid == y - 1) /* Last thread this loop */
{
seed[0] = mtNexti;
mtNexti = seed[y];
}
}
__syncthreads(); /* seed[] ready */
if (0 <= tid && tid < N - M)
{
y = (seed[tid] & UPPER_MASK) | (seed[tid + 1] & LOWER_MASK);
y = g_seeds[blockIdx.x][kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1];
g_seeds[blockIdx.x][kk] = y; /* Does not overlap reads above */
}
} while ((jj += N - M) < blockDim.x);
y ^= (y >> 11); /* Tempering */
y ^= (y << 7) & TEMPER1;
y ^= (y << 15) & TEMPER2;
y ^= (y >> 18);
return y;
}
/*************************************************************************************
* This is a shared memory implementation that keeps the full 626 words of state
* in shared memory. Faster for heavy random work where you can afford shared mem. */
__shared__ int mtNexts; /* Start of next block of seeds */
__shared__ uint s_seeds[N + 1];
/* Init by single seed - single threaded as only used once */
/* Seed the shared-memory MT19937 state (s_seeds, 624 words + 1 spill
 * slot) from a single 32-bit seed. Thread 0 does the fill; the trailing
 * block-wide barrier publishes the state to all threads before any
 * generator call (and is required by mt19937w()). */
__device__ static void
mt19937si(uint seed)
{
int i;
if (threadIdx.x == 0)
{
mtNexts = 0; /* next state index to consume */
s_seeds[0] = seed;
for (i = 1; i < N; i++)
{
seed = (INIT_MULT * (seed ^ (seed >> 30)) + i); /* Knuth TAOCP Vol2 initializer */
s_seeds[i] = seed;
}
}
__syncthreads(); /* Ensure mtNexts set & needed for mt19937w() */
return;
}
/* Init by array - single threaded as only used once */
/* Seed the shared-memory MT19937 state from a key array
 * (seeds[0..length-1]), mirroring the reference init_by_array():
 * two single-threaded non-linear mixing passes over s_seeds, then a
 * barrier so the whole block sees the final state. */
__device__ static void
mt19937sai(uint* seeds, uint length)
{
mt19937si(ARRAY_SEED); /* base state before the key is mixed in */
if (threadIdx.x == 0)
{
int i = 1;
int j = 0;
int k;
/* pass 1: non-linear mix of the key into the state */
for (k = N > length ? N : length; k != 0; k--)
{
s_seeds[i] = (s_seeds[i] ^ ((s_seeds[i - 1] ^ (s_seeds[i - 1] >> 30)) * 1664525)) + seeds[j] + j;
if (++i >= N)
{
s_seeds[0] = s_seeds[N - 1];
i = 1;
}
if (++j >= length)
{
j = 0;
}
}
/* pass 2: second non-linear stir, independent of the key */
for (k = N - 1; k != 0; k--)
{
s_seeds[i] = (s_seeds[i] ^ ((s_seeds[i - 1] ^ (s_seeds[i - 1] >> 30)) * 1566083941)) - i;
if (++i >= N)
{
s_seeds[0] = s_seeds[N - 1];
i = 1;
}
}
s_seeds[0] = 0x80000000; /* MSB is 1; assuring non-zero initial array */
}
__syncthreads(); /* Needed for mt19937w() */
return;
}
/* Return next MT random by increasing thread ID for 1-227 threads. */
/* One MT19937 output per thread from the shared-memory state; valid
 * for 1..227 (= N-M) threads so the read window s_seeds[kk..kk+M]
 * never overlaps this call's writes. Barrier order is load-bearing:
 * sync #1 closes out the previous call, sync #2 separates this call's
 * reads from its state update. */
__device__ static uint
mt19937s(void)
{
int kk;
uint y;
const int tid = threadIdx.x;
kk = mod(mtNexts + tid, N); /* this thread's state slot */
__syncthreads(); /* Finished with mtNexts & s_seed[] ready from last run */
if (tid == blockDim.x - 1)
{
mtNexts = kk + 1; /* Will get modded on next call */
}
y = (s_seeds[kk] & UPPER_MASK) | (s_seeds[kk + 1] & LOWER_MASK);
y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1];
//y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ (y & 1 ? MATRIX_A : 0); // Same speed
__syncthreads(); /* All done before we update */
s_seeds[kk] = y;
if (kk == 0) /* Copy up for next round */
{
s_seeds[N] = y;
}
y ^= (y >> 11); /* Tempering */
y ^= (y << 7) & TEMPER1;
y ^= (y << 15) & TEMPER2;
y ^= (y >> 18);
return y;
}
/* General shared memory version for any number of threads.
* Note only up to 227 threads are run at any one time,
* the rest loop and block till all are done. */
/* Shared-memory MT19937 for an arbitrary blockDim.x: threads are
 * processed in windows of N-M (=227); threads outside the current
 * window idle at the barriers. Each thread falls in exactly one
 * window, so every thread's y is assigned before tempering. */
__device__ static uint
mt19937sl(void)
{
int jj;
int kk;
uint y;
int tid; /* Offset thread ID */
kk = mod(mtNexts + threadIdx.x, N); /* G80 limited to 512 threads */
__syncthreads(); /* Finished with mtNexts & s_seed[] ready from init */
if (threadIdx.x == blockDim.x - 1)
{
mtNexts = kk + 1; /* Will get modded on next call */
}
jj = 0;
do
{
__syncthreads(); /* s_seeds[] ready from last loop */
tid = threadIdx.x - jj;
if (0 <= tid && tid < N - M)
{
y = (s_seeds[kk] & UPPER_MASK) | (s_seeds[kk + 1] & LOWER_MASK);
y = s_seeds[kk < N - M ? kk + M : kk + (M - N)] ^ (y >> 1) ^ mag01[y & 1];
}
__syncthreads(); /* All done before we update */
if (0 <= tid && tid < N - M)
{
s_seeds[kk] = y;
if (kk == 0)
{
s_seeds[N] = y;
}
}
} while ((jj += N - M) < blockDim.x);
y ^= (y >> 11); /* Tempering */
y ^= (y << 7) & TEMPER1;
y ^= (y << 15) & TEMPER2;
y ^= (y >> 18);
return y;
}
/***************************************************************************************
* This is an implementation of a full step in 1 call - all 624 results returned at once
* in pairs - 64 bit version. It may be run with 227-312 threads and will drop numbers
* from the sequence if < 312 (not incorrect).
* Original idea for this version was first presented by Brian Budge. */
#define B2 224 /* Size of second block */
/* Whole-state step in one call: advances all 624 state words in three
 * phases (227 / 224 / 173 lanes) and returns two tempered outputs per
 * thread as a uint2 (words 2*tid and 2*tid+1). Intended for 227-312
 * threads (see file comment); every thread must call it so the
 * barriers between read and write phases are block-uniform. */
__device__ static uint2
mt19937w(const int tid)
{
int kk;
uint y;
uint2 ret;
kk = tid;
/* First 227 */
if (kk < N-M) {
y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK);
y = s_seeds[kk+M] ^ (y >> 1) ^ mag01[y & 1];
}
__syncthreads();
if (kk < N-M) {
s_seeds[kk] = y;
if (kk == 0)
{
s_seeds[N] = y; /* spill slot so s_seeds[kk+1] works at kk = N-1 */
}
}
kk += N-M;
__syncthreads();
/* Next 224 */
if (kk < N-M + B2) {
y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK);
y = s_seeds[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 1];
}
__syncthreads();
if (kk < N-M + B2) {
s_seeds[kk] = y;
}
kk += B2;
__syncthreads();
/* Last 173 */
if (kk < N) {
y = (s_seeds[kk]&UPPER_MASK)|(s_seeds[kk+1]&LOWER_MASK);
y = s_seeds[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 1];
}
__syncthreads();
if (kk < N) {
s_seeds[kk] = y;
}
__syncthreads();
/* temper and return two consecutive outputs per thread */
ret.x = s_seeds[2*tid];
ret.x ^= (ret.x >> 11); /* Tempering */
ret.x ^= (ret.x << 7) & TEMPER1;
ret.x ^= (ret.x << 15) & TEMPER2;
ret.x ^= (ret.x >> 18);
ret.y = s_seeds[2*tid+1];
ret.y ^= (ret.y >> 11);
ret.y ^= (ret.y << 7) & TEMPER1;
ret.y ^= (ret.y << 15) & TEMPER2;
ret.y ^= (ret.y >> 18);
return ret;
}
/*******************************************************************************
* For reference this is the original C single threaded source: */
#if 0
static unsigned long mt[N]; /* the array for the state vector */
static int mti=N+1; /* mti==N+1 means mt[N] is not initialized */
/* initializes mt[N] with a seed */
void init_genrand(unsigned long s)
{
mt[0]= s & 0xffffffffUL;
for (mti=1; mti<N; mti++) {
mt[mti] =
(1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
/* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */
/* In the previous versions, MSBs of the seed affect */
/* only MSBs of the array mt[]. */
/* 2002/01/09 modified by Makoto Matsumoto */
mt[mti] &= 0xffffffffUL;
/* for >32 bit machines */
}
}
/* initialize by an array with array-length */
/* init_key is the array for initializing keys */
/* key_length is its length */
/* slight change for C++, 2004/2/26 */
void init_by_array(unsigned long init_key[], int key_length)
{
int i, j, k;
init_genrand(19650218UL);
i=1; j=0;
k = (N>key_length ? N : key_length);
for (; k; k--) {
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1664525UL))
+ init_key[j] + j; /* non linear */
mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
i++; j++;
if (i>=N) { mt[0] = mt[N-1]; i=1; }
if (j>=key_length) j=0;
}
for (k=N-1; k; k--) {
mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL))
- i; /* non linear */
mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
i++;
if (i>=N) { mt[0] = mt[N-1]; i=1; }
}
mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
}
/* generates a random number on [0,0xffffffff]-interval */
unsigned long genrand_int32(void)
{
unsigned long y;
static unsigned long mag01[2]={0x0UL, MATRIX_A};
/* mag01[x] = x * MATRIX_A for x=0,1 */
if (mti >= N) { /* generate N words at one time */
int kk;
if (mti == N+1) /* if init_genrand() has not been called, */
init_genrand(5489UL); /* a default initial seed is used */
for (kk=0;kk<N-M;kk++) {
y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1UL];
}
for (;kk<N-1;kk++) {
y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK);
mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1UL];
}
y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK);
mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1UL];
mti = 0;
}
y = mt[mti++];
/* Tempering */
y ^= (y >> 11);
y ^= (y << 7) & 0x9d2c5680UL;
y ^= (y << 15) & 0xefc60000UL;
y ^= (y >> 18);
return y;
}
#endif
|
11,866 | #include <stdio.h>
__global__ void reducePI(float *d_sum, int num)
{
    // Midpoint-rule estimate of pi = integral over [0,1] of 4/(1+x^2) dx.
    // Each thread accumulates a grid-stride partial sum in shared memory,
    // then the block tree-reduces it; d_sum[blockIdx.x] gets one partial
    // per block (host / second kernel must finish the reduction and divide
    // by num). Assumes blockDim.x is a power of two; launch with
    // blockDim.x * sizeof(float) bytes of dynamic shared memory.
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    extern float __shared__ s_pi[]; // one partial per thread in the block
    s_pi[threadIdx.x] = 0.0f;
    while (gid < num) {
        float x = (gid + 0.5f) / num; // midpoint of this sub-interval
        // BUG FIX: the integrand was dropped — the original added a bare
        // 4.0f and never used the computed x, so the result was just 4*num.
        s_pi[threadIdx.x] += 4.0f / (1.0f + x * x);
        gid += blockDim.x * gridDim.x;
    }
    // BUG FIX: barrier was missing here; the first reduction step read
    // neighbours' partials before they were published.
    __syncthreads();
    for (int i = (blockDim.x >> 1); i > 0; i >>= 1) {
        if (threadIdx.x < i) {
            s_pi[threadIdx.x] += s_pi[threadIdx.x + i];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) d_sum[blockIdx.x] = s_pi[0];
}
/* Second-stage reduction: one block sums the per-block partials in d_sum
 * and writes the final pi estimate (sum / num) to *d_pi.
 * Launch with one block of exactly as many threads as there are partials,
 * blockDim.x a power of two, and blockDim.x * sizeof(float) dynamic
 * shared memory. */
__global__ void reducePI2(float *d_sum,int num,float *d_pi)
{
int id=threadIdx.x;
extern float __shared__ s_sum[]; /* dynamic: one slot per thread */
s_sum[id]=d_sum[id]; /* stage the partials in shared memory */
__syncthreads();
/* power-of-two tree reduction */
for(int i = (blockDim.x>>1); i>0; i>>=1){
if(id<i) s_sum[id]+=s_sum[id+i];
__syncthreads();
}
// printf("%d,%f\n",id,s_sum[id]);
if(id==0)
{
*d_pi=s_sum[0]/num; /* average of the 4/(1+x^2) samples = pi estimate */
// printf("%d,%f\n",id,*pi);
}
}
/* Placeholder driver: the kernels above are compiled but never launched. */
int main()
{
return 0;
}
11,867 | #include <stdio.h>
#include <cuda_runtime.h>
#define N 1024 // vector size
#define TxB 32 // threads x block
/*
* kernel: branch
*/
__global__ void pari_dispari_1(int *c) {
    // Warp-divergence demo: even and odd lanes take different branches,
    // so both paths are executed with masking inside each warp.
    // Writes 2 at even global indices and 1 at odd ones. There is no
    // bounds guard: the launch must supply exactly one thread per element.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int even_part = 0;
    int odd_part = 0;
    if (idx % 2 == 0) {
        even_part = 2;   // taken by even lanes only
    } else {
        odd_part = 1;    // taken by odd lanes only
    }
    c[idx] = even_part + odd_part;
}
int main(void) {
    // Fill an N-element device array via the branching kernel, copy it back
    // and print it.
    int *c;
    int *dev_c;
    int nBytes = N * sizeof(int);
    // malloc host memory
    c = (int *)malloc(nBytes);
    if (c == NULL) {
        fprintf(stderr, "host malloc failed\n");
        return 1;
    }
    // malloc device memory
    cudaMalloc((void**) &dev_c, nBytes);
    // BUG FIX: the original launched <<<N, TxB>>>, i.e. N blocks of TxB
    // threads = N*TxB threads, and the kernel writes c[tid] unguarded, so
    // it scribbled TxB times past the end of dev_c. Launch exactly enough
    // TxB-wide blocks to cover N elements instead.
    pari_dispari_1<<<(N + TxB - 1) / TxB, TxB>>>(dev_c);
    // copy the array 'c' back from the GPU to the CPU
    cudaMemcpy(c, dev_c, nBytes, cudaMemcpyDeviceToHost);
    // display the results
    for (int i = 0; i < N; i++) {
        printf("%d\n", c[i]);
    }
    // Free host memory
    free(c);
    // free the memory allocated on the GPU
    cudaFree(dev_c);
    return 0;
}
|
11,868 | #include <cuda_runtime.h>
#include <stdio.h>
__device__ float reference(double x)
{
    // Ground-truth gradient of mish(x) = x * tanh(softplus(x)), computed
    // entirely in double precision; the result is rounded to float on return.
    const double softplus = log1p(exp(x));
    const double d_softplus = -expm1(-softplus);        // sigmoid(x), written stably
    const double t = tanh(softplus);
    const double d_tanh_sp = (1 - t * t) * d_softplus;  // chain rule through tanh
    return x * d_tanh_sp + t;                           // product rule: x*g' + g
}
__device__ float softplus_kernel(float x, float threshold = 20) {
    // Numerically-safe softplus: asymptotically linear for large x,
    // asymptotically exp(x) for very negative x, exact in between.
    if (x > threshold) {
        return x;
    }
    if (x < -threshold) {
        return expf(x);
    }
    return log1pf(expf(x));
}
__device__ float dn_reference(float x)
{
    // Single-precision mish gradient in the darknet-style formulation.
    // Deliberately kept identical to the original, including the
    // double-precision tanh() overload it relied on.
    const float MISH_THRESHOLD = 20.0f;
    const float sp = softplus_kernel(x, MISH_THRESHOLD);
    const float sig = -expm1f(-sp);           // sigmoid via stable expm1
    const float t = tanh(sp);
    const float d_tanh_sp = (1 - t * t) * sig;
    return x * d_tanh_sp + t;
}
/* Accuracy sweep over x in [-100, 10): compares a fast __expf-based
 * mish-gradient candidate (expr1) and the darknet reference (expr2)
 * against the double-precision ground truth, printing absolute errors.
 * Run single-threaded (<<<1,1>>>) — every thread would print the whole
 * sweep. */
__global__ void test()
{
for (float x = -100; x < 10; x += 0.1)
{
// double precision reference
float ref = reference(x);
/* fast candidate: express tanh(softplus(x)) and its gradient via
 * n = e^2x + 2e^x, using fast-math intrinsics */
auto e = __expf(x);
auto n = e * e + 2 * e;
const float tsp = __fdividef(n, n + 2);
const float grad_tsp = __fdividef(e * e + e, n * n * 0.25 + n + 1);
const float grad = x * grad_tsp + tsp;
float expr1 = grad;
float expr2 = dn_reference(x);
float expr3 = 0;//4 * x * __fdividef(e + 1, n + 2) * __fdividef(e, n + 2) + __fdividef(n, n + 2); //4 * x / e / e + 1;
double err1 = abs(double(ref) - double(expr1));
double err2 = abs(double(ref) - double(expr2));
double err3 = abs(double(ref) - double(expr3));
int temp; /* only used by the commented-out frexpf diagnostics below */
printf("[x=%f] %.7e %.7e %.7e %.7e (%.7e, %.7e, %.7e, %.7e)\n",
x, ref, expr1, expr2, expr3,
//frexpf(ref, &temp), frexpf(expr1, &temp), frexpf(expr2, &temp), frexpf(expr3, &temp),
0.0f, float(err1), float(err2), float(err3));
}
}
__device__ float mish_final(float value)
{
    // Fast single-precision mish gradient built on __expf / __fdividef.
    // Two algebraically equivalent forms are chosen on either side of
    // value = -0.6f to keep rounding error low across the whole range.
    const float e = __expf(value);
    const float n = e * e + 2 * e;   // e^(2x) + 2*e^x
    if (value <= -0.6f)
        return value * __fdividef(n, n + 2);
    return value - 2 * __fdividef(value, n + 2);
}
__global__ void test_final()
{
    // Sweep x over [-100, 100) and print the double-precision reference
    // next to the fast approximation, with their absolute difference.
    // Run single-threaded (<<<1,1>>>).
    for (float x = -100; x < 100; x += 0.1)
    {
        const float want = reference(x);
        const float got = mish_final(x);
        printf("[x=%f] %.7e %.7e (err=%.8e)\n", x, want, got, abs(got - want));
    }
}
int main ()
{
    // Launch the accuracy sweep on a single thread and wait for it.
    test<<<1, 1>>>();
    // BUG FIX: kernel-launch and execution errors were silently dropped;
    // cudaDeviceSynchronize() returns them, so check and report.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
11,869 | // filename: freduce.cu
#include <stdint.h>
//======================================================================
#ifdef __CUDA__
__device__ __host__
#endif
void reduce_hash(uint32_t H[], uint8_t B[], uint32_t link_idx);
//======================================================================
#ifdef __CUDA__
__device__ __host__
#endif
void reduce_hash(uint32_t H[], uint8_t B[], uint32_t link_idx) {
    /* Reduction step of a hash-chain: fold the four 32-bit hash words,
     * each perturbed by the chain link index, into the 7-character
     * candidate B shaped as  upper upper digit digit lower lower upper
     * (e.g. "AB12cdE"), NUL-terminated in B[7]. Two characters are taken
     * from each word (low and high 16-bit halves), one from the last. */
    uint32_t word;

    word = H[0] + link_idx;
    B[0] = ((word & 0xffffu) % 26) + 'A';          /* upper-case pair */
    B[1] = (((word >> 16) & 0xffffu) % 26) + 'A';

    word = H[1] + link_idx;
    B[2] = ((word & 0xffffu) % 10) + '0';          /* digit pair */
    B[3] = (((word >> 16) & 0xffffu) % 10) + '0';

    word = H[2] + link_idx;
    B[4] = ((word & 0xffffu) % 26) + 'a';          /* lower-case pair */
    B[5] = (((word >> 16) & 0xffffu) % 26) + 'a';

    word = H[3] + link_idx;
    B[6] = ((word & 0xffffu) % 26) + 'A';          /* final upper-case */
    B[7] = '\0';
}
//=============================================================================
|
11,870 | /*
* File description: cuconvo_tests.cu
* Author information: Mike Ranzinger mranzinger@alchemyapi.com
* Copyright information: Copyright Orchestr8 LLC
*/
|
11,871 | #include "includes.h"
__global__ void lhkde ( const int n, const float *a, const float *b, float *l, float *h ) {
    // Element-wise 3-sigma band: l[i] = a[i] - 3*b[i], h[i] = a[i] + 3*b[i].
    // One thread per element with a tail guard for partial final blocks.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx < n ) {
        const float spread = 3 * b[idx];
        l[idx] = a[idx] - spread;
        h[idx] = a[idx] + spread;
    }
}
11,872 | /*
* A try at implementing the MED algorithm on a GPU.
*
* === med-kernel: ===
* Essentially an implementation of the MED algorithm from Jurafsky
* and Martin tailored to a GPU.
* Each string to be compared is first converted into an int array,
* where each element is the index into the vocabulary _dd_ table that
* was built beforehand.
* Before the kernel is called, the device is prepared with a very large
* int array representing all target strings (all strings in the alert_data
* table) laid out end to end. A few other small pieces of information that
* remain constant are also sent: ntargets and tbi. On the device is
* also reserved space for an edits array where each block will store its
* output at the end of every cycle.
* Before each kernel invocation, a new orig array _od_ (also an int array
* representing indices of tokens into the vocabulary table) and an olen int
* are reserved and CudaMemCopied to the device. Also, space for each block to
* make its distance calculations is reserved.
* _distance_ is an array with [ntargets * (olen + 1) * 2] elements. Each half of
* the array is treated as one column of the J&M distance matrix (see below).
* Where the original MED algorithm uses a row++ loop inside a column++ loop,
* this algorithm swaps between 'columns'. I can do this because I'm only
* concerned with the edit distance -- not the moves required to change the
* string.
* The number of rows each thread handles is the length of the original string,
* thus each iteration of the inner loop will take the same number of steps for
* each thread. The number of columns each thread handles, however, is a
* function of the length of the target string, which will vary from one thread
* to the next. Hence, some threads will make more or fewer outer loop
* iterations with respect to other threads.
* Conceptually, think of the relationship between threads and loops as such:
*
* | | | | | |
* | | | | | |
* | | | | | |
* | | | | | |
* | | | | | |
* ==== == ===== ======= == ====
* tid=0 tid=1 tid=2 tid=3 ...
*
* In fact, the number of rows in _distance_ is olen * ntargets == olen *
* nblocks. In this diagram, think of each vertical line as being an original string
* that will be used by one thread. Each vertical line is in fact a segment of the
* array named _distance_.
* The x axis is truer to reality - this is the targets array of all alert
* strings laid end-to-end.
* Each delimited cartesian space is the responsibility of one thread.
* Each vertical line is an int array representing the original string, so there are the
* same number of rows across the board. Since the length of each target
* string is variable, the space calculated by one thread is not constant.
*
* I've designed the 2-column distance table to be a linear array representing
* 2 columns. This is because of the greater simplicity of allocating the necessary
* memory on the device and of accessing its elements. To accomodate the new
* circumstances, I've added two pointers cur_col and last_col which will point to
* either the head of distance (the distance pointer itself) or to the address halfway
* into the distance array, depending on which column of the distance table is being
* filled versus which was just filled. I have also added macros current and last
* to return the correct pointer as a function of cindex % 2.
*/
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
// The next two kernel definitions are variations on the same theme. They accomplish the
// same task -- one with a while loop, the other with a for loop. Duplication strictly for
// experimentation purposes to test performance.
/* Single-block experimental variant of the MED kernel (while-loop form).
 * One thread per target string; uses threadIdx.x only, so it must be
 * launched as <<<1, ntargets>>> — NOTE(review): there is no bounds check
 * on tid, so launching more threads than targets reads past tbi[].
 * Substitution cost is 2 (insert/delete cost 1). The trailing
 * __syncthreads() is reached by all threads but serves no purpose:
 * each thread only touches its own slice of distance/edits. */
__global__ void med_kernel_while( int *orig, // original string; must be freed at kernel termination
int olen, // length of orig
int *edits, // result array where each block stores its calculation
int *targets, // compound array of all other target strings
int *distance, // scratch pad for blocks to build MED table; must be
// freed at kernel termination
int *tbi, // (target-begin-index) array of indices where each block
// will find the start of its
// target string
int stargets, // total number of elements in targets array
int ntargets
) {
int tid = threadIdx.x;
int tlength = tbi[tid+1] - tbi[tid];
int *target = &targets[tbi[tid]];
/* two ping-pong DP columns of (olen+1) ints, one pair per thread */
int *dcol1 = &distance[(olen+1)*tid];
int *dcol2 = &distance[(olen+1)*tid + (olen+1)*ntargets];
int *current = dcol1;
int *last = dcol2;
int row, col, n1, n2, n3, petitmin, grandmin;
// initialize first column
row = col = 0;
current[row] = 0;
for (row=1; row<olen+1; row++) current[row] = current[row-1] + 1;
// fill in distance matrix
while ( col<tlength ) {
current = (current == dcol1 ? dcol2 : dcol1);
last = (last == dcol1 ? dcol2 : dcol1);
// bottom row must be initialized piecemeal
row = 0;
current[row] = last[row] + 1;
for (row=1; row<olen+1; row++) {
// calculate three possible values...
n1 = last[row] + 1; /* deletion */
n2 = current[row-1] + 1; /* insertion */
n3 = (orig[row-1] == target[col] ? last[row-1] : last[row-1] + 2); /* (mis)match */
// ...and find minimum
petitmin = (n1 < n2 ? n1 : n2);
grandmin = (petitmin < n3 ? petitmin : n3);
// add row
current[row] = grandmin;
} // end inner loop = build rows of a column
col++;
} // end outer loop = build columns of target
// wait for all threads to finish and submit MED
edits[tid] = current[olen];
__syncthreads();
} // end med_kernel
__global__ void med_kernel_for( int *orig,      // original string; index array into vocabulary
                                int olen,       // length of orig
                                int *edits,     // out: one MED result per target
                                int *targets,   // all target strings laid end-to-end
                                int *distance,  // scratch: 2 columns of (olen+1) ints per target
                                int *tbi,       // (target-begin-index) start offset of each target,
                                                // ntargets+1 entries
                                int stargets,   // total number of elements in targets array
                                int ntargets    // number of target strings
                              ) {
    // One thread per target string computes the minimum edit distance
    // (insert/delete cost 1, substitution cost 2) between orig and its
    // target with the classic two-column rolling DP. Threads are fully
    // independent: no shared memory is used.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (!(tid < ntargets)) return;
    int tlength = tbi[tid + 1] - tbi[tid];
    int *target = &targets[tbi[tid]];
    // two ping-pong DP columns of (olen+1) ints, one pair per thread
    int *dcol1 = &distance[(olen + 1) * tid];
    int *dcol2 = &distance[(olen + 1) * tid + (olen + 1) * ntargets];
    int *current = dcol1;
    int *last = dcol2;
    int row, col, n1, n2, n3, petitmin, grandmin;
    // column 0: distance from the empty target prefix is row deletions
    current[0] = 0;
    for (row = 1; row < olen + 1; row++) current[row] = current[row - 1] + 1;
    // fill in the distance matrix one target column at a time
    for (col = 0; col < tlength; col++) {
        current = (current == dcol1 ? dcol2 : dcol1);
        last = (last == dcol1 ? dcol2 : dcol1);
        // bottom cell of the new column
        current[0] = last[0] + 1;
        for (row = 1; row < olen + 1; row++) {
            n1 = last[row] + 1;                 // deletion
            n2 = current[row - 1] + 1;          // insertion
            n3 = (orig[row - 1] == target[col] ? last[row - 1] : last[row - 1] + 2); // (mis)match
            petitmin = (n1 < n2 ? n1 : n2);
            grandmin = (petitmin < n3 ? petitmin : n3);
            current[row] = grandmin;
        }
    }
    // BUG FIX: the original called __syncthreads() here, *after* the early
    // return above — out-of-range threads never reached the barrier, which
    // is undefined behavior (possible hang). No barrier is needed at all:
    // each thread writes only its own edits[tid] and distance slice.
    edits[tid] = current[olen];
}
extern "C"
void call_med_kernel(
    int *orig,     // original string (device); must be freed by the caller
    int olen,      // length of orig
    int *edits,    // result array, one MED per target (device)
    int *targets,  // compound array of all target strings (device)
    int *distance, // DP scratch pad (device)
    int *tbi,      // target-begin-index array, ntarg+1 entries (device)
    int starg,     // total number of elements in targets
    int ntarg      // number of target strings
) {
    // Launch one thread per target, packed into max-size blocks.
    if (ntarg <= 0) return; // BUG FIX: ntarg == 0 previously launched a block anyway
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    int maxthr = prop.maxThreadsPerBlock;
    // BUG FIX: ceil-division; the old "ntarg / maxthr + 1" launched a
    // spurious extra (fully idle) block whenever ntarg was an exact
    // multiple of maxthr.
    int blocks = (ntarg + maxthr - 1) / maxthr;
    med_kernel_for<<<blocks, maxthr>>>( orig, olen, edits, targets, distance, tbi, starg, ntarg );
}
/* C-linkage shim so non-CUDA translation units can allocate device
 * memory. NOTE(review): the cudaMalloc return code is discarded. */
extern "C"
void call_cudaMalloc(int **d_ptr, int bytes) {
cudaMalloc( (void**) d_ptr, bytes );
}
/* C-linkage shim around cudaMemcpy: flag != 0 copies device-to-host,
 * flag == 0 copies host-to-device. NOTE(review): return code discarded. */
extern "C"
void call_cudaMemcpy(int *to_ptr, int *from_ptr, int bytes, int flag){
cudaMemcpy(to_ptr, from_ptr, bytes, (flag ? cudaMemcpyDeviceToHost : cudaMemcpyHostToDevice));
}
/* C-linkage shim to release device memory obtained via call_cudaMalloc. */
extern "C"
void call_cudaFree(int *dev_ptr) {
cudaFree(dev_ptr);
}
|
11,873 | //MatAdd.cu
// author: Pan Yang
// date : 2015-7-5
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define M 80 // height of A
#define N 48 // width of A ( == height of B)
#define P 128 // width of B
#define BLOCK_SIZE 16
typedef struct {
int height;
int width;
int stride;
float *elements;
}Matrix;
// cpu code definition
void MatMulOnHost(const Matrix A, const Matrix B, Matrix C)
{
    // CPU reference: C = A * B with the classic O(height*width*inner)
    // triple loop over row-major data. Used to validate the GPU kernel.
    for (int row = 0; row < A.height; ++row)
    {
        for (int col = 0; col < B.width; ++col)
        {
            float acc = 0;
            for (int k = 0; k < A.width; ++k)
            {
                acc += A.elements[row * A.width + k] * B.elements[k * B.width + col];
            }
            C.elements[row * C.width + col] = acc;
        }
    }
}
// Kernel definition for Matrix multiplication
__global__ void MatMul(const Matrix A, const Matrix B, Matrix C)
{
/// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = A.width * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + A.width - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * B.width;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Cvalue = 0;
// shared memory used to store Asub and Bsub
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// loop over all the sub matrices of A and B that are required to compute Csub,
// multiply each pair of sub matrices together and sum the results
int a, b, k;
for (a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
// load Asub and Bsub from global memory to shared memory
// Each thread loads one element of each sub-matrix
As[ty][tx] = A.elements[a + A.width * ty + tx];
Bs[ty][tx] = B.elements[b + B.width * ty + tx];
// synchronize to make sure the sub-matrices are loaded before starting calculation
__syncthreads();
// multiply Asub and Bsub together
for (k = 0; k < BLOCK_SIZE; ++k)
Cvalue += As[ty][k] * Bs[k][tx];
// synchronize to make sure that the preceding calculation is done before loading
// two new sub-matrices
__syncthreads();
}
// write Cvalue to global memory, each thread write one element
int c = C.width * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C.elements[c + C.width * ty + tx] = Cvalue;
}
int main()
{
    // Driver: multiply random M x N and N x P matrices on the GPU,
    // benchmark the kernel, and validate the result against the CPU
    // reference with a relative-error tolerance.
    printf("\n");
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    int devID = 0;
    cudaDeviceProp deviceProp;
    cudaError_t error;
    error = cudaGetDeviceProperties(&deviceProp, devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
    // load A, B, C on host
    Matrix A;
    A.height = M;
    A.width = N;
    size_t size = A.height * A.width * sizeof(float);
    A.elements = (float *)malloc(size);
    Matrix B;
    B.height = N;
    B.width = P;
    size = B.height * B.width * sizeof(float);
    B.elements = (float *)malloc(size);
    // initialize A with uniform values in [0, 1]
    srand(rand());
    int i;
    for (i = 0; i < A.height * A.width; ++i)
    {
        A.elements[i] = rand() / (float)RAND_MAX;
        //A.elements[i] = 1.0;
    }
    // initialize B with uniform values in [0, 1]
    srand(rand());
    for (i = 0; i < B.height * B.width; ++i)
    {
        B.elements[i] = rand() / (float)RAND_MAX;
        //B.elements[i] = 0.01;
    }
    Matrix C;
    C.height = M;
    C.width = P;
    size = C.height * C.width * sizeof(float);
    C.elements = (float *)malloc(size);
    Matrix ref_C; // reference C for result check
    ref_C.height = M;
    ref_C.width = P;
    size = ref_C.height * ref_C.width * sizeof(float);
    ref_C.elements = (float *)malloc(size);
    // mirror A, B, C on the device
    Matrix d_A;
    d_A.height = M;
    d_A.width = N;
    size = d_A.height * d_A.width * sizeof(float);
    cudaMalloc(&d_A.elements, size); // alloc memory on device
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); // copy data from host to device
    Matrix d_B;
    d_B.height = N;
    d_B.width = P;
    size = d_B.height * d_B.width * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    Matrix d_C;
    d_C.height = M;
    d_C.width = P;
    size = d_C.height * d_C.width * sizeof(float);
    cudaMalloc(&d_C.elements, size); // contents undefined until the kernel writes it
    // Kernel launch configuration: BLOCK_SIZE x BLOCK_SIZE threads per block.
    dim3 BlockDim(BLOCK_SIZE, BLOCK_SIZE);
    // BUG FIX: the y dimension used BlockDim.x in the numerator but
    // BlockDim.y in the denominator; only correct because the block is
    // square. Use matching components for each axis.
    dim3 GridDim((P + BlockDim.x - 1) / BlockDim.x, (M + BlockDim.y - 1) / BlockDim.y);
    cudaEvent_t start_cu, stop_cu;
    float time_gpu = 0.0f;
    cudaEventCreate(&start_cu);
    cudaEventCreate(&stop_cu);
    cudaEventRecord( start_cu, 0);
    int nIter = 300; // average the timing over many launches
    for (i = 0; i < nIter; ++i)
    {
        MatMul<<<GridDim, BlockDim>>>(d_A, d_B, d_C);
    }
    cudaEventRecord( stop_cu, 0);
    cudaEventSynchronize( stop_cu );
    cudaEventElapsedTime( &time_gpu, start_cu, stop_cu );
    cudaEventDestroy( start_cu );
    cudaEventDestroy( stop_cu );
    float msecPerMatrixMul = time_gpu / nIter;
    double flopsPerMatrixMul = 2.0 * (double)A.width * (double)A.height * (double)B.width;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
           gigaFlops,
           msecPerMatrixMul,
           flopsPerMatrixMul,
           BlockDim.x * BlockDim.y);
    // copy results form device memory to host memory
    cudaMemcpy( C.elements, d_C.elements, size, cudaMemcpyDeviceToHost );
    MatMulOnHost(A, B, ref_C);
    printf("GridDim: (%d, %d)\n", GridDim.x, GridDim.y);
    // test relative error by the formula
    // |<x, y>_cpu - <x,y>_gpu| / <|x|, |y|> < eps
    printf("Checking computed result for correctness ... \n");
    bool correct = true;
    double eps = 1.e-6 ; // machine zero
    for (int i = 0; i < (int)(M * P); i++)
    {
        double abs_err = fabs(ref_C.elements[i] - C.elements[i]);
        double dot_length = A.width;
        double abs_val = fabs(ref_C.elements[i]);
        double rel_err = abs_err / abs_val / dot_length ;
        if (rel_err > eps)
        {
            //printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, ref_C.elements[i], C.elements[i], eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
    // free space on host and device
    free(ref_C.elements); // BUG FIX: the reference matrix was leaked
    free(C.elements);
    free(B.elements);
    free(A.elements);
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
    return 0;
}
|
11,874 | #include "includes.h"
/*
 * Rebalances, for each (batch, point) pair, the per-edge counts in edge_num
 * so that they sum exactly to p_num.
 * - edge_num      : [b][n][orig_p_num] counts, flattened row-major (mutated)
 * - edge_num_sum  : [b][n] current sum of each pair's counts
 * - edge_idx_sort : [b][n][orig_p_num] edge indices in the order deficits /
 *                   surpluses are applied (presumably sorted by count on the
 *                   host -- TODO confirm against the caller)
 * Launch: one thread per (batch, point) pair, 1D grid.
 */
__global__ void _calculate_edge_num( long* edge_num, const long* edge_num_sum, const long* edge_idx_sort, const int b, const int n, const int orig_p_num, const long p_num ) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
// one thread per (batch, point) pair; excess threads exit
if (index >= b * n)
return;
const int c_b = index / n;
const int c_n = index % n;
// views into this pair's slice of the flattened arrays
long* c_edge_num = &edge_num[c_b * n * orig_p_num + c_n * orig_p_num];
const long c_edge_num_sum = edge_num_sum[c_b * n + c_n];
const long* c_edge_idx_sort = &edge_idx_sort[c_b * n * orig_p_num + c_n * orig_p_num];
// already balanced: nothing to do
if (c_edge_num_sum == p_num)
return;
if (c_edge_num_sum < p_num)
// deficit: add the whole shortfall to the first edge in sorted order
c_edge_num[c_edge_idx_sort[0]] += p_num - c_edge_num_sum;
else {
// surplus: walk the sorted edges, draining each down to a floor of 1,
// until exactly (sum - p_num) units have been removed
int id = 0;
long pass_num = c_edge_num_sum - p_num;
while (pass_num > 0) {
long edge_idx = c_edge_idx_sort[id];
if (c_edge_num[edge_idx] > pass_num) {
c_edge_num[edge_idx] -= pass_num;
pass_num = 0;
} else {
pass_num -= c_edge_num[edge_idx] - 1;
c_edge_num[edge_idx] = 1;
id += 1;
}
}
}
} |
11,875 |
/*
* File smc_impl_nested.cuh contains the implementation of the nested SMC.
* This file is included by smc_impl.cuh and relies on the includes in smc_impl.cuh.
*/
// Nested inference is never used, and this code has become obsolete.
/*
#include "macros/macros.cuh"
#include "smc.cuh"
#include "dists/dists.cuh"
#include "particles_memory_handler.cuh"
#include "resample/systematic/systematic_cpu.cuh"
#ifdef __NVCC__
#include <curand_kernel.h>
#include "utils/cuda_error_utils.cuh"
#include "resample/systematic/systematic_gpu.cuh"
#include "smc_kernels.cuh"
#endif
DEV double runSMCNested(
#ifdef __NVCC__
curandState* randState,
#endif
pplFunc_t* bblocks, int numBblocks, int numParticles, size_t progStateSize, bool parallelExec, bool parallelResampling, int parentIdx,
callbackFunc_t callback, void* ret, void* arg) {
if(parallelExec || parallelResampling) {
#ifndef GPU
printf("Cannot run in parallel when not compiled for GPU");
return 0.0;
#endif
}
bool requireRandStates = parallelExec;
floating_t logNormConstant = 0;
particles_t particles = allocateParticlesNested(numParticles, progStateSize);
#ifdef __NVCC__
const int NUM_BLOCKS = (numParticles + NUM_THREADS_PER_BLOCK_NESTED - 1) / NUM_THREADS_PER_BLOCK_NESTED;
curandState* randStates;
if(requireRandStates) {
randStates = new curandState[numParticles];
initCurandStates<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED>>>(randStates, numParticles, parentIdx);
cudaDeviceSynchronize();
cudaCheckErrorDev();
}
#endif
resampler_t resampler = initResamplerNested(numParticles, progStateSize);
// Run program/inference
while(true) {
if(parallelExec) {
#ifdef __NVCC__
// Use nested randStates
execFuncs<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED>>>(randStates, particles, bblocks, numParticles, numParticles, numBblocks, arg);
cudaDeviceSynchronize();
cudaCheckErrorDev();
#endif
} else {
for(int i = 0; i < numParticles; i++) {
int pc = particles.pcs[i];
if(pc < numBblocks) {
bblocks[pc](
#ifdef __NVCC__
randState,
#endif
particles, i, arg);
}
}
}
floating_t logWeightSum;
if(parallelResampling) {
#ifdef __NVCC__
logWeightSum = calcLogWeightSumGpu(particles.weights, resampler, numParticles, NUM_BLOCKS, NUM_THREADS_PER_BLOCK_NESTED);
#endif
} else {
logWeightSum = calcLogWeightSumCpu(particles.weights, resampler, numParticles);
}
logNormConstant += logWeightSum - log(static_cast<floating_t>(numParticles));
if(particles.pcs[0] >= numBblocks) // Assumption: All terminate at the same time
break;
if(parallelResampling) {
#ifdef __NVCC__
resampleSystematicGpuNested(randState, particles, resampler, numParticles, NUM_BLOCKS);
#endif
} else {
resampleSystematicCpu(
#ifdef __NVCC__
randState,
#endif
particles, resampler, numParticles);
}
}
callback(particles, numParticles, ret);
// Clean up
destResamplerNested(resampler);
freeParticlesNested(particles);
#ifdef __NVCC__
if(requireRandStates)
delete[] randStates;
#endif
return logNormConstant;
}
*/
|
11,876 | #include "stdio.h"
__global__ void add_arrays_gpu( float *in1, float *in2, float *out, int Ntot) {
    // Elementwise vector addition: one thread per index, with a guard
    // clause so threads past the end of the arrays do nothing.
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= Ntot)
        return;
    out[gid] = in1[gid] + in2[gid];
}
// Host driver: builds two N-element vectors, adds them on the GPU with
// add_arrays_gpu, and prints the per-element results.
int main() {
// pointers to host memory
float *a, *b, *c;
// pointers to device memory
float *a_d, *b_d, *c_d;
int N=18;
int i;
// Allocate arrays a, b and c on host
a = (float*) malloc(N*sizeof(float));
b = (float*) malloc(N*sizeof(float));
c = (float*) malloc(N*sizeof(float));
// Allocate arrays a_d, b_d and c_d on device
cudaMalloc ((void **) &a_d, sizeof(float)*N);
cudaMalloc ((void **) &b_d, sizeof(float)*N);
cudaMalloc ((void **) &c_d, sizeof(float)*N);
// Initialize arrays a and b: a[i] = 2i and b[i] = -i, so c[i] should be i
for (i=0; i<N; i++) {
a[i]= (float) 2*i;
b[i]=-(float) i;
}
// Copy data from host memory to device memory
cudaMemcpy(a_d, a, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, sizeof(float)*N, cudaMemcpyHostToDevice);
// Compute the execution configuration: ceil(N / block_size) blocks
int block_size=8;
dim3 dimBlock(block_size);
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
// Add arrays a and b, store result in c
add_arrays_gpu<<<dimGrid,dimBlock>>>(a_d, b_d, c_d, N);
// Copy data from device memory to host memory (blocking, so it also
// waits for the kernel on the same stream to finish)
cudaMemcpy(c, c_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
// Print c
printf("addVectors will generate two vectors, move them to the global memory, and add them together in the GPU\n");
for (i=0; i<N; i++) {
printf(" a[%2d](%10f) + b[%2d](%10f) = c[%2d](%10f)\n",i,a[i],i,b[i],i,c[i]);
}
// Free the memory
free(a); free(b); free(c);
cudaFree(a_d); cudaFree(b_d);cudaFree(c_d);
}
|
11,877 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
// Enumerate all CUDA devices and print their key hardware properties.
// Fix: corrected the user-facing label typo "Coumpute Capability".
int main()
{
int n;
cudaGetDeviceCount(&n);
cout<<"Number of CUDA enabled devices : "<<n<<endl;
if(n!=0)
{
for(int i=0;i<n;i++)
{
cout<<"Device No. : "<<i<<endl;
cudaDeviceProp iProp;
cudaGetDeviceProperties(&iProp, i);
cout<<"\tDevice Name : "<<iProp.name<<endl;
cout<<"\tNo. of multiprocessors : "<<iProp.multiProcessorCount<<endl;
// cudaDeviceProp::clockRate is reported in kilohertz
cout<<"\tClock rate : "<<iProp.clockRate<<" kHz"<<endl;
cout<<"\tCompute Capability : "<<iProp.major<<"."<<iProp.minor<<endl;
cout<<"\tTotal Global Memory : "<<iProp.totalGlobalMem<<" B"<<endl;
cout<<"\tTotal Constant Memory : "<<iProp.totalConstMem<<" B"<<endl;
cout<<"\tShared Memory per Block : "<<iProp.sharedMemPerBlock<<" B"<<endl;
cout<<"\tRegisters per block : "<<iProp.regsPerBlock<<endl;
cout<<"\tWarp Size : "<<iProp.warpSize<<endl;
cout<<"\tMaximum thread per block : "<<iProp.maxThreadsPerBlock<<endl;
cout<<"\tMaximum thread dimensions : ("<<iProp.maxThreadsDim[0]<<", "<<iProp.maxThreadsDim[1]<<", "<<iProp.maxThreadsDim[2]<<")"<<endl;
cout<<"\tMaximum grid size : ("<<iProp.maxGridSize[0]<<", "<<iProp.maxGridSize[1]<<", "<<iProp.maxGridSize[2]<<")"<<endl;
}
}
return 0;
} |
11,878 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// função executada na GPU
// Device kernel: each thread selection-sorts its own contiguous slice of
// vet_d. Slice size is vet_size / nthreads (integer division), so when
// vet_size is not a multiple of nthreads the tail elements are never
// sorted -- NOTE(review): confirm callers only use divisible sizes.
__global__ void GPU_sort (int *vet_d, int vet_size,int nthreads) {
int k = threadIdx.x;
printf("Nucleo %d\n",k );
int part = vet_size / nthreads; //== each thread sorts `part` consecutive positions of the vector
/**
Slice layout example (part = 4):
0 < i=0 < 4 .... 4 < i=1 < 8 .... 8 < i=2 < 12 ... 12 < i=3 < 18
*/
// [a, b) is this thread's slice
int a = k*part;
int b = k*part+part;
int i=0,j=0;
int min_idx=0,temp;
// classic selection sort restricted to [a, b)
for(i=a;i<b;i++){
min_idx = i;
for(j=i+1;j<b;j++){
if(vet_d[j]<vet_d[min_idx]){
min_idx = j;
}
}
temp = 0;
temp = vet_d[min_idx];
vet_d[min_idx] = vet_d[i];
vet_d[i] = temp;
}
/*
for(i=a;i<b;i++){
printf("v[%i]:%d\n",i,vet_d[i] );
}
printf("\n");
*/
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// função executada no HOST
__host__ int *criar_vetor_desordenado(int *v,int vet_size);
__host__ void vet_imprimir(int *v,int vet_size);
// Driver: builds an unsorted vector on the host, lets each GPU thread
// selection-sort its own partition, and prints the partially sorted result.
int main (int argc, char ** argv) {
    // Defaults used when the CLI arguments are absent.
    int nthreads = 4;
    int vet_size = 20;
    // Both argv[1] and argv[2] are consumed, so three argv entries are
    // required. The previous test (argc == 2) read argv[2] past the end
    // of the argument vector.
    if (argc == 3) {
        nthreads = atoi(argv[1]);
        vet_size = atoi(argv[2]);
    }else{
        printf ("./main <nthreads> <vet_size>\n");
        printf ("Caso não haja passagem de parâmetros, nthreads=4 e vet_size=20\n");
    }
    // host-side vectors (pinned memory)
    int *vet_desordenado=NULL, *vet_ordenado=NULL;
    vet_desordenado = criar_vetor_desordenado(vet_desordenado,vet_size); // allocates pinned host memory
    cudaMallocHost((void **) &vet_ordenado, vet_size*sizeof(int));
    printf("Vetor desordenado\n");
    vet_imprimir(vet_desordenado,vet_size);
    int *dev_vet =NULL;
    cudaMalloc((void**)&dev_vet,vet_size * sizeof(int)); // device global memory
    cudaMemcpy (dev_vet, vet_desordenado, vet_size*sizeof(int), cudaMemcpyHostToDevice);
    // Each CUDA thread sorts one partition of dev_vet.
    GPU_sort<<<1,nthreads>>>(dev_vet, vet_size,nthreads);
    cudaDeviceSynchronize();
    cudaMemcpy (vet_ordenado, dev_vet, vet_size*sizeof(int), cudaMemcpyDeviceToHost);
    printf("Vetor parcialmente ordenado\n");
    vet_imprimir(vet_ordenado,vet_size);
    // Release device and pinned host allocations (previously leaked).
    cudaFree(dev_vet);
    cudaFreeHost(vet_desordenado);
    cudaFreeHost(vet_ordenado);
    return 0;
}
// Allocates a pinned host array of vet_size ints and fills it with
// pseudo-random values in [0, vet_size). Returns v unchanged if it is
// already non-NULL.
// NOTE(review): the vet_size < 0 branch only prints a warning and falls
// through to cudaMallocHost with a negative size -- confirm intended.
// NOTE(review): rand() is never seeded here; the sequence repeats per run.
__host__ int *criar_vetor_desordenado(int *v,int vet_size){
if(v!=NULL){
printf("O vetor informado ja existe!\n");
return v;
}
if(vet_size < 0){
printf("O tamanho do vetor tem que ser maior que 0\n");
}
cudaMallocHost((void **) &v, vet_size*sizeof(int));
// initialize the unsorted vector's values
for(int i=0;i<vet_size;i++){
v[i]= rand() % vet_size;// 0 <= value < vet_size
}
return v;
}
// Prints every element of v, one per line, framed by blank lines.
// Rejects a NULL vector or a negative size with a warning message.
__host__ void vet_imprimir(int *v,int vet_size){
    if (v == NULL) {
        printf("O vetor informado é NULL!\n");
        return;
    }
    if (vet_size < 0) {
        printf("O tamanho do vetor tem que ser maior que 0\n");
        return;
    }
    printf("\n");
    printf("\n");
    int idx = 0;
    while (idx < vet_size) {
        printf("%d\n", v[idx]);
        ++idx;
    }
    printf("\n");
}
|
11,879 | #include <math.h>
#include <stdio.h>
#include <getopt.h>
#define FILE_SIZE 666
#define BLOCKSIZE 256
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call (decoded error string plus call site) on
// stderr; terminates the process with the error code unless abort=false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
__device__ int d_m;
__device__ float d_r;
// Kernel functions
/*
 * Sums g_idata across the grid into *g_out: interleaved-addressing
 * shared-memory reduction within each block, then one atomicAdd per block.
 * Assumes blockDim.x == BLOCKSIZE and that blockDim.x is a power of two
 * (otherwise sdata[index + s] can index past the shared array).
 * NOTE(review): g_idata is read at every global thread index with no
 * bounds check, so the input must hold at least gridDim.x * blockDim.x
 * elements; the caller launches ceil(N/blockSize) blocks over an N-element
 * array, so the last block reads past the allocation -- confirm/fix by
 * passing an element count (interface change, callers must be updated).
 */
__global__
void reduce(int *g_idata, unsigned int *g_out) {
__shared__ int sdata[BLOCKSIZE];
// each thread loads one element from global to shared mem
int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[threadIdx.x] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for (int s=1; s < blockDim.x; s *=2)
{
int index = 2 * s * threadIdx.x;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (threadIdx.x == 0){
atomicAdd(g_out, sdata[0]);
}
}
__device__
bool is_equal(float a, float b, float eps)
{
    // Absolute-difference comparison: true when |a - b| <= eps.
    return fabs(a - b) <= eps;
}
/*
 * Sets mvec[index] = 1 for every window in[index .. index+m-1] that matches
 * base_vec element-wise within tolerance d_r (device-global, set by the host
 * via cudaMemcpyToSymbol). mvec entries for non-matching windows are left
 * untouched, so the caller must zero mvec beforehand (see the caller).
 * NOTE(review): index has no upper bound and the loop reads in[index + i],
 * so threads near the end of the grid read past the array -- confirm the
 * allocation is padded or add an explicit length parameter.
 */
__global__
void findvec(float *base_vec, int m, float *in, int *mvec)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int i;
// check if vec of size m is similar
int found = true;
for (i = 0; i < m; i++){
if (!is_equal(in[index + i], base_vec[i], d_r)){
found = false;
break;
}
}
if (found) {
mvec[index] = 1;
}
}
/*
 * Reads whitespace-separated floats from fname into x.
 * Returns the number of values read, or -1 if the file cannot be opened.
 * Fixes: NULL-check on fopen (previously crashed on a missing file) and
 * the return value (previously i - 1, undercounting by one; the only
 * caller in this file ignores the return value).
 */
int load_data(char *fname, float *x)
{
FILE *f = fopen(fname, "r");
if (f == NULL)
return -1;
float buf;
int i = 0;
while(fscanf(f, "%f", &buf) > 0)
x[i++] = buf;
fclose(f);
return i;
}
/*
 * Counts '\n' characters in fname; returns -1 if the file cannot be opened.
 * Fixes: the read character is now an int -- fgetc() returns int and EOF
 * (-1) is not representable in a plain char on unsigned-char platforms,
 * where the old loop never terminated. Also removed the stray debug
 * `buf = z; printf("last: ...")` which always printed the EOF value.
 */
int countlines(char *fname)
{
FILE *f = fopen(fname, "r");
if (f == NULL)
return -1;
int z;
int linenumbers = 0;
while((z = fgetc(f)) != EOF)
if (z == '\n')
linenumbers++;
fclose(f);
return linenumbers;
}
// parameters
// parameters
typedef struct {
float r; // max distance
int m; // embed dim
char infile[FILE_SIZE]; // input data path
char outfile[FILE_SIZE]; // output report path
bool apen; // 1: approximate not sample entropy
} params;
// Long-option table for getopt_long(). Per the getopt_long contract the
// array MUST be terminated by an all-zero element; without it the scan
// for a matching option runs past the end of the table (undefined
// behavior). The sentinel below adds that terminator.
static struct option options[] = {
{"in", required_argument, NULL, 'i'},
{"out", required_argument, NULL, 'o'},
{"embed", required_argument, NULL, 'm'},
{"radius", required_argument, NULL, 'r'},
{"apen", required_argument, NULL, 'a'},
{NULL, 0, NULL, 0}
};
// Dumps the parsed run configuration to stdout as '#'-prefixed lines,
// so downstream tools can treat them as comments in the output stream.
void dump_params(params * p)
{
fprintf(stdout,"#\n");
fprintf(stdout,"#m:%d\n",p->m);
fprintf(stdout,"#r:%f\n",p->r);
fprintf(stdout,"#infile:%s\n",p->infile);
fprintf(stdout,"#outfile:%s\n",p->outfile);
fprintf(stdout,"#approx:%d\n",p->apen);
fflush(stdout);
}
// Prints the command-line help text; argv[0] supplies the program name.
void usage(char **argv)
{
printf("Usage: %s <params> \n\n", argv[0]);
printf("Model params:\n");
printf(" -m, --embed=INT set the embedding dimension 'dim' to INT\n");
printf(" -r, --radius=FLOAT set the maximal distance between vectors\n");
printf(" to 'radius' to FLOAT\n");
printf(" -i, --in=FILE_NAME set the input data to FILE_NAME\n");
printf(" -o, --out=FILE_NAME set the output data to FILE_NAME\n");
printf(" -a, --apen=INT calculates Approximate Entropy (1) instead of SampEn (def, 0)\n");
printf("\n");
}
/*
 * Parses command-line flags into *p (see `options` for the long forms).
 * Fix: every option here takes an argument, so each short option needs a
 * trailing ':' in the optstring. The old string "mrioa:" left optarg NULL
 * for -m/-r/-i/-o, making the sscanf/strcpy below dereference NULL.
 * getopt_long() returns -1 (not necessarily the stdio EOF macro) when the
 * argument list is exhausted.
 */
void parse_arguments(int argc, char **argv, params *p)
{
int c;
while( (c = getopt_long(argc, argv, "m:r:i:o:a:", options, NULL)) != -1) {
switch (c) {
case 'm':
sscanf(optarg, "%d", &(p->m));
break;
case 'r':
sscanf(optarg, "%f", &(p->r));
break;
case 'i':
strcpy(p->infile, optarg);
break;
case 'o':
strcpy(p->outfile, optarg);
break;
case 'a':
int buf;
sscanf(optarg, "%i", &buf);
p->apen = buf;
break;
}
}
}
// Population standard deviation of x[0..N-1], computed with the
// one-pass E[x^2] - mean^2 formula. (Name kept for existing callers.)
float sandard_deviation(float *x, int N)
{
    float sum = 0, sumsq = 0;
    for (int k = 0; k < N; ++k) {
        sum += x[k];
        sumsq += x[k] * x[k];
    }
    sum /= N;                       // sum now holds the mean
    sumsq = sumsq / N - sum * sum;  // population variance
    return sqrt(sumsq);
}
/*
 * Counts, over every template vector of length m starting in x, how many
 * windows of x match it within tolerance d_r (device symbol set by the host
 * before this call). For SampEn (apen == false) the self-match is excluded
 * via the "- 1" below; for ApEn it is kept.
 * Fix: base_vec was never freed, leaking m floats of managed memory on
 * every call (the program calls this twice per run).
 * NOTE(review): the total is accumulated in an unsigned long but returned
 * as int; counts above INT_MAX would truncate -- confirm N stays small.
 */
int
find_similar_vectors(float *x, int N, int m, int numBlocks, int blockSize, bool apen=false)
{
int i; //dummy idx
// managed buffer holding the current template vector
float *base_vec;
gpuErrchk(cudaMallocManaged(&base_vec, m * sizeof(float)));
// single managed counter the reduction kernel accumulates into
unsigned int *matches;
gpuErrchk(cudaMallocManaged(&matches, sizeof(unsigned int)));
// per-position match flags (1 where a window matches the template)
int *mvec;
gpuErrchk(cudaMallocManaged(&mvec, N * sizeof(int)));
// search for EACH possible base vec of length m
unsigned long n = 0;
for (int ibv = 0; ibv <= N - m; ibv++){
// build temporary base vec of length m
for (i = ibv; i < ibv + m; ++i) base_vec[i - ibv] = x[i];
// clear match flags (findvec only sets, never clears)
for (i = 0; i < N; ++i) mvec[i] = 0;
// find matches for temporary vec on the GPU
findvec<<<numBlocks, blockSize>>>(base_vec, m, x, mvec);
gpuErrchk(cudaDeviceSynchronize());
// reduce the flags to a count
matches[0] = 0;
reduce<<<numBlocks, blockSize>>>(mvec, matches);
gpuErrchk(cudaDeviceSynchronize());
n += matches[0] - (apen ? 0 : 1);
}
// free ALL managed buffers (base_vec was previously leaked)
gpuErrchk(cudaFree(base_vec));
gpuErrchk(cudaFree(matches));
gpuErrchk(cudaFree(mvec));
return n;
}
// Entry point: parses CLI options, loads the series, and writes the
// m / (m+1) match counts and the SampEn (or ApEn) estimate to the outfile.
int main(int argc, char **argv)
{
// defaults; initializer order follows the struct: {r, m, infile, outfile, apen}
params p = {
0.2f,
2,
"data.dat",
"out.dat",
false
};
parse_arguments(argc, argv, &p);
dump_params(&p);
// data
int N = countlines(p.infile);
// NOTE(review): the message claims "m > length" but the test is
// N <= m + 1 (it also fires for a missing file, where N == -1) -- confirm.
if (N <= p.m + 1){
printf("m (%d) > length of data (%d)\nExiting...", p.m, N);
return N;
}
// parallelism
int blockSize = BLOCKSIZE; //4 ;//256;
int numBlocks = (N + blockSize - 1) / blockSize;
// Allocate Unified Memory – accessible from CPU or GPU
float *x;
gpuErrchk(cudaMallocManaged(&x, N * sizeof(float)));
// initialize data
load_data(p.infile, x);
float sd = sandard_deviation(x, N);
// Sampen algorithm initialisation: copy m and the scaled radius to the
// device symbols the kernels read
int m = p.m;
gpuErrchk(cudaMemcpyToSymbol(d_m, &m, sizeof(int), 0, cudaMemcpyHostToDevice));
float r = p.r * sd;
gpuErrchk(cudaMemcpyToSymbol(d_r, &r, sizeof(float), 0, cudaMemcpyHostToDevice));
bool apen = p.apen;
unsigned long n_m = find_similar_vectors(x, N, m, numBlocks, blockSize, apen);
unsigned long n_m_plus_1 = find_similar_vectors(x, N, m + 1, numBlocks, blockSize, apen);
// NOTE(review): fopen result is not checked; a bad outfile path crashes here.
FILE * outFile;
outFile = fopen(p.outfile, "w");
fprintf(outFile, "m vector matches: %lu\n", n_m);
fprintf(outFile, "(m+1) vector matches: %lu\n", n_m_plus_1);
fprintf(outFile, "ratio = n_{m+1}/n_m: %f\n", (float)n_m_plus_1/n_m);
fprintf(outFile, "SampEn = -ln(ratio): %f\n", -log((float)n_m_plus_1/n_m));
fclose(outFile);
// Free memory
gpuErrchk(cudaFree(x));
return 0;
}
|
11,880 | #include "includes.h"
/*
 * Backward pass for variable-scale box pooling: for each input pixel (x, y)
 * and each of the nWindows windows, accumulates the 4-corner difference of
 * the (h+1) x (w+1) integral image gradOutputIntData (one plane per window,
 * advanced at the end of the loop) into gradInputData[x*w + y].
 * Launch: 2D grid of BLOCK_SIZE x BLOCK_SIZE thread blocks covering h x w.
 */
__global__ void updateGradInputVarScaleKernel( float *gradOutputIntData, float *gradInputData, int h, int w, int nWindows, float *xMin, float *xMax, float *yMin, float *yMax) {
const int x = BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int y = BLOCK_SIZE * blockIdx.y + threadIdx.y;
if (x < h and y < w) {
int xMinCurr, xMaxCurr, yMinCurr, yMaxCurr;
double outValue = 0;
for (int windowIdx = 0; windowIdx < nWindows; ++windowIdx) {
// window extents, mirrored and snapped to the integral-image grid
xMinCurr = (int)ceil(-xMax[windowIdx]);
yMinCurr = (int)ceil(-yMax[windowIdx]);
xMaxCurr = (int)floor(-xMin[windowIdx]) + 1;
yMaxCurr = (int)floor(-yMin[windowIdx]) + 1;
// The following code block implements these lines
// as if they were executed simultaneously (see `void updateGradInputFrac()`):
// xMinCurr = (x == 0 and xMaxCurr >= 0 ? 0 : xMinCurr);
// xMaxCurr = (x == h-1 and xMinCurr <= 0 ? h+66 : xMaxCurr);
// yMinCurr = (y == 0 and yMaxCurr >= 0 ? 0 : yMinCurr);
// yMaxCurr = (y == w-1 and yMinCurr <= 0 ? w+66 : yMaxCurr);
bool needToChangeMin, needToChangeMax;
needToChangeMin = x == 0 and xMaxCurr >= 0;
needToChangeMax = x == h-1 and xMinCurr <= 0;
if (needToChangeMin) xMinCurr = 0;
if (needToChangeMax) xMaxCurr = h+66;
needToChangeMin = y == 0 and yMaxCurr >= 0;
needToChangeMax = y == w-1 and yMinCurr <= 0;
if (needToChangeMin) yMinCurr = 0;
if (needToChangeMax) yMaxCurr = w+66;
// clamp the box corners to the valid integral-image range [0,h] x [0,w]
const int t = max(0, min(x+xMinCurr, h) );
const int b = max(0, min(x+xMaxCurr, h) );
const int l = max(0, min(y+yMinCurr, w) );
const int r = max(0, min(y+yMaxCurr, w) );
// standard 4-corner integral-image box sum
outValue += gradOutputIntData[b*(w+1) + r];
outValue -= gradOutputIntData[t*(w+1) + r];
outValue -= gradOutputIntData[b*(w+1) + l];
outValue += gradOutputIntData[t*(w+1) + l];
// go to the next channel
gradOutputIntData += (h+1)*(w+1);
}
gradInputData[x*w + y] = outValue;
}
} |
11,881 | #include <cstdint>
/*
* For 512 <= d < 1024,
*
* RECIPROCAL_TABLE_32[d - 512] = floor((2^24 - 2^14 + 2^9)/d)
*
* Total space at the moment is 512*2 = 1024 bytes.
*
* TODO: Investigate whether alternative storage layouts are better; examples:
*
* - redundantly store each element in a uint32_t
* - pack two uint16_t values into each uint32_t
* - is __constant__ the right storage specifier? Maybe load into shared memory?
* Shared memory seems like an excellent choice (48k available per SM), though
* I'll need to be mindful of bank conflicts (perhaps circumvent by having
* many copies of the data in SM?).
* - perhaps reading an element from memory is slower than simply calculating
* floor((2^24 - 2^14 + 2^9)/d) in assembly?
*/
__device__ __constant__
uint16_t
RECIPROCAL_TABLE_32[0x200] =
{
0x7fe1, 0x7fa1, 0x7f61, 0x7f22, 0x7ee3, 0x7ea4, 0x7e65, 0x7e27,
0x7de9, 0x7dab, 0x7d6d, 0x7d30, 0x7cf3, 0x7cb6, 0x7c79, 0x7c3d,
0x7c00, 0x7bc4, 0x7b89, 0x7b4d, 0x7b12, 0x7ad7, 0x7a9c, 0x7a61,
0x7a27, 0x79ec, 0x79b2, 0x7979, 0x793f, 0x7906, 0x78cc, 0x7894,
0x785b, 0x7822, 0x77ea, 0x77b2, 0x777a, 0x7742, 0x770b, 0x76d3,
0x769c, 0x7665, 0x762f, 0x75f8, 0x75c2, 0x758c, 0x7556, 0x7520,
0x74ea, 0x74b5, 0x7480, 0x744b, 0x7416, 0x73e2, 0x73ad, 0x7379,
0x7345, 0x7311, 0x72dd, 0x72aa, 0x7277, 0x7243, 0x7210, 0x71de,
0x71ab, 0x7179, 0x7146, 0x7114, 0x70e2, 0x70b1, 0x707f, 0x704e,
0x701c, 0x6feb, 0x6fba, 0x6f8a, 0x6f59, 0x6f29, 0x6ef9, 0x6ec8,
0x6e99, 0x6e69, 0x6e39, 0x6e0a, 0x6ddb, 0x6dab, 0x6d7d, 0x6d4e,
0x6d1f, 0x6cf1, 0x6cc2, 0x6c94, 0x6c66, 0x6c38, 0x6c0a, 0x6bdd,
0x6bb0, 0x6b82, 0x6b55, 0x6b28, 0x6afb, 0x6acf, 0x6aa2, 0x6a76,
0x6a49, 0x6a1d, 0x69f1, 0x69c6, 0x699a, 0x696e, 0x6943, 0x6918,
0x68ed, 0x68c2, 0x6897, 0x686c, 0x6842, 0x6817, 0x67ed, 0x67c3,
0x6799, 0x676f, 0x6745, 0x671b, 0x66f2, 0x66c8, 0x669f, 0x6676,
0x664d, 0x6624, 0x65fc, 0x65d3, 0x65aa, 0x6582, 0x655a, 0x6532,
0x650a, 0x64e2, 0x64ba, 0x6493, 0x646b, 0x6444, 0x641c, 0x63f5,
0x63ce, 0x63a7, 0x6381, 0x635a, 0x6333, 0x630d, 0x62e7, 0x62c1,
0x629a, 0x6275, 0x624f, 0x6229, 0x6203, 0x61de, 0x61b8, 0x6193,
0x616e, 0x6149, 0x6124, 0x60ff, 0x60da, 0x60b6, 0x6091, 0x606d,
0x6049, 0x6024, 0x6000, 0x5fdc, 0x5fb8, 0x5f95, 0x5f71, 0x5f4d,
0x5f2a, 0x5f07, 0x5ee3, 0x5ec0, 0x5e9d, 0x5e7a, 0x5e57, 0x5e35,
0x5e12, 0x5def, 0x5dcd, 0x5dab, 0x5d88, 0x5d66, 0x5d44, 0x5d22,
0x5d00, 0x5cde, 0x5cbd, 0x5c9b, 0x5c7a, 0x5c58, 0x5c37, 0x5c16,
0x5bf5, 0x5bd4, 0x5bb3, 0x5b92, 0x5b71, 0x5b51, 0x5b30, 0x5b10,
0x5aef, 0x5acf, 0x5aaf, 0x5a8f, 0x5a6f, 0x5a4f, 0x5a2f, 0x5a0f,
0x59ef, 0x59d0, 0x59b0, 0x5991, 0x5972, 0x5952, 0x5933, 0x5914,
0x58f5, 0x58d6, 0x58b7, 0x5899, 0x587a, 0x585b, 0x583d, 0x581f,
0x5800, 0x57e2, 0x57c4, 0x57a6, 0x5788, 0x576a, 0x574c, 0x572e,
0x5711, 0x56f3, 0x56d5, 0x56b8, 0x569b, 0x567d, 0x5660, 0x5643,
0x5626, 0x5609, 0x55ec, 0x55cf, 0x55b2, 0x5596, 0x5579, 0x555d,
0x5540, 0x5524, 0x5507, 0x54eb, 0x54cf, 0x54b3, 0x5497, 0x547b,
0x545f, 0x5443, 0x5428, 0x540c, 0x53f0, 0x53d5, 0x53b9, 0x539e,
0x5383, 0x5368, 0x534c, 0x5331, 0x5316, 0x52fb, 0x52e0, 0x52c6,
0x52ab, 0x5290, 0x5276, 0x525b, 0x5240, 0x5226, 0x520c, 0x51f1,
0x51d7, 0x51bd, 0x51a3, 0x5189, 0x516f, 0x5155, 0x513b, 0x5121,
0x5108, 0x50ee, 0x50d5, 0x50bb, 0x50a2, 0x5088, 0x506f, 0x5056,
0x503c, 0x5023, 0x500a, 0x4ff1, 0x4fd8, 0x4fbf, 0x4fa6, 0x4f8e,
0x4f75, 0x4f5c, 0x4f44, 0x4f2b, 0x4f13, 0x4efa, 0x4ee2, 0x4eca,
0x4eb1, 0x4e99, 0x4e81, 0x4e69, 0x4e51, 0x4e39, 0x4e21, 0x4e09,
0x4df1, 0x4dda, 0x4dc2, 0x4daa, 0x4d93, 0x4d7b, 0x4d64, 0x4d4d,
0x4d35, 0x4d1e, 0x4d07, 0x4cf0, 0x4cd8, 0x4cc1, 0x4caa, 0x4c93,
0x4c7d, 0x4c66, 0x4c4f, 0x4c38, 0x4c21, 0x4c0b, 0x4bf4, 0x4bde,
0x4bc7, 0x4bb1, 0x4b9a, 0x4b84, 0x4b6e, 0x4b58, 0x4b41, 0x4b2b,
0x4b15, 0x4aff, 0x4ae9, 0x4ad3, 0x4abd, 0x4aa8, 0x4a92, 0x4a7c,
0x4a66, 0x4a51, 0x4a3b, 0x4a26, 0x4a10, 0x49fb, 0x49e5, 0x49d0,
0x49bb, 0x49a6, 0x4990, 0x497b, 0x4966, 0x4951, 0x493c, 0x4927,
0x4912, 0x48fe, 0x48e9, 0x48d4, 0x48bf, 0x48ab, 0x4896, 0x4881,
0x486d, 0x4858, 0x4844, 0x482f, 0x481b, 0x4807, 0x47f3, 0x47de,
0x47ca, 0x47b6, 0x47a2, 0x478e, 0x477a, 0x4766, 0x4752, 0x473e,
0x472a, 0x4717, 0x4703, 0x46ef, 0x46db, 0x46c8, 0x46b4, 0x46a1,
0x468d, 0x467a, 0x4666, 0x4653, 0x4640, 0x462c, 0x4619, 0x4606,
0x45f3, 0x45e0, 0x45cd, 0x45ba, 0x45a7, 0x4594, 0x4581, 0x456e,
0x455b, 0x4548, 0x4536, 0x4523, 0x4510, 0x44fe, 0x44eb, 0x44d8,
0x44c6, 0x44b3, 0x44a1, 0x448f, 0x447c, 0x446a, 0x4458, 0x4445,
0x4433, 0x4421, 0x440f, 0x43fd, 0x43eb, 0x43d9, 0x43c7, 0x43b5,
0x43a3, 0x4391, 0x437f, 0x436d, 0x435c, 0x434a, 0x4338, 0x4327,
0x4315, 0x4303, 0x42f2, 0x42e0, 0x42cf, 0x42bd, 0x42ac, 0x429b,
0x4289, 0x4278, 0x4267, 0x4256, 0x4244, 0x4233, 0x4222, 0x4211,
0x4200, 0x41ef, 0x41de, 0x41cd, 0x41bc, 0x41ab, 0x419a, 0x418a,
0x4179, 0x4168, 0x4157, 0x4147, 0x4136, 0x4125, 0x4115, 0x4104,
0x40f4, 0x40e3, 0x40d3, 0x40c2, 0x40b2, 0x40a2, 0x4091, 0x4081,
0x4071, 0x4061, 0x4050, 0x4040, 0x4030, 0x4020, 0x4010, 0x4000};
/*
* For 256 <= d < 512,
*
* RECIPROCAL_TABLE_64[d - 256] = floor((2^19 - 3*2^9)/d)
*
* Total space ATM is 256*2 = 512 bytes. Entries range from 10 to 11
* bits, so with some clever handling of hi bits, we could get three
* entries per 32 bit word, reducing the size to about 256*11/8 = 352
* bytes.
*
* TODO: Investigate whether alternative storage layouts are better;
* see RECIPROCAL_TABLE_32 above for ideas.
*/
__device__ __constant__
uint16_t
RECIPROCAL_TABLE_64[0x100] =
{
0x7fd, 0x7f5, 0x7ed, 0x7e5, 0x7dd, 0x7d5, 0x7ce, 0x7c6,
0x7bf, 0x7b7, 0x7b0, 0x7a8, 0x7a1, 0x79a, 0x792, 0x78b,
0x784, 0x77d, 0x776, 0x76f, 0x768, 0x761, 0x75b, 0x754,
0x74d, 0x747, 0x740, 0x739, 0x733, 0x72c, 0x726, 0x720,
0x719, 0x713, 0x70d, 0x707, 0x700, 0x6fa, 0x6f4, 0x6ee,
0x6e8, 0x6e2, 0x6dc, 0x6d6, 0x6d1, 0x6cb, 0x6c5, 0x6bf,
0x6ba, 0x6b4, 0x6ae, 0x6a9, 0x6a3, 0x69e, 0x698, 0x693,
0x68d, 0x688, 0x683, 0x67d, 0x678, 0x673, 0x66e, 0x669,
0x664, 0x65e, 0x659, 0x654, 0x64f, 0x64a, 0x645, 0x640,
0x63c, 0x637, 0x632, 0x62d, 0x628, 0x624, 0x61f, 0x61a,
0x616, 0x611, 0x60c, 0x608, 0x603, 0x5ff, 0x5fa, 0x5f6,
0x5f1, 0x5ed, 0x5e9, 0x5e4, 0x5e0, 0x5dc, 0x5d7, 0x5d3,
0x5cf, 0x5cb, 0x5c6, 0x5c2, 0x5be, 0x5ba, 0x5b6, 0x5b2,
0x5ae, 0x5aa, 0x5a6, 0x5a2, 0x59e, 0x59a, 0x596, 0x592,
0x58e, 0x58a, 0x586, 0x583, 0x57f, 0x57b, 0x577, 0x574,
0x570, 0x56c, 0x568, 0x565, 0x561, 0x55e, 0x55a, 0x556,
0x553, 0x54f, 0x54c, 0x548, 0x545, 0x541, 0x53e, 0x53a,
0x537, 0x534, 0x530, 0x52d, 0x52a, 0x526, 0x523, 0x520,
0x51c, 0x519, 0x516, 0x513, 0x50f, 0x50c, 0x509, 0x506,
0x503, 0x500, 0x4fc, 0x4f9, 0x4f6, 0x4f3, 0x4f0, 0x4ed,
0x4ea, 0x4e7, 0x4e4, 0x4e1, 0x4de, 0x4db, 0x4d8, 0x4d5,
0x4d2, 0x4cf, 0x4cc, 0x4ca, 0x4c7, 0x4c4, 0x4c1, 0x4be,
0x4bb, 0x4b9, 0x4b6, 0x4b3, 0x4b0, 0x4ad, 0x4ab, 0x4a8,
0x4a5, 0x4a3, 0x4a0, 0x49d, 0x49b, 0x498, 0x495, 0x493,
0x490, 0x48d, 0x48b, 0x488, 0x486, 0x483, 0x481, 0x47e,
0x47c, 0x479, 0x477, 0x474, 0x472, 0x46f, 0x46d, 0x46a,
0x468, 0x465, 0x463, 0x461, 0x45e, 0x45c, 0x459, 0x457,
0x455, 0x452, 0x450, 0x44e, 0x44b, 0x449, 0x447, 0x444,
0x442, 0x440, 0x43e, 0x43b, 0x439, 0x437, 0x435, 0x432,
0x430, 0x42e, 0x42c, 0x42a, 0x428, 0x425, 0x423, 0x421,
0x41f, 0x41d, 0x41b, 0x419, 0x417, 0x414, 0x412, 0x410,
0x40e, 0x40c, 0x40a, 0x408, 0x406, 0x404, 0x402, 0x400};
/*
* This Table 2 from Koç, C. K., 1995, "Analysis of Sliding Window
* Techniques for Exponentiation".
*
* The resolution of this table is higher than the one above because it's
* used in the fixed exponent modexp code and can benefit from using the
* precise bit length of the exponent, whereas the table above has to
* accommodate multiple different exponents simultaneously.
*/
// Maps exponent size (the inline comments give the bit counts, in steps of
// 128 bits per index; index 0 is unused, hence -1) to the CLNW sliding
// window width from the Koç table referenced above.
// NOTE(review): the name says BYTES but the entries are annotated in bits --
// confirm the indexing unit against the caller.
__constant__ int BYTES_TO_CLNW_WINDOW_SIZE_TABLE[] = {
-1, // bits
4, // 128
5, // 256
5, // 384
5, // 512
6, // 640
6, // 768
6, // 896
6, // 1024
6, // 1152
6, // 1280
6, // 1408
6, // 1536
6, // 1664
7, // 1792
7, // 1920
7, // 2048
}; |
11,882 | #include <cstdlib>
#include <ctime>
extern void Task1();
extern void Task2();
// Seeds the global C RNG once, then runs the selected exercise
// (Task1 is currently disabled).
int main()
{
srand(time(nullptr));
//Task1();
Task2();
return 0;
}
|
11,883 | /****************************************************************************
*
* cuda-reverse.cu - Array reversal with CUDA
*
* Written in 2017 by Moreno Marzolla <moreno.marzolla(at)unibo.it>
*
* To the extent possible under law, the author(s) have dedicated all
* copyright and related and neighboring rights to this software to the
* public domain worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication
* along with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*
* ---------------------------------------------------------------------------
*
* Compile with:
* nvcc cuda-reverse.cu -o cuda-reverse
*
* Run with:
* ./cuda-reverse [len]
*
* Example:
* ./cuda-reverse
*
****************************************************************************/
#include <stdio.h>
#include <math.h>
/* Reverse in[] into out[] */
/* Out-of-place reversal: each output slot receives its mirrored input. */
void reverse( int *in, int *out, int n )
{
    for (int k = n - 1; k >= 0; k--) {
        out[k] = in[n - 1 - k];
    }
}
/* In-place reversal of x[] into itself */
/* In-place reversal: swap symmetric pairs, closing in from both ends. */
void inplace_reverse( int *x, int n )
{
    for (int lo = 0, hi = n - 1; lo < hi; lo++, hi--) {
        const int tmp = x[lo];
        x[lo] = x[hi];
        x[hi] = tmp;
    }
}
/* Fill x with the ascending sequence 0, 1, ..., n-1 (x[i] = i). */
void fill( int *x, int n )
{
    for (int k = n; k-- > 0; ) {
        x[k] = k;
    }
}
/* Verify x is the descending sequence n-1 .. 0.
   Returns 0 and prints "OK" on success; prints the first mismatch and
   returns -1 otherwise. */
int check( int *x, int n )
{
    for (int k = 0; k < n; k++) {
        const int expected = n - 1 - k;
        if (x[k] != expected) {
            printf("FAILED: x[%d]=%d, expected %d\n", k, x[k], expected);
            return -1;
        }
    }
    printf("OK\n");
    return 0;
}
// Driver: builds an ascending array, reverses it out-of-place and then
// in-place, verifying both results with check().
int main( int argc, char* argv[] )
{
int *in, *out;
int n;
const int default_len = 1024*1024;
if ( argc > 2 ) {
printf("\nUsage: %s [len]\n\nReverse an array of \"len\" elements (default %d)\n\n", argv[0], default_len);
return -1;
}
if ( argc > 1 ) {
n = atoi(argv[1]);
} else {
n = default_len;
}
const size_t size = n * sizeof(*in);
/* Allocate and initialize in[] and out[] */
in = (int*)malloc(size);
fill(in, n);
out = (int*)malloc(size);
/* Reverse */
printf("Reverse of %d elements... ", n);
reverse(in, out, n);
check(out, n);
/* In-place reverse */
printf("In-place reverse of %d elements... ", n);
inplace_reverse(in, n);
check(in, n);
/* Cleanup */
free(in);
free(out);
return 0;
}
|
11,884 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Elementwise add c = a + b: one thread per element. No bounds check, so
// it assumes a single-block launch whose blockDim.x equals the array
// length (the caller launches <<<1, size>>>).
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
/*! \def ConvertSMVer2Cores(major, minor)
\brief Get number of CUDA cores per multiprocessor.
\link https://en.wikipedia.org/wiki/CUDA
\arg[in] major GPU Architecture major version.
\arg[in] minor GPU Architecture minor version.
\returns 0 if GPU Architecture is unknown, or number of CUDA cores per multiprocessor.
Extended with Volta/Turing (7.x), Ampere/Ada (8.x) and Hopper (9.0)
entries, mirroring _ConvertSMVer2Cores in NVIDIA's helper_cuda.h;
previously any GPU newer than SM 7.0 reported 0 cores.
*/
#define ConvertSMVer2Cores(major, minor) \
(((major) == 1)? ( /* Tesla */ \
((minor) == 0)? 8: /* G80*/ \
((minor) == 1)? 8: /* G8x, G9x */ \
((minor) == 2)? 8: /* GT21x */ \
((minor) == 3)? 8: /* GT200 */ \
0): \
((major) == 2)? ( /* Fermi */ \
((minor) == 0)? 32: /* GF100, GF110 */ \
((minor) == 1)? 48: /* GF10x, FG11x */ \
0): \
((major) == 3)? ( /* Kepler */ \
((minor) == 0)? 192: /* GK10x */ \
((minor) == 2)? 192: /* GK20A */ \
((minor) == 5)? 192: /* GK11x, GK208 */ \
((minor) == 7)? 192: /* GK210 */ \
0): \
((major) == 5)? ( /* Maxwell */ \
((minor) == 0)? 128: /* GM10X */ \
((minor) == 2)? 128: /* GM20X */ \
((minor) == 3)? 128: /* GM20B */ \
0): \
((major) == 6)? ( /* Pascal */ \
((minor) == 0)? 64: /* GP100 */ \
((minor) == 1)? 128: /* GP10X */ \
((minor) == 2)? 128: /* GP10B */ \
0): \
((major) == 7)? ( /* Volta / Turing */ \
((minor) == 0)? 64: /* GV100 */ \
((minor) == 2)? 64: /* GV10B (Xavier) */ \
((minor) == 5)? 64: /* TU10x (Turing) */ \
0): \
((major) == 8)? ( /* Ampere / Ada */ \
((minor) == 0)? 64: /* GA100 */ \
((minor) == 6)? 128: /* GA10x */ \
((minor) == 7)? 128: /* GA10B (Orin) */ \
((minor) == 9)? 128: /* AD10x (Ada) */ \
0): \
((major) == 9)? ( /* Hopper */ \
((minor) == 0)? 128: /* GH100 */ \
0): \
0)
// Prints device 0's properties, then adds two 5-element vectors on the GPU
// via addWithCuda() and reports the result.
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// bail out silently when no CUDA device is available
int count = 0;
if (cudaGetDeviceCount(&count) != cudaSuccess) return 0;
printf("device: %d\n", count);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("name: %s\n", prop.name);
printf("compute capability: %d.%d\n", prop.major, prop.minor);
printf("multiProcessorCount: %d (%d Cores)\n", prop.multiProcessorCount,
ConvertSMVer2Cores(prop.major, prop.minor) * prop.multiProcessorCount);
printf("maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
printf("maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
printf("warpSize: %d\n", prop.warpSize);
printf("maxThreadsDim: %d, %d, %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("totalGlobalMem: %zu MiB\n", prop.totalGlobalMem/1024/1024);
printf("clockRate: %d MHz\n", prop.clockRate/1000);
printf("isMultiGpuBoard: %d\n", prop.isMultiGpuBoard);
printf("maxGridSize: %d x %d x %d\n", prop.maxGridSize[0],
prop.maxGridSize[1],
prop.maxGridSize[2]);
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    // GPU vector addition: c[i] = a[i] + b[i] for i in [0, size).
    // Allocates three device buffers, copies the inputs up, launches
    // addKernel with ONE block of `size` threads (so size must not exceed
    // the device's maxThreadsPerBlock — TODO confirm callers respect this),
    // copies the result back, and frees the buffers.
    // Returns the first CUDA error encountered, or cudaSuccess.
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output) .
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
    // Check for any errors launching the kernel (launch errors are not
    // returned by the <<<>>> expression itself).
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Shared cleanup path: the success path intentionally falls through here
    // so the device buffers are always released.
Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
11,885 | #include "includes.h"
__global__ void check_support(float * vec_input, float * vec, const int n, int * support_counter)
{
    // Compare the sparsity patterns of vec_input and vec element by element.
    // support_counter[0] counts positions that are nonzero in BOTH vectors;
    // support_counter[1] counts positions that are zero in BOTH vectors.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    const bool input_nonzero = (vec_input[idx] != 0);
    const bool vec_nonzero   = (vec[idx] != 0);
    if (input_nonzero && vec_nonzero)
        atomicAdd(&support_counter[0], 1);
    else if (!input_nonzero && !vec_nonzero)
        atomicAdd(&support_counter[1], 1);
}
11,886 | #include <stdio.h>
// Size of array
#define N 1048576
// Kernel
__global__ void add_vectors(double *a, double *b, double *c)
{
    // Element-wise c = a + b over N elements; the guard covers the tail
    // block when N is not a multiple of the block size.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N)
        return;
    c[gid] = a[gid] + b[gid];
}
// Main program
int main()
{
    // Add two length-N vectors of doubles on the GPU and verify the result.
    // Number of bytes to allocate for N doubles
    size_t bytes = N*sizeof(double);
    // Allocate memory for arrays A, B, and C on host
    double *A = (double*)malloc(bytes);
    double *B = (double*)malloc(bytes);
    double *C = (double*)malloc(bytes);
    // Allocate memory for arrays d_A, d_B, and d_C on device
    double *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);
    // Fill host arrays A and B
    for(int i=0; i<N; i++)
    {
        A[i] = 1.0;
        B[i] = 2.0;
    }
    // Copy data from host arrays A and B to device arrays d_A and d_B
    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
    // Set execution configuration parameters
    //    thr_per_blk: number of CUDA threads per grid block
    //    blk_in_grid: number of blocks in grid (ceil division)
    int thr_per_blk = 256;
    int blk_in_grid = ceil( float(N) / thr_per_blk );
    // Launch kernel and surface launch-configuration errors immediately —
    // the <<<>>> expression itself does not report them.
    add_vectors<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C);
    cudaError_t launch_status = cudaGetLastError();
    if (launch_status != cudaSuccess)
    {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(launch_status));
        exit(1);
    }
    // Copy data from device array d_C to host array C
    // (a blocking memcpy, so it also synchronizes with the kernel).
    cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
    // Verify results
    double tolerance = 1.0e-14;
    for(int i=0; i<N; i++)
    {
        if( fabs(C[i] - 3.0) > tolerance)
        {
            // Fix: the original used %d for the double C[i], printing garbage.
            printf("\nError: value of C[%d] = %f instead of 3.0\n\n", i, C[i]);
            exit(1);
        }
    }
    // Free CPU memory
    free(A);
    free(B);
    free(C);
    // Free GPU memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\n---------------------------\n");
    printf("__SUCCESS__\n");
    printf("---------------------------\n");
    printf("N                 = %d\n", N);
    printf("Threads Per Block = %d\n", thr_per_blk);
    printf("Blocks In Grid    = %d\n", blk_in_grid);
    printf("---------------------------\n\n");
    return 0;
}
|
11,887 | #include <iostream>
#include <cuda.h>
#include <math.h>
#include <fstream>
using namespace std;
#define BLOCKSIZE 32
//test code
/*void mat(const float*A , const float* B, float* C, const int N, const int M, const int K) {
int i,j,l;
#pragma omp parallel for shared(A,B,C) private(i,j,l)
for(i=0; i<N; i++) {
for(l=0; l<M; l++) {
float a = A[M*i+l];
for(j=0; j<K; j++) {
C[K*i + j] += a*B[K*l+j];
}
}
}
}*/
// W update of multiplicative-update NMF (A ~= W H, A is r x c, W r x k, H k x c):
//   wcp[row,col] = w[row,col] * (A H')[row,col] / (W H H')[row,col]
// H is stored row-major as k rows of length c, addressed as h[j*c + i].
__global__ void nmfw(float *a, int r, int c, int k, float *w, float *h, float *wcp)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    const bool active = (col < k && row < r);
    float numer = 0.0f;   // w[row,col] * (A H')[row,col]
    float denom = 0.0f;   // (W H H')[row,col]
    if (active) {
        // (A H')[row,col]
        float sum = 0.0f;
        for (int i = 0; i < c; i++)
            sum += a[row*c + i]*h[col*c + i];
        numer = w[row*k+col]*sum;
        // (W H H')[row,col]
        for (int i = 0; i < c; i++) {
            for (int j = 0; j < k; j++) {
                denom += w[row*k + j]*h[j*c + i]*h[col*c+i];
            }
        }
    }
    // Fix: the barrier is hoisted out of the bounds check — a __syncthreads()
    // inside a conditional not reached by every thread of the block is
    // undefined behavior (potential hang) for partially-covered edge blocks.
    __syncthreads();
    if (active)
        wcp[row*k+col] = numer/denom;
}
// H update of multiplicative-update NMF:
//   hcp[row,col] = h[row,col] * (W' A)[row,col] / (W' W H)[row,col]
__global__ void nmfh(float *a, int r, int c, int k, float *w, float *h, float *hcp)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    const bool active = (row < k && col < c);
    float numer = 0.0f;   // h[row,col] * (W' A)[row,col]
    float denom = 0.0f;   // (W' W H)[row,col]
    if (active) {
        // (W' A)[row,col]
        float sum = 0.0f;
        for (int i = 0; i < r; i++)
            sum += w[i*k + row]*a[i*c+col];
        numer = h[row*c+col]*sum;
        // (W' W H)[row,col]
        for (int i = 0; i < k; i++)
            for (int j = 0; j < r; j++)
                denom += w[j*k + row]*w[j*k + i]*h[i*c+col];
    }
    // Fix: barrier moved out of the divergent bounds check (same rationale
    // as in nmfw — __syncthreads() must be reached by all threads).
    __syncthreads();
    if (active)
        hcp[row*c+col] = numer/denom;
}
__global__ void nmfcpy(float *mat, float *matcp, int m, int n)
{
    // Element-wise copy of an m x n row-major matrix: mat = matcp.
    // Run as a separate launch so the copy is globally ordered after the
    // kernel that produced matcp.
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    if (y < m && x < n)
        mat[y*n + x] = matcp[y*n + x];
}
// Host driver for multiplicative-update NMF: factor A (r x c) into
// W (r x k) * H (k x c), running `niters` alternating W/H updates on the GPU.
// w and h must hold the (e.g. random positive) initial factors on entry and
// receive the refined factors on return.
void nmfgpu(float *a, int r, int c, int k, int niters, float *w, float *h)
{
    const dim3 block(BLOCKSIZE, BLOCKSIZE);
    const dim3 grid((c + BLOCKSIZE - 1)/ BLOCKSIZE,(r + BLOCKSIZE - 1)/ BLOCKSIZE);
    // Device buffers: factors, scratch copies for the updates, and the data.
    float *dev_w, *dev_h, *dev_a, *dev_wcp, *dev_hcp;
    cudaMalloc((void**)&dev_w, sizeof(float)*r*k);
    cudaMalloc((void**)&dev_h, sizeof(float)*k*c);
    cudaMalloc((void**)&dev_wcp, sizeof(float)*r*k);
    cudaMalloc((void**)&dev_hcp, sizeof(float)*k*c);
    cudaMalloc((void**)&dev_a, sizeof(float)*r*c);
    cudaMemcpy(dev_w, w, sizeof(float)*r*k, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_h, h, sizeof(float)*k*c, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_a, a, sizeof(float)*r*c, cudaMemcpyHostToDevice);
    // Alternate the W and H multiplicative updates; the copy kernels commit
    // each update before the next one reads it (kernel launches on the same
    // stream are globally ordered).
    for (int i=0; i<niters; i++) { //slow way
        nmfw<<<grid, block>>>(dev_a, r, c, k, dev_w, dev_h, dev_wcp);
        nmfcpy<<<grid, block>>>(dev_w, dev_wcp, r, k);
        nmfh<<<grid, block>>>(dev_a, r, c, k, dev_w, dev_h, dev_hcp);
        nmfcpy<<<grid, block>>>(dev_h, dev_hcp, k, c);
    }
    // Copy the refined factors back to the host.
    cudaMemcpy(w, dev_w, sizeof(float)*r*k, cudaMemcpyDeviceToHost);
    cudaMemcpy(h, dev_h, sizeof(float)*k*c, cudaMemcpyDeviceToHost);
    // Clean up — fix: the original leaked dev_wcp and dev_hcp.
    cudaFree(dev_w);
    cudaFree(dev_h);
    cudaFree(dev_a);
    cudaFree(dev_wcp);
    cudaFree(dev_hcp);
}
//test code, u can test it if u want
/*int main()
{
srand(1000);
float *w, *h;
const int r = 194;
int k = 50;
const int c = 259;
w = new float[r*k];
h = new float[k*c];
float a[r*c];
ifstream file("af.txt");
for (int i = 0; i < 194 * 259; i++)
file >> a[i];
for (int i = 0; i < r*k; i++)
{
w[i] = (float)rand()/RAND_MAX;
}
for (int i = 0; i < k*c; i++)
{
h[i] = (float)rand()/RAND_MAX;
}
nmfgpu(a, r, c, k, 100, w, h);
float *res = new float[r*c];
for (int i = 0; i<r*c; i++)
res[i] = 0;
mat(w,h,res,r,k,c);
ofstream output("result.txt");
for (int i=0; i < r; i++) {
for (int j=0; j <c; j++)
output << res[i*c+j] << " ";
output << "\n";
}
}*/
|
11,888 | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "common.h"
/*
 * 7-point 3D stencil step: Anext = c1 * (sum of 6 face neighbors) - c0 * center.
 * 2D thread block over (x, y) marching serially along z; the current xy-plane
 * is cached in shared memory while the z-1 / z+1 planes live in registers.
 * Each thread is coarsened to cover TWO x positions: i and i2 = i + blockDim.x.
 * The static shared tile of 32*2*4 floats assumes blockDim.x <= 32 and
 * blockDim.y <= 4 (see the FIXME about switching to dynamic shared memory).
 */
__device__ void block2D_hybrid_coarsen_x(float c0, float c1, float* A0, float* Anext, int nx, int ny, int nz)
{
    //thread coarsening along x direction
    const int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    const int i2 = blockIdx.x * blockDim.x * 2 + threadIdx.x + blockDim.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int sh_id = threadIdx.x + threadIdx.y * blockDim.x * 2;
    const int sh_id2 = threadIdx.x + blockDim.x + threadIdx.y * blockDim.x * 2;
    //shared memory tile holding the current z-plane for the doubled-width block
    // extern __shared__ float sh_A0[];
    // FIXME: Dynamic shared memory.
    __shared__ float sh_A0[32 * 2 * 4];
    sh_A0[sh_id] = 0.0f;
    sh_A0[sh_id2] = 0.0f;
    __syncthreads();
    //get available region for load and store (interior points only)
    const bool w_region = i > 0 && j > 0 && (i < (nx - 1)) && (j < (ny - 1));
    const bool w_region2 = j > 0 && (i2 < nx - 1) && (j < ny - 1);
    const bool x_l_bound = (threadIdx.x == 0);
    const bool x_h_bound = ((threadIdx.x + blockDim.x) == (blockDim.x * 2 - 1));
    const bool y_l_bound = (threadIdx.y == 0);
    const bool y_h_bound = (threadIdx.y == (blockDim.y - 1));
    //registers for bottom and top planes
    //because of thread coarsening, we need to double the registers
    float bottom = 0.0f, bottom2 = 0.0f, top = 0.0f, top2 = 0.0f;
    //load data for bottom (z=0) and current (z=1) planes
    if ((i < nx) && (j < ny)) {
        bottom = A0[Index3D(nx, ny, i, j, 0)];
        sh_A0[sh_id] = A0[Index3D(nx, ny, i, j, 1)];
    }
    if ((i2 < nx) && (j < ny)) {
        bottom2 = A0[Index3D(nx, ny, i2, j, 0)];
        sh_A0[sh_id2] = A0[Index3D(nx, ny, i2, j, 1)];
    }
    __syncthreads();
    //march along z: plane k sits in shared memory, k-1/k+1 in registers
    for (int k = 1; k < nz - 1; k++) {
        float a_left_right, a_up, a_down;
        //load required data on xy planes:
        //from shared memory when the neighbor is inside the tile,
        //from global memory when it falls on a tile edge
        if ((i < nx) && (j < ny))
            top = A0[Index3D(nx, ny, i, j, k + 1)];
        if (w_region) {
            a_up = y_h_bound ? A0[Index3D(nx, ny, i, j + 1, k)] : sh_A0[sh_id + 2 * blockDim.x];
            a_down = y_l_bound ? A0[Index3D(nx, ny, i, j - 1, k)] : sh_A0[sh_id - 2 * blockDim.x];
            a_left_right = x_l_bound ? A0[Index3D(nx, ny, i - 1, j, k)] : sh_A0[sh_id - 1];
            Anext[Index3D(nx, ny, i, j, k)] = (top + bottom + a_up + a_down + sh_A0[sh_id + 1] + a_left_right) * c1
                - sh_A0[sh_id] * c0;
        }
        //second (coarsened) x position
        if ((i2 < nx) && (j < ny))
            top2 = A0[Index3D(nx, ny, i2, j, k + 1)];
        if (w_region2) {
            a_up = y_h_bound ? A0[Index3D(nx, ny, i2, j + 1, k)] : sh_A0[sh_id2 + 2 * blockDim.x];
            a_down = y_l_bound ? A0[Index3D(nx, ny, i2, j - 1, k)] : sh_A0[sh_id2 - 2 * blockDim.x];
            a_left_right = x_h_bound ? A0[Index3D(nx, ny, i2 + 1, j, k)] : sh_A0[sh_id2 + 1];
            Anext[Index3D(nx, ny, i2, j, k)] = (top2 + bottom2 + a_up + a_down + a_left_right + sh_A0[sh_id2 - 1]) * c1
                - sh_A0[sh_id2] * c0;
        }
        //rotate planes: current becomes bottom, top becomes current
        __syncthreads();
        bottom = sh_A0[sh_id];
        sh_A0[sh_id] = top;
        bottom2 = sh_A0[sh_id2];
        sh_A0[sh_id2] = top2;
        __syncthreads();
    }
}
|
11,889 | #include <cuda_runtime.h>
#include <iostream>
// Element-wise d_c = d_a + d_b over n ints.
// Fix: the original used only threadIdx.x (wrong for multi-block launches)
// and ignored n entirely (no bounds guard). The global index plus guard is
// backward compatible with the existing <<<1, n>>> launch in main().
__global__ void add(int *d_a,int *d_b,int *d_c,int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        d_c[idx] = d_a[idx] + d_b[idx];
}
int main()
{
    // Interactively read a positive array length n, add two int arrays on
    // the GPU with a single block of n threads, print the sums, and clean up.
    // NOTE(review): the <<<1, n>>> launch silently fails once n exceeds the
    // device's maxThreadsPerBlock (typically 1024) — confirm the intended
    // input range or launch multiple blocks.
    int n = 0;
    int blag = 1;// loop flag: keep prompting until a positive length is entered
    do{
        std::cout << "请输入数组的长度:" << std::endl;
        std::cin >> n;
        if(n <= 0)
        {
            std::cout << "你输入的数组长度为为正数,请重新输入:" << std::endl;
        }else
        {
            blag = 0;
        }
    }while(blag);
    /****** allocate host memory ******/
    int * h_a = (int*)malloc(sizeof(int) * n);
    int * h_b = (int*)malloc(sizeof(int) * n);
    int * h_c = (int*)malloc(sizeof(int) * n);
    /****** initialize host arrays ******/
    for(int i = 0; i < n; ++i)
    {
        h_a[i] = i + 1;
        h_b[i] = i + 3;
    }
    /****** allocate device memory ******/
    int *d_a,*d_b,*d_c;
    cudaMalloc((void**)&d_a,sizeof(int) * n);
    cudaMalloc((void**)&d_b,sizeof(int) * n);
    cudaMalloc((void**)&d_c,sizeof(int) * n);
    /****** copy inputs from host to device ******/
    cudaMemcpy(d_a,h_a,sizeof(int) * n,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,h_b,sizeof(int) * n,cudaMemcpyHostToDevice);
    /****** launch the kernel: one block, one thread per element ******/
    add<<<1,n>>>(d_a,d_b,d_c,n);
    /****** copy the result back to the host (blocking, so it synchronizes) ******/
    cudaMemcpy(h_c,d_c,sizeof(int) * n,cudaMemcpyDeviceToHost);
    for(int i = 0; i < n; ++i)
    {
        std::cout << "h_c[" << i << "] = " << h_c[i] << " ";
    }
    std::cout << std::endl;
    /****** free device memory ******/
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    /****** free host memory ******/
    free(h_a);
    free(h_b);
    free(h_c);
    std::cout << "运行结束!" << std::endl;
    return 0;
}
|
11,890 | /*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
* License: BSD 3
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 128 // 2^8
#define BLOCKS 8192 // 2^14
#define NUM_VALS THREADS *BLOCKS
void print_elapsed(clock_t start, clock_t stop)
{
    // Report the processor time between two clock() samples, in seconds.
    const double seconds = (double)(stop - start) / CLOCKS_PER_SEC;
    printf("Elapsed time: %.3fs\n", seconds);
}
float random_float()
{
    // One pseudo-random sample from rand(), mapped onto [0, 1].
    const float denom = (float)RAND_MAX;
    return (float)rand() / denom;
}
void array_print(float *arr, int length)
{
    // Print every element, space-separated, 3 decimal places, then a newline.
    for (int idx = 0; idx < length; ++idx)
        printf("%1.3f ", arr[idx]);
    printf("\n");
}
void array_fill(float *arr, int length)
{
    // Populate arr with `length` uniform pseudo-random values in [0, 1],
    // reseeding the C PRNG from the wall clock first.
    srand(time(NULL));
    for (int idx = 0; idx < length; ++idx)
        arr[idx] = random_float();
}
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
    // One compare-exchange stage of the bitonic network. Thread i pairs with
    // partner i^j; only the lower-indexed thread of each pair performs the
    // exchange. Bit k of i selects the sort direction for this subsequence.
    const unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    const unsigned int partner = i ^ j;
    if (partner <= i)
        return;  // the higher index of the pair is passive
    const bool ascending = ((i & k) == 0);
    const float lo = dev_values[i];
    const float hi = dev_values[partner];
    const bool out_of_order = ascending ? (lo > hi) : (lo < hi);
    if (out_of_order)
    {
        dev_values[i] = hi;
        dev_values[partner] = lo;
    }
}
/**
* Inplace bitonic sort using CUDA.
*/
void bitonic_sort(float *values)
{
    // In-place ascending bitonic sort of NUM_VALS floats on the GPU.
    // NUM_VALS is a power of two (THREADS * BLOCKS), as the network requires.
    // One thread per element; each (k, j) pass launches a full grid doing one
    // compare-exchange stage, with global synchronization supplied by the
    // kernel-launch boundaries on the default stream.
    float *dev_values;
    size_t size = NUM_VALS * sizeof(float);
    cudaMalloc((void **)&dev_values, size);
    cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
    dim3 blocks(BLOCKS, 1); /* Number of blocks */
    dim3 threads(THREADS, 1); /* Number of threads */
    int j, k;
    /* Major step: k = size of the bitonic subsequences being merged. */
    for (k = 2; k <= NUM_VALS; k <<= 1)
    {
        /* Minor step: j = compare distance, halved every pass. */
        for (j = k >> 1; j > 0; j = j >> 1)
        {
            bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k);
        }
    }
    cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_values);
}
// Write `num` floats, one per line (rounded to integer precision by the
// "%0.0f" format), to `filename`, overwriting any existing file.
void writefile(char *filename, float *buffer, int num)
{
    FILE *fp;
    fp = fopen(filename, "w");
    if (fp == NULL)
    {
        // Fix: the original dereferenced a NULL FILE* when the open failed.
        fprintf(stderr, "writefile: cannot open %s for writing\n", filename);
        return;
    }
    for (int j = 0; j < num; j++)
    {
        fprintf(fp, "%0.0f\n", *(buffer + j));
    }
    fclose(fp);
}
int main(int argc, char *argv[])
{
    // Usage: prog <input file> <output file>
    // Reads up to NUM_VALS floats from the input file, sorts them on the
    // GPU, and writes the sorted values to the output file.
    clock_t start, stop;
    if (argc != 3)
    {
        printf("Invalid argument count.  %s accepts 1-4 arguments, %d given\n",
               argv[0], argc);
        return -1;
    }
    float *values = (float *)malloc(NUM_VALS * sizeof(float));
    // array_fill(values, NUM_VALS);
    if (values == NULL)
    {
        printf("Insufficient host memory to allocate at %d", __LINE__);
        return -3;
    }
    start = clock();
    FILE *fin = fopen(argv[1], "r");
    if (fin == NULL)
    {
        // Fix: the original passed a NULL stream straight to fscanf.
        fprintf(stderr, "Unable to open input file %s\n", argv[1]);
        free(values);
        return -2;
    }
    for (int i = 0; i < NUM_VALS; i++)
    {
        if (EOF == fscanf(fin, "%f ", &values[i]))
        {
            break;
        }
    }
    fclose(fin); // fix: input stream was never closed
    bitonic_sort(values); /* Inplace */
    writefile(argv[2], values, NUM_VALS);
    stop = clock();
    print_elapsed(start, stop);
    free(values);
    return 0;
}
11,891 | #ifndef QSORT_IMPL_KERNEL_CU
#define QSORT_IMPL_KERNEL_CU
#endif
|
11,892 | #include "includes.h"
/*
* cuArraysPadding.cu
* Padding Utitilies for oversampling
*/
//padding zeros in the middle, move quads to corners
//for raw chunk data oversampling
//tested
__global__ void cuArraysR2C_kernel(float *image1, float2 *image2, int size)
{
    // Promote a real-valued array to complex: real part copied from image1,
    // imaginary part zeroed. One thread per element.
    const int tid = threadIdx.x + blockDim.x*blockIdx.x;
    if (tid >= size)
        return;
    image2[tid] = make_float2(image1[tid], 0.0f);
}
__global__
void WorkItemRank(int *scan, int *lbs, int *wir, int sizeLbs) {
    // Grid-stride loop: the rank of each work item within its segment,
    // i.e. its global index minus the scan offset of its owner (lbs[i]).
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeLbs; idx += stride)
        wir[idx] = idx - scan[lbs[idx]];
}
__global__
void FindChangeColor(int sizeLarge, int *nodes, int *wir, int *lbs, int sizeLbs, int *col_id, int *offset, int currentColor, int *color, int *set) {
    // Grid-stride loop over (owner, neighbor) pairs: clear the owner's set
    // flag when a not-yet-colored or same-color neighbor with index >= owner
    // blocks the current color.
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeLbs; idx += stride)
    {
        const int owner = lbs[idx];
        const int neighbor = col_id[offset[nodes[owner]] + wir[idx]];
        const bool blocksColor =
            (neighbor >= owner) &&
            (color[neighbor] == 0 || color[neighbor] == currentColor);
        if (blocksColor)
            set[owner] = 0;
    }
}
__global__
void assignColor(int *set, int sizeLarge, int *Large, int *color, int currentColor)
{
    // Grid-stride loop: give currentColor to every still-uncolored node whose
    // set flag survived the conflict check, then re-arm the flag for the
    // next coloring round.
    const int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sizeLarge; idx += stride)
    {
        const int node = Large[idx];
        if (set[idx] == 1 && color[node] == 0)
            color[node] = currentColor;
        set[idx] = 1;
    }
}
|
11,894 | /*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* Author: Shih-Hao Tseng <shtseng@caltech.edu>
*/
#include "helper_functions.cuh"
/*
double normpdf(const double x, const double mu, const double sigma){
double power = (x-mu)/sigma;
return exp(-power*power/2)*INV_SQRT_2PI/sigma;
}
*/
__global__
void
gen_normpdf(int n, const double base, const double step, double *vec_x)
{
    // Tabulate the unit-sigma (mu = 0) normal pdf at the n equally spaced
    // points base, base + step, ..., base + (n-1)*step into vec_x.
    // One thread per sample; threads past n do nothing.
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) {
        double current_pos = base + i * step;
        vec_x[i] = normpdf(current_pos,1.0);
    }
}
__host__ __device__
double
normpdf(const double x, const double sigma){
    // Normal pdf with mu = 0: exp(-(x/sigma)^2 / 2) / (sigma * sqrt(2*pi)),
    // with 1/sqrt(2*pi) supplied by the INV_SQRT_2PI constant.
    const double z = x/sigma;
    return INV_SQRT_2PI*exp(-z*z/2)/sigma;
}
__global__
void
gen_normpdf(int n, const float base, const float step, float *vec_x)
{
    // Single-precision variant: tabulate the unit-sigma normal pdf at n
    // equally spaced points. One thread per sample.
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < n) {
        float current_pos = base + i * step;
        // Fix: pass a float literal so the float normpdf overload is chosen;
        // the double literal 1.0 routed the call through the double-precision
        // overload (or made the call ambiguous when both are declared).
        vec_x[i] = normpdf(current_pos,1.0f);
    }
}
__host__ __device__
float
normpdf(const float x, const float sigma){
    // Normal pdf with mu = 0, single precision.
    // Fix: use expf instead of exp so the evaluation stays in float —
    // the double exp forced a promote/compute/demote round trip per call.
    float power = x/sigma;
    return expf(-power*power/2)*INV_SQRT_2PI/sigma;
}
11,895 | #include "includes.h"
// 5x5 box blur over an interleaved 3-channel image; one thread per pixel.
// The flat index assumes the fixed launch geometry of 1024 threads per block
// and up to 65536 blocks along x.
__global__ void smoothColor (unsigned char *imagem, unsigned char *saida, unsigned int cols, unsigned int linhas)
{
    unsigned int indice = (blockIdx.y * blockDim.x * 65536) + (blockIdx.x * 1024) + threadIdx.x; // flat pixel index from block/thread coordinates
    if(indice >= cols*linhas)
        return;
    // Bounds of the 5x5 window around this pixel: i = row, j = column.
    int i_begin = (indice/(int)cols)-2, i_end = (indice/(int)cols)+2;
    int j_begin = (indice%(int)cols)-2, j_end = (indice%(int)cols)+2;
    if(i_begin<0) i_begin = 0;
    if(j_begin<0) j_begin = 0;
    // Fix: rows must be clamped against the row count (linhas); the original
    // clamped i_end against cols, which reads out of bounds (or truncates)
    // whenever the image is not square.
    if(i_end>=(int)linhas) i_end = linhas-1;
    if(j_end>=(int)cols) j_end = cols-1;
    // Average each channel over the clamped window.
    int media[3] = {0,0,0};
    int qtd = 0;
    for (int i = i_begin; i<= i_end; ++i)
    {
        for(int j = j_begin; j<= j_end; ++j)
        {
            media[0] += imagem[((i*cols)+j)*3];
            media[1] += imagem[((i*cols)+j)*3+1];
            media[2] += imagem[((i*cols)+j)*3+2];
            qtd++;
        }
    }
    saida[indice*3] = (unsigned char)(media[0]/qtd);
    saida[indice*3+1] = (unsigned char)(media[1]/qtd);
    saida[indice*3+2] = (unsigned char)(media[2]/qtd);
}
11,896 | /******************************************************************************************/
///功能:图片缩小两倍
/* 函数名 线程块大小 耗费时间
* kernel_halfsize1 [32,4,1] 639.142us
* kernel_halfsizebyshare1 [32,4,1] 654.107us
* kernel_halfsize [32,8,1] 639.56us
* kernel_halfsizebyshare [32,4,1] 687.768us
*/
/******************************************************************************************/
#include <cuda_runtime.h>
#include <cstdio>
#include <iostream>
/**
* @property 图像缩放
* @func 将图像缩小为原图两倍 像素点为(2*y,2*x)(2*y,2*x+1)(2*y+1,2*x)(2*y+1,2*x+1)的平均值
* 若最后一行或最后一列为奇数列.则越界部分再取最后一行或最后一列
* @param_out out_image 放大后的图像首地址
* @param_in in_image 待放大图像首地址
* @param_in weight 输入图像的宽度
* @param_in height 输入图像的高度
* @param_in channels 输入图像的颜色通道数
* 调用示例:
* halfsize_by_cuda(&out_image->at(0),&img->at(0),img->width(),img->height(),img->channels());
*/
void halfsize_by_cuda(float * const out_image,float const * const in_image, int const weight,int const height,int const channels);
/**********************************************************************************************************************************/
//Half-size downscale with 2x thread coarsening along x and a shared-memory
//staging tile: each output pixel is the average of the matching 2x2 input
//block; borders are clamped by duplicating the last row/column.
__global__ void kernel_halfsizebyshare1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
    extern __shared__ float data[];
    int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*2;//starting output x index (channel-interleaved)
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;//output y index
    int stride=iw*ic;//input row stride in floats
    int in_x0=blockIdx.x*blockDim.x*2*ic*2;//first x origin of the input tile
    int in_y0=blockIdx.y*blockDim.y*2;//first y origin of the input tile
    int in_x1=in_x0+blockDim.x*ic*2;//second x origin of the input tile
    int in_y1=in_y0+blockDim.y;//second y origin of the input tile
    int share_x=blockDim.x*4;//shared tile width in pixels
    //Stage the doubled-size input tile into shared memory (4 loads per thread per pass).
    for (int c = 0; c < ic*2; ++c)
    {
        int fact_x_s=threadIdx.x+blockDim.x*c;//first x index within the shared tile
        int x_s=fact_x_s+blockDim.x*ic*2;//second x index within the shared tile
        int y_s0=threadIdx.y*share_x*ic;//first y offset within the shared tile
        int y_s1=y_s0+blockDim.y*share_x*ic;//second y offset within the shared tile
        int fact_iw=fact_x_s%ic+stride-ic;//clamp limit: last same-channel element of a row
        int x0=min(in_x0+fact_x_s,fact_iw);
        int x1=min(in_x1+fact_x_s,fact_iw);
        int y0=min(in_y0+threadIdx.y,ih-1)*stride;//rows clamped to the last input row
        int y1=min(in_y1+threadIdx.y,ih-1)*stride;
        data[y_s0+fact_x_s]=in[y0+x0];
        data[y_s0+x_s]=in[y0+x1];
        data[y_s1+fact_x_s]=in[y1+x0];
        data[y_s1+x_s]=in[y1+x1];
    }
    __syncthreads();
    //Average each 2x2 shared-memory block into one output element per pass.
    for (int c = 0; c <ic*2 ; ++c) {
        int fact_x=out_x+blockDim.x*c;
        if(out_y<oh&&fact_x<ow*ic)
        {
            int fact_x_s=threadIdx.x+blockDim.x*c;
            int srow1=threadIdx.y*2*share_x*ic;
            int srow2=srow1+share_x*ic;
            int scol1=(fact_x_s / ic) * 2 * ic + fact_x_s % ic;
            int scol2=scol1 + ic;
            int index[4] = {srow1 + scol1,
                            srow1 + scol2,
                            srow2 + scol1,
                            srow2 + scol2};
            int out_idx = out_y * ow*ic + fact_x;
            out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
        }
    }
}
//Half-size downscale without shared memory: each output element is the
//average of the corresponding 2x2 input block; when the input extent is odd,
//the last row/column is duplicated. One channel-interleaved element per
//thread per loop pass.
__global__ void kernel_halfsize(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
    int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;//output x (channel-interleaved)
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;//output y
    int stride=iw*ic;//input row stride in floats
    for(int c=0;c<ic;c++)
    {
        int fact_x=out_x+blockDim.x*c;
        if(out_y<oh&&fact_x<ow*ic) {
            int irow1 = out_y * 2 * stride;//top input row of the 2x2 block
            int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);//bottom row, clamped for odd heights
            int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;//left input column (same channel)
            int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));//right column, clamped for odd widths
            int index[4] = {irow1 + icol1,
                            irow1 + icol2,
                            irow2 + icol1,
                            irow2 + icol2};
            int out_idx = out_y * ow*ic + fact_x;
            out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
        }
    }
}
__global__ void kernel_halfsize1(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
    //Same as kernel_halfsize but with 2x thread coarsening along x. To unroll
    //an ic*3 loop instead, change out_x to threadIdx.x+blockIdx.x*blockDim.x*ic*3
    //and the loop bound to ic*3, and shrink the grid accordingly.
    int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic*2;//output x (channel-interleaved, coarsened)
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;//output y
    int stride=iw*ic;//input row stride in floats
    for(int c=0;c<ic*2;c++)
    {
        int fact_x=out_x+blockDim.x*c;
        if(out_y<oh&&fact_x<ow*ic) {
            int irow1 = out_y * 2 * stride;//top input row of the 2x2 block
            int irow2 = irow1 + stride * (out_y * 2 + 1 < ih);//bottom row, clamped for odd heights
            int icol1 = (fact_x / ic) * 2 * ic + fact_x % ic;//left input column (same channel)
            int icol2 = min((icol1 + ic), (iw * ic - ic + fact_x % ic));//right column, clamped for odd widths
            int index[4] = {irow1 + icol1,
                            irow1 + icol2,
                            irow2 + icol1,
                            irow2 + icol2};
            int out_idx = out_y * ow*ic + fact_x;
            out[out_idx] = 0.25f * (in[index[0]] + in[index[1]] + in[index[2]] + in[index[3]]);
        }
    }
}
//Half-size downscale with a shared-memory staging tile (no x coarsening):
//each thread loads its 2x2 input neighborhood into shared memory, then
//averages it into one output element per channel pass.
__global__ void kernel_halfsizebyshare(float *out,float *in,int const ow,int const oh,int const iw,int const ih,int const ic)
{
    extern __shared__ float data[];
    int out_x=threadIdx.x+blockIdx.x*blockDim.x*ic;//starting output x index (channel-interleaved)
    int out_y=threadIdx.y+blockIdx.y*blockDim.y;//output y index
    int stride=iw*ic;//input row stride in floats
    int in_x0=blockIdx.x*blockDim.x*2*ic;//x origin of the input tile
    int in_y0=blockIdx.y*blockDim.y*2;//y origin of the input tile
    int in_x1=in_x0+blockDim.x*ic;//second x origin (right half of the tile)
    int in_y1=in_y0+blockDim.y;//second y origin (bottom half of the tile)
    int share_x=blockDim.x*2;//shared tile width in pixels
    //Stage the doubled-size input tile into shared memory, clamping at borders.
    for (int c = 0; c < ic; ++c)
    {
        int fact_x_s=threadIdx.x+blockDim.x*c;//first x index within the shared tile
        int x_s=fact_x_s+blockDim.x*ic;//second x index within the shared tile
        int y_s0=threadIdx.y*share_x*ic;//first y offset within the shared tile
        int y_s1=y_s0+blockDim.y*share_x*ic;//second y offset within the shared tile
        int fact_iw=fact_x_s%ic+stride-ic;//clamp limit: last same-channel element of a row
        int x0=min(in_x0+fact_x_s,fact_iw);
        int x1=min(in_x1+fact_x_s,fact_iw);
        int y0=min(in_y0+threadIdx.y,ih-1)*stride;//rows clamped to the last input row
        int y1=min(in_y1+threadIdx.y,ih-1)*stride;
        data[y_s0+fact_x_s]=in[y0+x0];
        data[y_s0+x_s]=in[y0+x1];
        data[y_s1+fact_x_s]=in[y1+x0];
        data[y_s1+x_s]=in[y1+x1];
    }
    __syncthreads();
    //Average each 2x2 shared-memory block into one output element.
    for (int c = 0; c <ic ; ++c)
    {
        int fact_x=out_x+blockDim.x*c;
        if(out_y<oh&&fact_x<ow*ic)
        {
            int fact_x_s=threadIdx.x+blockDim.x*c;
            int srow1=threadIdx.y*2*share_x*ic;
            int srow2=srow1+share_x*ic;
            int scol1=(fact_x_s / ic) * 2 * ic + fact_x_s % ic;
            int scol2=scol1 + ic;
            int index[4] = {srow1 + scol1,
                            srow1 + scol2,
                            srow2 + scol1,
                            srow2 + scol2};
            int out_idx = out_y * ow*ic + fact_x;
            out[out_idx] = 0.25f * (data[index[0]] + data[index[1]] + data[index[2]] + data[index[3]]);
        }
    }
}
//Host wrapper: downscale a channel-interleaved float image to half size
//(output extents are ceil(w/2) x ceil(h/2)) using kernel_halfsizebyshare1.
//NOTE(review): no CUDA error checking on any call here — failures are silent.
void halfsize_by_cuda(float * const out_image,float const * const in_image, int const weight,int const height,int const channels)
{
    int ow=(weight+1)>>1;//output width: ceil(weight/2)
    int oh=(height+1)>>1;//output height: ceil(height/2)
    int const size_in=weight*height;
    int const size_out=ow*oh;
    int const bytes_in=size_in*channels* sizeof(float);
    int const bytes_out=size_out*channels* sizeof(float);
    float *d_in=NULL;
    float *d_out=NULL;
    cudaMalloc((void**)&d_in,bytes_in);
    cudaMalloc((void**)&d_out,bytes_out);
    int const x=32;//block width
    int const y=4;//block height
    int const share_x=x*4;//shared tile width in pixels (2x coarsening * 2x input)
    int const share_y=y*2;//shared tile height in pixels
    dim3 block (x,y,1);
    dim3 grid ((ow-1+x*2)/(x*2),(oh-1+y)/y,1);//each thread produces 2 output pixels in x
    cudaMemcpy(d_in,in_image,bytes_in,cudaMemcpyHostToDevice);
    kernel_halfsizebyshare1<<<grid,block,share_x*share_y*channels* sizeof(float)>>>(d_out,d_in,ow,oh,weight,height,channels);
    cudaMemcpy(out_image,d_out,bytes_out,cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
|
11,897 | #include "includes.h"
// Adaptive logarithmic tone-mapping curve: compress an HDR channel value into
// display range using bias parameter q and scale k.
// NOTE(review): maxLum is a global symbol defined elsewhere in the project
// (presumably the scene's maximum luminance — confirm).
// NOTE(review): log/log10 here take double precision even for float inputs;
// kept as-is because changing them would perturb results.
__device__ float adaptive_mapping(float k, float q, float val_pixel){
    return (k*log(1 + val_pixel))/((100*log10(1 + maxLum)) * ( powf((log(2+8*(val_pixel/maxLum))), (log(q)/log(0.5)) ) ) );
}
// Apply the adaptive tone-mapping curve to every channel of every pixel.
// 2D launch, one thread per pixel. BLUE/GREEN/RED are channel-offset macros
// defined elsewhere in the project.
// NOTE(review): `channels` and `depth` are unused and the stride of 3 is
// hard-coded — confirm inputs are always 3-channel interleaved.
__global__ void tonemap_adaptive(float* imageIn, float* imageOut, int width, int height, int channels, int depth, float q, float k){
    //printf("maxLum : %f\n", maxLum);
    int Row = blockDim.y * blockIdx.y + threadIdx.y;
    int Col = blockDim.x * blockIdx.x + threadIdx.x;
    if(Row < height && Col < width) {
        imageOut[(Row*width+Col)*3+BLUE] = adaptive_mapping(k, q, imageIn[(Row*width+Col)*3+BLUE]);
        imageOut[(Row*width+Col)*3+GREEN] = adaptive_mapping(k, q, imageIn[(Row*width+Col)*3+GREEN]);
        imageOut[(Row*width+Col)*3+RED] = adaptive_mapping(k, q, imageIn[(Row*width+Col)*3+RED]);
    }
}
11,898 | #include <iostream>
#include <cmath>
#include <functional>
#include <random>
#include <sys/time.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/generate.h>
#define N 4
double wtime()
{
    // Wall-clock time in seconds with microsecond resolution (gettimeofday).
    struct timeval now;
    gettimeofday (&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
float initMatr2()
{
    // Return one uniform random float in [0, 100), seeding a fresh engine
    // from the OS entropy source on every call (intentionally stateless).
    std::default_random_engine engine;
    engine.seed(std::random_device()());
    std::uniform_real_distribution<float> uniform(0, 100);
    return uniform(engine);
}
void thrustCuda()
{
    // Fill two length-N host vectors with uniform random floats in [0, 100),
    // add them element-wise on the device with thrust::transform, and print
    // both inputs and the result.
    thrust::host_vector<float> mA(N);
    thrust::host_vector<float> mB(N);
    thrust::generate(mA.begin(), mA.end(), initMatr2);
    thrust::generate(mB.begin(), mB.end(), initMatr2);
    thrust::device_vector<float> mA_d(mA);
    thrust::device_vector<float> mB_d(mB);
    thrust::device_vector<float> mC_d(mB);
    // NOTE(review): the loop repeats the SAME whole-vector transform N times;
    // the result is identical after the first pass — likely benchmarking
    // filler rather than intentional logic.
    for (int i = 0; i < N; i++)
        thrust::transform(mA_d.begin(), mA_d.end(), mB_d.begin(),
                          mC_d.begin(), thrust::plus<float>());
    thrust::host_vector<float> mC(mC_d);
    for (int i = 0; i < N; i++)
        std::cout << mA[i] << " ";
    std::cout << std::endl;
    for (int i = 0; i < N; i++)
        std::cout << mB[i] << " ";
    std::cout << std::endl;
    for (int i = 0; i < N; i++)
        std::cout << mC[i] << " ";
    std::cout << std::endl;
}
int main()
{
    // Time the thrust-based vector addition demo and print the elapsed
    // wall-clock seconds.
    double time = -wtime();
    thrustCuda();
    time += wtime();
    std::cout << time << std::endl;
    return 0;
}
11,899 | #include <stdio.h>
// This function (aka "Kernel") runs on the GPU
__global__ void hello_world()
{
    // Each GPU thread announces itself by its index within the block.
    const int tid = threadIdx.x;
    printf("Hello World from Thread %d !\n", tid);
}
int main(void)
{
    // Launch a single GPU thread of the greeting kernel, then block until
    // the device is idle so the kernel's printf output is flushed before
    // the process exits.
    hello_world<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
// Kernel that deliberately never terminates (empty busy-spin). Launching it
// occupies the device until the driver watchdog or a timeout kills it —
// diagnostic/test code, not for production use.
__global__ void test()
{
    do
    {
    }while(1);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.