serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
8,601 | #include "includes.h"
__global__ void calcularCRS(int *val, int *col_ind, int *row_ptr, int *u, int *resultado, int l ){
// Sparse-matrix times dense-matrix product using CRS/CSR storage with 1-based
// (Fortran-style) indices in row_ptr/col_ind — hence the "-1" adjustments below.
// Thread (i,j) computes one output element: (row i of the sparse matrix) dot
// (column j of u). u and resultado are dense, row-major, with row length l.
// NOTE(review): there is no bounds check on i or j — this assumes the launch
// grid exactly matches the matrix dimensions; confirm at the call site.
int i = threadIdx.x + blockIdx.x*blockDim.x; // 0 - 9
int j = threadIdx.y + blockIdx.y*blockDim.y; // 0 - 9
int suma = 0;
// Walk the nonzeros of row i (1-based offsets converted to 0-based).
for(int k = row_ptr[i]-1; k < row_ptr[i+1]-1; k++){
suma += val[k] * u[j + ( (col_ind[k]-1) * l) ];
}
resultado[j+i*l] = suma;
} |
8,602 | //scp -P 2200 ./input.ppm o.shtepa@sandbox.zhores.net:./
#include <assert.h>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* for strncmp() used by loadPPM */
#define HISTOGRAM_LENGTH 512
#define BLOCK_SIZE 512
#define PGMHeaderSize 0x40
//--------------------------------------------------------------------------------------------------------------------
// Loads a binary PGM ("P5", grayscale, *channels=1) or PPM ("P6", RGB,
// *channels=3) image from 'file'.
// If *data is non-NULL it is reused (dimensions are sanity-checked against
// *w/*h); otherwise a buffer of width*height*channels bytes is malloc'd and
// the caller owns it. On success *w/*h/*channels describe the image.
// Returns true on success, false on any error.
inline bool loadPPM(const char *file, unsigned char **data, unsigned int *w, unsigned int *h, unsigned int *channels)
{
    unsigned int width = 0;
    unsigned int height = 0;
    unsigned int maxval = 0;
    unsigned int i = 0;
    char header[PGMHeaderSize];

    FILE *fp = fopen(file, "rb");
    if (!fp) {
        fprintf(stderr, "__LoadPPM() : unable to open file\n");
        return false;
    }
    // The magic number on the first line determines the channel count.
    if (fgets(header, PGMHeaderSize, fp) == NULL) {
        fprintf(stderr, "__LoadPPM() : reading PGM header returned NULL\n");
        fclose(fp); // BUGFIX: the file handle was leaked on every error path
        return false;
    }
    if (strncmp(header, "P5", 2) == 0) {
        *channels = 1;
    } else if (strncmp(header, "P6", 2) == 0) {
        *channels = 3;
    } else {
        fprintf(stderr, "__LoadPPM() : File is not a PPM or PGM image\n");
        *channels = 0;
        fclose(fp); // BUGFIX: leak on error path
        return false;
    }
    // Parse width, height and maxval. Lines starting with '#' are comments.
    // sscanf may deliver 1..3 of the remaining values per line, so 'i' counts
    // how many have been read so far.
    while (i < 3) {
        if (fgets(header, PGMHeaderSize, fp) == NULL) {
            fprintf(stderr, "__LoadPPM() : reading PGM header returned NULL\n");
            fclose(fp); // BUGFIX: leak on error path
            return false;
        }
        if (header[0] == '#') {
            continue;
        }
        if (i == 0) {
            i += sscanf(header, "%u %u %u", &width, &height, &maxval);
        } else if (i == 1) {
            i += sscanf(header, "%u %u", &height, &maxval);
        } else if (i == 2) {
            i += sscanf(header, "%u", &maxval);
        }
    }
    if (NULL != *data) {
        // Caller supplied a buffer: only sanity-check the dimensions.
        if (*w != width || *h != height) {
            fprintf(stderr, "__LoadPPM() : Invalid image dimensions.\n");
        }
    } else {
        *data = (unsigned char *) malloc(sizeof(unsigned char) * width * height * *channels);
        // BUGFIX: the original tested 'data' (the always-non-NULL out
        // parameter) instead of '*data' (the malloc result).
        if (!*data) {
            fprintf(stderr, "Unable to allocate hostmemory\n");
            fclose(fp); // BUGFIX: leak on error path
            return false;
        }
        *w = width;
        *h = height;
    }
    // Read the raw pixel payload.
    if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) == 0) {
        fprintf(stderr, "__LoadPPM() : read data returned error.\n");
        fclose(fp);
        return false;
    }
    fclose(fp);
    return true;
}
//--------------------------------------------------------------------------------------------------------------------
__global__ void grayscale(unsigned char * input, unsigned char * output, int size)
{
    // One thread per pixel: convert an interleaved RGB image ('input', 3 bytes
    // per pixel) into an 8-bit grayscale image using the weighting
    // 0.21*R + 0.71*G + 0.07*B. 'size' is the pixel count (w*h).
    int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= size)
        return;
    float r = (float)input[3 * pixel];
    float g = (float)input[3 * pixel + 1];
    float b = (float)input[3 * pixel + 2];
    output[pixel] = (unsigned char)(0.21 * r + 0.71 * g + 0.07 * b);
}
//--------------------------------------------------------------------------------------------------------------------
// Builds a histogram of byte values using per-block privatization: each block
// accumulates into a shared-memory copy, then flushes it to the global
// histogram with one atomic per bin.
// Preconditions: blockDim.x >= HISTOGRAM_LENGTH (so every bin is zeroed and
// flushed), and 'histo' zero-initialized by the caller (the kernel only adds).
__global__ void histogram(unsigned char *input, unsigned int *histo, int size)
{
    __shared__ unsigned int histo_private[HISTOGRAM_LENGTH];
    if (threadIdx.x < HISTOGRAM_LENGTH) histo_private[threadIdx.x] = 0;
    __syncthreads();
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // stride is the total number of threads in grid (threads/block*blocks/grid)
    int stride = blockDim.x * gridDim.x;
    // All threads handle blockDim.x * gridDim.x consecutive elements
    while (i < size)
    {
        // BUGFIX: accumulate into the *private* shared-memory histogram.
        // The original did atomicAdd on the global 'histo' here, so the
        // privatized copy stayed all-zero and served no purpose.
        atomicAdd(&(histo_private[input[i]]), 1);
        i += stride;
    }
    __syncthreads();
    // Flush this block's private counts into the global histogram.
    if (threadIdx.x < HISTOGRAM_LENGTH)
    {
        atomicAdd(&(histo[threadIdx.x]), histo_private[threadIdx.x]);
    }
}
//--------------------------------------------------------------------------------------------------------------------
// Loads input.ppm, converts it to grayscale on the GPU, then computes the
// grayscale histogram on the GPU.
int main()
{
    unsigned char *data = NULL, *d_idata = NULL, *grayImage = NULL;
    unsigned int *d_odata = NULL;
    unsigned int *histo = NULL;
    unsigned int w, h, channels;
    unsigned int numElements;
    size_t datasize;
    // BUGFIX: the histogram buffer holds HISTOGRAM_LENGTH unsigned ints, not
    // an image-sized byte buffer as the original allocated/copied.
    size_t histosize = HISTOGRAM_LENGTH * sizeof(unsigned int);
    if (!loadPPM("input.ppm", &data, &w, &h, &channels))
    {
        fprintf(stderr, "Failed to open File\n");
        exit(EXIT_FAILURE);
    }
    printf("Loaded file with w:%d h:%d channels:%d \n", w, h, channels);
    numElements = w * h * channels;
    datasize = numElements * sizeof(unsigned char);
    cudaMalloc(&d_idata, datasize);
    cudaMalloc(&grayImage, datasize);
    cudaMalloc(&d_odata, histosize);
    // BUGFIX: the histogram kernel only accumulates, so bins must start at 0.
    cudaMemset(d_odata, 0, histosize);
    printf("Allocate Devicememory for data\n");
    cudaMemcpy(d_idata, data, datasize, cudaMemcpyHostToDevice);
    printf("Copy input data from the host memory to the CUDA device\n");
    dim3 threadsPerBlock(BLOCK_SIZE);
    dim3 blocksPerGrid((numElements - 1) / BLOCK_SIZE + 1);
    printf("CUDA kernel launch with [%d %d] blocks of [%d %d] threads\n", blocksPerGrid.x, blocksPerGrid.y,
           threadsPerBlock.x, threadsPerBlock.y);
    grayscale<<<blocksPerGrid, threadsPerBlock>>>(d_idata, grayImage, w * h);
    printf("Done grayscale\n");
    histogram<<<blocksPerGrid, threadsPerBlock>>>(grayImage, d_odata, w * h);
    printf("Done histogram\n");
    // BUGFIX: copy the histogram into a correctly-typed, correctly-sized host
    // buffer; the original copied 'datasize' bytes into the byte image buffer.
    histo = (unsigned int *)malloc(histosize);
    if (histo != NULL)
    {
        cudaMemcpy(histo, d_odata, histosize, cudaMemcpyDeviceToHost);
        printf("Copy output data from the CUDA device to the host memory\n");
        free(histo);
    }
    free(data);
    cudaFree(d_idata);
    cudaFree(d_odata);
    cudaFree(grayImage);
    printf("Free device and host memory\n");
}
|
8,603 | /****************************************************************************************
* CONNECTED COMPONENTS ON THE GPU
* ==============================
*
*
*
* Copyright (c) 2010 International Institute of Information Technology,
* Hyderabad.
* All rights reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation for research purpose is hereby granted without fee,
* provided that the above copyright notice and this permission notice appear
* in all copies of this software and that you do not sell the software.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE.
*
* Please report any issues to Jyothish Soman (jyothish@students.iiit.ac.in)
*
* Please cite following paper, if you use this software for research purpose
*
* "Fast GPU Algorithms for Graph Connectivity, Jyothish Soman, K. Kothapalli,
* and P. J. Narayanan, in Proc. of Large Scale Parallel Processing,
* IPDPS Workshops, 2010.
*
*
*
*
* Created by Jyothish Soman
*
****************************************************************************************/
/*
*
* Function to speedup the selection process in the first iteration
* The ancestor tree is initialized to the add the edge from larger edge to its smaller neighbour in this method.
* The process is random and each edge performs this task independently.
* select_winner_init
*
*/
// One undirected edge packed into 64 bits: the low 32 bits hold one endpoint,
// the high 32 bits the other (see the unpacking in the select_winner kernels).
struct ed{
long long int x;
};
typedef struct ed edge;
// Host-side graph description: num_e edges, num_n nodes, adjacency lists
// 'neigh' and per-node degrees 'deg'.
// NOTE(review): 'neigh'/'deg' are not referenced by the kernels in this file.
struct grp{
int num_e,num_n;
int**neigh,*deg;
};
typedef struct grp my_graph;
__global__
void select_winner_init(int* an,edge *ed_list,int num_e,int num_n,int*flag,char*mark){
    // One thread per edge (512 threads per block, 2-D grid of blocks).
    // Unpack the edge's two endpoints from the packed 64-bit word and hook the
    // larger-numbered endpoint onto the smaller one in the ancestor array 'an'.
    // Several edges may write an[mx] concurrently; per the algorithm's design
    // any single winner is acceptable for this initialization step.
    int linearBlock = blockIdx.y * gridDim.x + blockIdx.x;
    int tid = linearBlock * 512 + threadIdx.x;
    if (tid >= num_e)
        return;
    long long int packed = ed_list[tid].x;
    int ex = (int)packed & 0xFFFFFFFF;   // low word: first endpoint
    int ey = (int)(packed >> 32);        // high word: second endpoint
    int mx = (ex > ey) ? ex : ey;        // larger endpoint
    int mn = ex + ey - mx;               // the other (smaller) endpoint
    an[mx] = mn;
}
/*
Function to hook from higher valued tree to lower valued tree. For details, read the PPL Paper or LSPP paper or my master's thesis.
Following greener's algorithm, there are two iterations, one from lower valued edges to higher values edges
and the second iteration goes vice versa. The performance of this is largely related to the input.
*/
__global__
void select_winner2(int* an,edge *ed_list,int num_e,int num_n,int*flag,char*mark){
// One thread per edge (512 threads/block, 2-D grid). For each edge not yet
// retired (mark==0), compare the two endpoints' current ancestors and hook the
// LOWER-valued ancestor onto the higher one (an[mn]=mx). A shared flag records
// whether any edge in this block hooked; if so, the global *flag is raised so
// the host runs another iteration. Concurrent writes to an[] are part of the
// algorithm's randomized hooking (any winner is acceptable).
int a,b,x,y,a_x,a_y,mn,mx;
long long int t;
a=blockIdx.y*gridDim.x+blockIdx.x;
b=threadIdx.x;
__shared__ int s_flag;
a=a*512+b;
// NOTE(review): thread b==1 (not 0) acts as the block representative — fine
// as long as blockDim.x >= 2.
if(b==1)
s_flag=0;
__syncthreads();
if(a<num_e){
if(mark[a]==0){
// Unpack the edge endpoints: low/high 32 bits of the packed word.
t=ed_list[a].x;
x=(int)t & 0xFFFFFFFF;
y=(int)(t>>32);
a_x=an[x];
a_y=an[y];
mx=a_x>a_y?a_x:a_y;
mn=a_x+a_y-mx;
if(mn==mx){
// Both endpoints already share an ancestor: retire this edge.
mark[a]=-1;
}
else{
an[mn]=mx;
s_flag=1;
}
}
}
__syncthreads();
if(b==1){
if(s_flag==1){
*flag=1;
}
}
return;
}
/*
Function to hook from lower valued to higher valued trees.
*/
__global__
void select_winner(int* an,edge *ed_list,int num_e,int num_n,int*flag,char*mark){
// Mirror of select_winner2: for each active edge (mark==0) the HIGHER-valued
// ancestor is hooked onto the lower one (an[mx]=mn). A shared flag notes
// whether any hook happened in this block; if so the global *flag is raised so
// the host iterates again. Races on an[] are intentional (randomized hooking).
int a,b,x,y,a_x,a_y,mn,mx;
long long int t;
a=blockIdx.y*gridDim.x+blockIdx.x;
b=threadIdx.x;
__shared__ int s_flag;
a=a*512+b;
// NOTE(review): thread b==1 is the block representative; requires blockDim>=2.
if(b==1)
s_flag=0;
__syncthreads();
if(a<num_e){
if(mark[a]==0){
// Unpack the edge endpoints: low/high 32 bits of the packed word.
t=ed_list[a].x;
x=(int)t & 0xFFFFFFFF;
y=(int)(t>>32);
a_x=an[x];
a_y=an[y];
mx=a_x>a_y?a_x:a_y;
mn=a_x+a_y-mx;
if(mn==mx){
// Endpoints already share an ancestor: retire this edge.
mark[a]=-1;
}
else{
an[mx]=mn;
s_flag=1;
}
}
}
__syncthreads();
if(b==1){
if(s_flag==1){
*flag=1;
}
}
return;
}
// Pointer jumping: each node replaces its ancestor with its grandparent
// (an[a] = an[an[a]]), halving tree height per call. Raises *flag if any node
// moved, so the host knows to call again.
__global__
void p_jump(int num_n,int* an,int *flag){
    int a = (blockIdx.y * gridDim.x + blockIdx.x) * 512 + threadIdx.x;
    __shared__ int s_f;
    if (threadIdx.x == 0)
        s_f = 0;
    __syncthreads();
    // BUGFIX: the original returned early for a >= num_n BEFORE this barrier,
    // making __syncthreads() divergent in the last block (undefined behavior).
    // Guard the work instead of returning.
    if (a < num_n) {
        int y = an[a];
        int x = an[y];
        if (x != y) {
            s_f = 1;
            an[a] = x;
        }
    }
    // BUGFIX: barrier added before reading s_f — the original read it with no
    // synchronization after the other threads' writes.
    __syncthreads();
    if (threadIdx.x == 0 && s_f == 1) {
        *flag = 1;
    }
}
/*
Function to do a masked jump
Nodes are either root nodes or leaf nodes. Leaf nodes are directly connected to the root nodes, hence do not
need to jump itertively. Once root nodes have reascertained the new root nodes, the leaf nodes can just jump once
*/
// Pointer jumping restricted to still-active nodes (mask==0). A node whose
// parent is already a root (an[an[a]]==an[a]) is retired by setting
// mask[a]=-1; otherwise it jumps one level and the iteration flag is raised.
__global__
void p_jump_masked(int num_n,int* an,int *flag,char*mask){
    int a = (blockIdx.y * gridDim.x + blockIdx.x) * 512 + threadIdx.x;
    __shared__ int s_f;
    if (threadIdx.x == 0)
        s_f = 0;
    __syncthreads();
    // BUGFIX: the original returned early for a >= num_n before the barrier
    // (divergent __syncthreads = undefined behavior) and then indexed mask[a]
    // unguarded; both are fixed by guarding the work instead.
    if (a < num_n && mask[a] == 0) {
        int y = an[a];
        int x = an[y];
        if (x != y) {
            s_f = 1;
            an[a] = x;
        }
        else {
            mask[a] = -1;
        }
    }
    // BUGFIX: barrier before reading s_f written by other threads.
    __syncthreads();
    if (threadIdx.x == 0 && s_f == 1) {
        *flag = 1;
    }
}
/*
Function for pointer jumping in the tree, the tree height is shortened by this method.
Here the assumption is that all the nodes are root nodes, or not known whether they are leaf nodes.
Works well in the early iterations
*/
// Single pointer jump for leaf nodes (mask==1): after p_jump_masked has
// converged their parent is a root, so one hop suffices.
__global__
void p_jump_unmasked(int num_n,int* an,char *mask){
    int a = (blockIdx.y * gridDim.x + blockIdx.x) * 512 + threadIdx.x;
    if (a >= num_n)
        return;
    // BUGFIX: removed the __syncthreads() that followed the divergent early
    // return above (undefined behavior). No thread here consumes another
    // thread's work, so no barrier is needed at all.
    if (mask[a] == 1) {
        an[a] = an[an[a]];
    }
}
/*
Function to create self pointing tree.
*/
__global__
void update_an(int*an,int num_n){
    // Initialize the ancestor array: every node starts as its own root
    // (a self-pointing tree), one thread per node.
    int node = (blockIdx.y * gridDim.x + blockIdx.x) * 512 + threadIdx.x;
    if (node < num_n) {
        an[node] = node;
    }
}
/*
Function to initialize each edge as a clean copy.
*/
__global__
void update_mark(char *mark,int num_e){
    // Reset every edge to the "clean / not yet processed" state (0),
    // one thread per edge.
    int edgeId = (blockIdx.y * gridDim.x + blockIdx.x) * 512 + threadIdx.x;
    if (edgeId < num_e) {
        mark[edgeId] = 0;
    }
}
/*
Function to check if each node is the parent of itself or not and to update it as a leaf or root node
*/
__global__
void update_mask(char *mask,int n,int *an){
    // Classify each node by its ancestor entry: 0 = root (points to itself),
    // 1 = non-root/leaf. One thread per node.
    int node = (blockIdx.y * gridDim.x + blockIdx.x) * 512 + threadIdx.x;
    if (node < n) {
        mask[node] = (an[node] == node) ? 0 : 1;
    }
}
|
8,604 | /*Parallel and Distributed Systems
--CUDA KNN Algorithm--
-Author: Mitsios Georgios
-September 2014
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <float.h>
#include <math.h>
// Checks the status of a CUDA runtime call; on failure prints the error string
// with file/line context and aborts the program with exit(1).
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// A set of points stored as one flat double array:
// leading_dim = coordinates per point (dimensions),
// secondary_dim = number of points (see the assignments in main()).
typedef struct{
double *data;
int leading_dim;
int secondary_dim;
} knn_struct;
//1st input option. Random initialization
void random_initialization(knn_struct *set, int cal){
    // Fill set->data with uniform random doubles in [-50, 50].
    // 'cal' perturbs the RNG seed so successive calls yield different data.
    double *dst = set->data;
    int total = set->leading_dim * set->secondary_dim;
    srand(cal * time(NULL));
    for (int idx = 0; idx < total; idx++) {
        dst[idx] = 100 * (double)rand() / RAND_MAX - 50;
    }
}
//2nd input option. Import data from benchmark files
// Imports single-precision benchmark data from disk and converts it to the
// double-precision layout the rest of the program expects. The source file is
// chosen by set size: sets with secondary_dim > 100000 read baseREAD.bin,
// smaller ones read queriesREAD.bin. On success set->data owns the new buffer.
void initialize(knn_struct *set){
    int n = set->leading_dim;
    int m = set->secondary_dim;
    float *tmp_set = (float*)malloc(n*m*sizeof(float));
    double *tempArray = (double*)malloc(n*m*sizeof(double));
    if (tmp_set == NULL || tempArray == NULL){
        fprintf(stderr, "initialize() : host allocation failed\n");
        exit(1);
    }
    FILE *fp;
    if (m>100000){
        fp = fopen("baseREAD.bin", "rb");
    }
    else{
        fp = fopen("queriesREAD.bin", "rb");
    }
    if (fp == NULL){
        // BUGFIX: the original printed a message but then called fread/fclose
        // on the NULL FILE*, crashing the program.
        printf("Can't open output file");
        exit(1);
    }
    // BUGFIX: fread(ptr, size, count, fp) takes the element size then the
    // element count; the original swapped them (same byte count, but the
    // return value was meaningless as an element count).
    size_t t = fread(tmp_set, sizeof(float), (size_t)n*m, fp);
    fclose(fp);
    if (t != (size_t)n*m){
        fprintf(stderr, "initialize() : short read from input file\n");
    }
    //Convert float input data to doubles
    for (int i=0;i<n*m;i++){
        tempArray[i] = (double)tmp_set[i];
    }
    free(tmp_set); // BUGFIX: the temporary float buffer was leaked
    set->data = tempArray;
}
//This function was used to normalize data from -50 to 50 to match the random points
void input_normalisation(knn_struct *base, knn_struct *queries){
// Rescales both data sets so values lie roughly in [-50, 50], matching the
// range produced by random_initialization. The divisor is the largest
// magnitude found across both sets.
// NOTE(review): minVal starts at 1 (not +inf), so a data set whose minimum
// exceeds 1 leaves minVal==1; minVal only matters when negative or larger in
// magnitude than maxVal — confirm this seeding is intentional.
int i;
int N = base->leading_dim, ni = queries->leading_dim;
int M = base->secondary_dim, mi = queries->secondary_dim;
double maxVal=0, minVal=1;
double *tmp_data = base->data;
double *tmp_queries = queries->data;
// Scan both sets for the extreme values.
for (i=0;i<N*M;i++){
if (tmp_data[i]>maxVal) { maxVal=tmp_data[i]; }
if (tmp_data[i]<minVal) { minVal=tmp_data[i]; }
}
for (i=0;i<ni*mi;i++){
if (tmp_queries[i]>maxVal) { maxVal=tmp_queries[i]; }
if (tmp_queries[i]<minVal) { minVal=tmp_queries[i]; }
}
// Use whichever of |min| / max is larger as the scaling divisor.
if ( minVal<0 ) { minVal = minVal*(-1); }
if ( minVal>maxVal) { maxVal = minVal; }
for (i=0;i<N*M;i++){
tmp_data[i] = (tmp_data[i]*50)/maxVal;
}
for (i=0;i<ni*mi;i++){
tmp_queries[i] = (tmp_queries[i]*50)/maxVal;
}
}
// Writes N*M doubles from 'data' to binary file 'file'.
void save_d(double* data, char* file, int N, int M){
    FILE *outfile;
    printf("Saving data to file: %s\n", file);
    if((outfile=fopen(file, "wb")) == NULL){
        printf("Can't open output file");
        // BUGFIX: the original fell through and passed the NULL FILE* to
        // fwrite/fclose, crashing instead of skipping the save.
        return;
    }
    fwrite(data, sizeof(double), (size_t)N*M, outfile);
    fclose(outfile);
}
// Writes N*M ints from 'data' to binary file 'file'.
void save_int(int* data, char* file, int N, int M){
    FILE *outfile;
    printf("Saving data to file: %s\n", file);
    if((outfile=fopen(file, "wb")) == NULL){
        printf("Can't open output file");
        // BUGFIX: the original fell through and passed the NULL FILE* to
        // fwrite/fclose, crashing instead of skipping the save.
        return;
    }
    fwrite(data, sizeof(int), (size_t)N*M, outfile);
    fclose(outfile);
}
// Releases the heap buffer owned by a knn_struct (the struct itself is not freed).
void clean(knn_struct* d){
free(d->data);
}
//This kernel function computes the euclidean distance between queries and objects
// Computes squared Euclidean distances between queries and dataset objects.
// Launch layout (see main): one block per query, one thread per dimension
// (blockDim.x == numDim), so queries[index] is coordinate threadIdx.x of query
// blockIdx.x. Each thread adds its dimension's squared difference into
// dist[query*numObj + object].
// 'k' is unused but kept for interface compatibility with the call sites.
// Requires compute capability 6.0+ for atomicAdd on double.
__global__ void calculate_distance(double* queries, double* dataset, double *dist, int* k, int* n){
    int numObj = *n;
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Thread 0 zeroes this query's row of accumulated distances.
    if (threadIdx.x == 0) {
        for (int i = 0; i < numObj; i++) {
            dist[blockIdx.x * numObj + i] = 0;
        }
    }
    __syncthreads();
    // Accumulate the squared per-dimension differences.
    for (int ni = 0; ni < numObj; ni++) {
        double diff = queries[index] - dataset[threadIdx.x + ni * blockDim.x];
        // BUGFIX: all blockDim.x threads update the same dist cell; the
        // original's plain read-modify-write raced and lost updates. An atomic
        // add makes the accumulation correct (no barrier needed for it).
        atomicAdd(&dist[blockIdx.x * numObj + ni], diff * diff);
    }
}
/*This kernel function computes the knn of the temporary set of data (which is only a portion of the
original data. The way this function works is better expalined in the report sheet*/
__global__ void compute_knn(double* NNdist, double* dist, int* numObj, int* NNidx, int* bonusID, int *offset, double *tmpDist, int *tmpID, int *N){
// Selects the k nearest neighbours (k == blockDim.x; one block per query) from
// this group's 'dist' row using a sliding, insertion-sort-like sweep, then
// stores the partial result into tmpDist/tmpID at group slot 'off' so the
// groups can later be merged by knnSelection.
// NOTE(review): the hard-coded 'threadIdx.x == 7' below implies blockDim.x==8
// (k==8); other k values look unsupported — confirm before reuse.
// NOTE(review): the __syncthreads() at the end of the thread-dependent branch
// below is a divergent barrier (UB per the CUDA model); left untouched here
// because the surrounding logic depends on the exact statement ordering.
int i, n = *numObj, off = *offset, Nol= *N;
int start = blockIdx.x * n;                  // first dist element of this query's row
int index = threadIdx.x + blockIdx.x*blockDim.x;  // this thread's NN slot
int position = blockDim.x+threadIdx.x+1;     // per-thread lag into the sweep window
double temp=0;
int tmp=0;
// Reset this thread's candidate: +inf distance, invalid ids.
NNdist[index] = DBL_MAX;
NNidx[index] = (-1);
bonusID[index] = (-1);
__syncthreads();
// Sweep the row; each thread examines dist[i - position] when in range and
// swaps it into its NN slot if closer, bubbling the displaced value back.
for (i=start; i<(start+n+blockDim.x);i++){
if ( ((i-position)< (start +n)) && (i-position) >= start ){
if ( NNdist[index] > dist[i - position] ){
temp = NNdist[index];
NNdist[index] = dist[i - position];
dist[i - position] = temp;
// A non-negative bonusID means the incoming value was displaced from a
// neighbouring slot and already carries an id; otherwise it is a fresh
// element whose global id is its row position plus the group offset.
if (bonusID[index]>=0){
tmp = NNidx[index];
NNidx[index]=bonusID[index];
bonusID[index]= tmp;
}
else{
bonusID[index] = NNidx[index];
NNidx[index] = (i - position - start) + (off*n);
}
}
__syncthreads();
}
__syncthreads();
// One designated thread shifts the displaced-id pipeline left by one slot.
if (threadIdx.x == 7){
for (int j=0;j<(blockDim.x-1);j++){
bonusID[j+blockIdx.x*blockDim.x] = bonusID[j+blockIdx.x*blockDim.x +1];
}
bonusID[index] = (-1);
}
__syncthreads();
}
// Publish this group's k results for query blockIdx.x into the merge buffers.
tmpDist[ off*blockDim.x + blockIdx.x*(Nol*blockDim.x) + threadIdx.x ] = NNdist[index];
tmpID[ off*blockDim.x + blockIdx.x*(Nol*blockDim.x) + threadIdx.x ] = NNidx[index];
__syncthreads();
}
/*This is the last kernel function which compares the knns that have been computed from each group
of data to find the final knn values and ids. The operation is similar to the "compute_knn" function*/
__global__ void knnSelection(double* NNdist, int* NNidx, int* originalID, double* dist, int* numObj){
// Merge step: compares the per-group candidate lists (n == nol*k entries per
// query in 'dist'/'originalID') and selects the final k nearest neighbours
// into NNdist/NNidx. One block per query, one thread per NN slot; the sweep
// mirrors compute_knn but swaps ids directly instead of using a bonus buffer.
// NOTE(review): the window index here is i - blockDim.x + threadIdx.x + 1
// (plus threadIdx), whereas compute_knn uses i - (blockDim.x + threadIdx.x + 1)
// (minus threadIdx) — confirm the asymmetry is intended.
int i, n=*numObj;
int start = blockIdx.x * n;                  // first candidate of this query
int index = threadIdx.x + blockIdx.x*blockDim.x;  // this thread's output slot
double temp=0;
int tmp=0;
// Reset this thread's result slot.
NNdist[index] = DBL_MAX;
NNidx[index] = (-1);
__syncthreads();
// Sweep all candidates; swap closer ones into the slot, bubbling the
// displaced distance/id back into the candidate arrays.
for (i=start; i<(start+n+blockDim.x);i++){
if ( ((i-blockDim.x+threadIdx.x+1)< (start +n)) && (i-blockDim.x+threadIdx.x+1) >= start ){
if ( NNdist[index] > dist[i - blockDim.x + threadIdx.x + 1] ){
temp = NNdist[index];
NNdist[index] = dist[i - blockDim.x + threadIdx.x + 1];
dist[i - blockDim.x + threadIdx.x + 1] = temp;
tmp = NNidx[index];
NNidx[index]=originalID[i - blockDim.x + threadIdx.x + 1];
originalID[i - blockDim.x + threadIdx.x + 1]= tmp;
}
}
__syncthreads();
}
__syncthreads();
}
// Usage: <prog> numObjects numDim numQueries k
// Computes the k nearest neighbours of numQueries query points among
// numObjects dataset points (numDim coordinates each). The dataset is split
// into 'nol' groups sized from the free GPU memory; per-group KNNs are merged
// by knnSelection when nol > 1. Results and inputs are saved to binary files.
int main(int argc, char **argv){
    struct timeval first, second, lapsed;
    struct timezone tzp;
    // BUGFIX: validate the argument count before reading argv[1..4].
    if (argc < 5) {
        fprintf(stderr, "Usage: %s numObjects numDim numQueries k\n", argv[0]);
        return 1;
    }
    int numObjects = atoi(argv[1]); //pow(2,atoi(argv[1]));
    int numDim = atoi(argv[2]);
    int numQueries = atoi(argv[3]);
    int k = atoi(argv[4]);
    int pi; // later holds nol*k, the merge kernel's per-query candidate count
    //Getting GPU memory's info
    size_t mem_tot_0 = 0;
    size_t mem_free_0 = 0;
    cudaMemGetInfo(&mem_free_0, &mem_tot_0);
    size_t dist_size = numQueries*numObjects*sizeof(double);
    size_t data_size = numObjects*numDim*sizeof(double);
    size_t queries_size = numQueries*numDim*sizeof(double);
    size_t NNdist_size = numQueries*k*sizeof(double);
    size_t NNidx_size = numQueries*k*sizeof(int);
    size_t mem_req = dist_size + data_size + queries_size + NNdist_size + NNidx_size;
    size_t mem_req1 = dist_size + data_size;
    int nol; // number of groups the dataset is split into
    int ni;  // objects per group
    // NOTE(review): the parentheses make this mem_free_0 - queries_size
    // + NNdist_size + NNidx_size; a '+' before the parenthesis may have been
    // intended. Kept exactly as originally written.
    size_t mem_free_n = mem_free_0 - (queries_size -NNdist_size - NNidx_size);
    printf("\nGPU memory needed : %ld bytes \n", mem_req1);
    printf("Available GPU memory : %ld bytes \n", mem_free_n);
    //Computing the number of divisions to the data for proper memory usage
    if (mem_req> (mem_free_n ) ){
        nol = (mem_req1/mem_free_n);
        int e=0;
        while (nol>2){
            nol=nol/2;
            e++;
        }
        nol=pow(2,e+1); // round the division count up to a power of two
    }
    else {nol=1;}
    //Value 500 can be reduced to 200 or even 100 for lower memory GPUs
    if (numQueries>500) { nol = nol*(numQueries/500); }
    ni = numObjects/nol;
    printf("Data will be divided into %d groups \n", nol);
    printf("of %d elements per group for optimal operation\n\n", ni);
    char *dataset_file = "training_set.bin";
    char *query_file = "query_set.bin";
    char *KNNdist_file = "KNNdist.bin";
    char *KNNidx_file = "KNNidx.bin" ;
    double *tmpDist;
    int *tmpID;
    tmpDist = (double*)malloc(nol*numQueries*k*sizeof(double));
    tmpID = (int*)malloc(nol*numQueries*k*sizeof(int));
    printf("objects: %d\n", numObjects);
    printf("dimentions: %d\n", numDim);
    printf("queries: %d\n", numQueries);
    printf("k: %d\n", k);
    knn_struct training_set;
    knn_struct query_set;
    double *dist;
    double *NNdist;
    int *NNidx;
    training_set.leading_dim = numDim;
    training_set.secondary_dim = numObjects;
    query_set.leading_dim = numDim;
    query_set.secondary_dim = numQueries;
    /*======== Memory allocation ======*/
    training_set.data = (double*)malloc(numObjects*numDim*sizeof(double));
    query_set.data = (double*)malloc(numQueries*numDim*sizeof(double));
    NNdist = (double*)malloc(numQueries*k*sizeof(double));
    NNidx = (int*)malloc(numQueries*k*sizeof(int));
    dist = (double*)malloc(numObjects*numQueries*sizeof(double));
    double *d_data;
    double *d_queries;
    double *d_dist;
    double *d_NNdist;
    int *d_NNidx;
    int *d_bonusID;
    int *dev_k, *dev_n;
    int *dev_nol;
    double *d_tmpDist;
    int *d_tmpID;
    //GPU memory allocation
    CUDA_CHECK_RETURN(cudaMalloc( (void **)&d_data, ni*numDim*sizeof(double)) );
    CUDA_CHECK_RETURN(cudaMalloc( (void **)&d_queries, numQueries*numDim*sizeof(double)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&d_NNdist, numQueries*k*sizeof(double)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&d_NNidx, numQueries*k*sizeof(int)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&d_bonusID, numQueries*k*sizeof(int)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&d_tmpDist, nol*numQueries*k*sizeof(double)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&d_tmpID, nol*numQueries*k*sizeof(int)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&d_dist, numQueries*ni*sizeof(double)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_k, sizeof(int)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_n, sizeof(int)) );
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_nol, sizeof(int)) );
    /*======== initialize =========*/
    //Input option 1. Create random data
    random_initialization(&training_set, 1);
    random_initialization(&query_set, 2);
    //Input option 2. Import data from file
    //initialize(&training_set);
    //initialize(&query_set);
    /*The following function was used to normalize imported data values from benchmark files
    to be from -50 to 50, because there was some undefined error in previous compilations.
    It MAY OR MAY NOT BE USED */
    //input_normalisation(&training_set, &query_set);
    //Copy data to GPU memory
    CUDA_CHECK_RETURN(cudaMemcpy( d_queries, query_set.data, numQueries*numDim*sizeof(double), cudaMemcpyHostToDevice ) );
    CUDA_CHECK_RETURN(cudaMemcpy( dev_k, &k, sizeof(int), cudaMemcpyHostToDevice ) );
    CUDA_CHECK_RETURN(cudaMemcpy( dev_nol, &nol, sizeof(int), cudaMemcpyHostToDevice ) );
    CUDA_CHECK_RETURN(cudaMemcpy( dev_n, &ni, sizeof(int), cudaMemcpyHostToDevice ) );
    // NOTE(review): the three copies below upload uninitialized host buffers;
    // the kernels overwrite the device contents, so this only preallocates.
    CUDA_CHECK_RETURN(cudaMemcpy( d_tmpDist, tmpDist, nol*numQueries*k*sizeof(double), cudaMemcpyHostToDevice ) );
    CUDA_CHECK_RETURN(cudaMemcpy( d_tmpID, tmpID, nol*numQueries*k*sizeof(int), cudaMemcpyHostToDevice ) );
    CUDA_CHECK_RETURN(cudaMemcpy( d_dist, dist, numQueries*ni*sizeof(double), cudaMemcpyHostToDevice ) );
    int offset=0;
    int *dev_off=0;
    CUDA_CHECK_RETURN( cudaMalloc( (void **)&dev_off, sizeof(int)) );
    gettimeofday(&first, &tzp);
    /*This for loop is the core of the program. It sends each group of data to the GPU
    where it computes the distances between each query and each object. Then calls the
    "compute_knn" kernel function to find the actual KNNs*/
    for ( offset=0 ; offset<nol ; offset++ ){
        CUDA_CHECK_RETURN(cudaMemcpy( d_data, (training_set.data +offset*ni), ni*numDim*sizeof(double), cudaMemcpyHostToDevice ) );
        calculate_distance<<< numQueries, numDim>>> ( d_queries, d_data, d_dist, dev_k, dev_n);
        CUDA_CHECK_RETURN( cudaPeekAtLastError() );
        CUDA_CHECK_RETURN( cudaDeviceSynchronize() );
        CUDA_CHECK_RETURN(cudaMemcpy( dev_off, &offset, sizeof(int), cudaMemcpyHostToDevice ) );
        compute_knn<<< numQueries, k>>>(d_NNdist, d_dist, dev_n, d_NNidx, d_bonusID, dev_off, d_tmpDist, d_tmpID, dev_nol);
        CUDA_CHECK_RETURN( cudaPeekAtLastError() );
        CUDA_CHECK_RETURN( cudaDeviceSynchronize() );
    }
    CUDA_CHECK_RETURN(cudaMemcpy( tmpDist, d_tmpDist, nol*numQueries*k*sizeof(double), cudaMemcpyDeviceToHost ) );
    CUDA_CHECK_RETURN(cudaMemcpy( tmpID, d_tmpID, nol*numQueries*k*sizeof(int), cudaMemcpyDeviceToHost ) );
    /*In case of division of the original data, the temporary KNNs are being compared
    to form the final KNNs*/
    if (nol!=1){
        pi=nol*k;
        // BUGFIX: this copy's return status was previously ignored.
        CUDA_CHECK_RETURN(cudaMemcpy( dev_n, &pi, sizeof(int), cudaMemcpyHostToDevice ) );
        knnSelection<<< numQueries, k>>>(d_NNdist, d_NNidx, d_tmpID, d_tmpDist, dev_n);
        CUDA_CHECK_RETURN( cudaPeekAtLastError() );
    }
    gettimeofday(&second, &tzp);
    printf("\n---KNN computed--- \n\n");
    // Normalize the microsecond borrow before subtracting the timestamps.
    if(first.tv_usec>second.tv_usec){
        second.tv_usec += 1000000;
        second.tv_sec--;
    }
    lapsed.tv_usec = second.tv_usec - first.tv_usec;
    lapsed.tv_sec = second.tv_sec - first.tv_sec;
    printf("Time elapsed: %ld, %ld s\n", lapsed.tv_sec, lapsed.tv_usec);
    CUDA_CHECK_RETURN( cudaMemcpy( NNdist, d_NNdist, numQueries*k*sizeof(double), cudaMemcpyDeviceToHost ) );
    CUDA_CHECK_RETURN( cudaMemcpy( NNidx, d_NNidx, numQueries*k*sizeof(int), cudaMemcpyDeviceToHost ) );
    save_d(query_set.data, query_file, numQueries, numDim);
    save_d(training_set.data, dataset_file, numObjects, numDim);
    save_d(NNdist, KNNdist_file, k, numQueries);
    save_int(NNidx, KNNidx_file, k, numQueries);
    /*===== clean memory ========*/
    clean(&training_set);
    clean(&query_set);
    free(NNdist);
    free(NNidx);
    free(dist);
    free(tmpDist);       // BUGFIX: previously leaked
    free(tmpID);         // BUGFIX: previously leaked
    cudaFree(d_queries);
    cudaFree(d_data);
    cudaFree(d_NNdist);
    cudaFree(d_NNidx);
    cudaFree(d_bonusID); // BUGFIX: previously leaked
    cudaFree(d_tmpDist); // BUGFIX: previously leaked
    cudaFree(d_tmpID);   // BUGFIX: previously leaked
    cudaFree(dev_k);
    cudaFree(dev_n);
    cudaFree(dev_nol);   // BUGFIX: previously leaked
    cudaFree(dev_off);   // BUGFIX: previously leaked
    cudaFree(d_dist);
}
|
8,605 | #include <cmath>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
static void HandleError(cudaError_t err, const char *file, int line) {
  // Abort with a descriptive message when a CUDA runtime call has failed;
  // a cudaSuccess status is a no-op.
  if (err == cudaSuccess) {
    return;
  }
  printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
  exit(EXIT_FAILURE);
}
// Wraps a CUDA runtime call with automatic file/line error reporting.
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
// Aborts with file/line context if a host allocation returned NULL.
#define HANDLE_NULL(a) \
  { \
    if (a == NULL) { \
      printf("Host memory failed in %s at line %d\n", __FILE__, __LINE__); \
      exit(EXIT_FAILURE); \
    } \
  }
__global__ void saxpy(int *x, int *y, int alpha, size_t N) {
  // Integer SAXPY, one element per thread: y[i] <- alpha*y[i] + x[i]
  // (note the result lands in y, and y — not x — is the scaled operand).
  size_t tid = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= N) {
    return;
  }
  y[tid] = alpha * y[tid] + x[tid];
}
void initialize_list(int *x, int N) {
  // Populate x[0..N) with pseudo-random values drawn from rand()
  // (sequence determined by the caller's srand seed).
  for (int idx = 0; idx < N; ++idx) {
    x[idx] = rand();
  }
}
// Demo driver: fills two 10^7-element integer arrays with random data, runs
// the saxpy kernel (y <- alpha*y + x) on the GPU, copies the result back and
// verifies every element against the host computation.
int main(void) {
  int N = std::pow(10, 7);
  int alpha = 2;
  int *x, *y;
  x = (int *)(malloc(N * sizeof(int)));
  y = (int *)(malloc(N * sizeof(int)));
  HANDLE_NULL(x); // BUGFIX: host allocations were unchecked
  HANDLE_NULL(y);
  srand(time(NULL));
  initialize_list(x, N);
  initialize_list(y, N);
  int *d_x, *d_y;
  HANDLE_ERROR( cudaMalloc((void **)&d_x, N * sizeof(int)) );
  HANDLE_ERROR( cudaMalloc((void **)&d_y, N * sizeof(int)) ); // BUGFIX: was unchecked
  HANDLE_ERROR( cudaMemcpy(d_x, x, N * sizeof(int), cudaMemcpyHostToDevice) );
  HANDLE_ERROR( cudaMemcpy(d_y, y, N * sizeof(int), cudaMemcpyHostToDevice) );
  saxpy<<<(N + 255) / 256, 256>>>(d_x, d_y, alpha, N);
  // BUGFIX: launch-configuration errors were silently discarded.
  HANDLE_ERROR( cudaGetLastError() );
  int *c;
  c = (int *)(malloc(N * sizeof(int)));
  HANDLE_NULL(c);
  // cudaMemcpy synchronizes with the kernel, so 'c' is complete below.
  HANDLE_ERROR( cudaMemcpy(c, d_y, N * sizeof(int), cudaMemcpyDeviceToHost) );
  printf("[");
  for (int i = 0; i < N; i++) {
    if (i < 10) {
      printf("%d ", c[i]);
    }
    if (y[i] * alpha + x[i] != c[i]) {
      printf("YOU SCREWED UP!");
    }
  }
  printf(" ... ]");
  free(x); // BUGFIX: all three host buffers were leaked
  free(y);
  free(c);
  cudaFree(d_x);
  cudaFree(d_y); // BUGFIX: d_y was leaked
  return 0;
}
|
8,606 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// DEVICE CODE:
// Kernel:
__global__ void hello_cuda(){
// Trivial kernel: every launched thread prints one greeting line.
printf("Hello CUDA world!\n");
}
// HOST CODE
// HOST CODE
int main(){
    // Launch a single block of 20 threads; each prints the greeting.
    hello_cuda<<<1,20>>>();
    // BUGFIX: check for launch-configuration and execution errors instead of
    // discarding the return codes of the launch and the synchronize.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) {
        err = cudaDeviceSynchronize();
    }
    if (err != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        cudaDeviceReset();
        return 1;
    }
    cudaDeviceReset();
    return 0;
}
8,607 | //#define _USE_MATH_DEFINES
//
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <math.h>
//#include <stdio.h>
//#include <conio.h>
//#include <iostream>
//#include <fstream>
//#include <ctime>
//#include <string>
//
//void GetInputAndCalcInfluence();
//void GetInputAndCalcDistr();
//void CreateMatrix(double* Matrixij, double* BECoords, double* BE, int BeNumber, int BEInfoSize);
//void CreateNodes(double* Matrixij, int size, double* Coeffs, int BeNumber, double* BE, int BEInfoSize);
//
//// cuda function create influence matrix with pointer 'Matrixij' for boundary condition in points with coords 'BECoords',
//// but boundary elements are in coords 'BE'
//__global__ void MatrixCreation(double* Matrixij, double* BECoords, double* BE)
//{
// int i = blockIdx.x;
// int j0 = threadIdx.x;
// int index = i * blockDim.x + j0;
//
// // coords of calculation point which is be coords with shift to avoid undeterminated state
// double x = BECoords[i];
// double y = BECoords[i + 1];
//
// if (j0 % 2)
// {
// int j = (j0 - 1) / 2 * (19 + 7) + 19;
// Matrixij[index] = (-6 * atanf((-BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) / ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]))) * ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6])) *
// (3 * powf(BE[j + 5], 2) + powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) - 3 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) +
// (BE[j + 5] - (-BE[j + 3] + x) * cosf(BE[j + 6]) - (-BE[j + 4] + y) * sinf(BE[j + 6])) * (-16 * powf(BE[j + 5], 2) - 6 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) +
// 5 * BE[j + 5] * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) + 11 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) -
// 3 * logf(powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) + powf(-BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) *
// (powf(BE[j + 5], 3) + 3 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) -
// powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 3) + 3 * powf(BE[j + 5], 2) * (-BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])))) / (36. * powf(BE[j + 5], 2) * M_PI) -
// (-6 * atanf((BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) / ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]))) * ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6])) *
// (3 * powf(BE[j + 5], 2) + powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) - 3 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) +
// (-BE[j + 5] - (-BE[j + 3] + x) * cosf(BE[j + 6]) - (-BE[j + 4] + y) * sinf(BE[j + 6])) * (-16 * powf(BE[j + 5], 2) - 6 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) -
// 5 * BE[j + 5] * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) + 11 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) -
// 3 * logf(powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) + powf(BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) *
// (-powf(BE[j + 5], 3) + 3 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) -
// powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 3) + 3 * powf(BE[j + 5], 2) * (BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])))) / (36. * powf(BE[j + 5], 2) * M_PI);
// }
// else
// {
// int j = j0 / 2 * (19 + 7);
// Matrixij[index] = -(12 * atanf((BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) / ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]))) * ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9])) *
// (powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - 3 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// 3 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// (BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) * (9 * BE[j + 10] * (-BE[j + 10] + 3 * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]))) +
// 2 * (2 * powf(BE[j + 10], 2) - 6 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - 5 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) +
// 11 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2))) +
// logf(powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) + powf(BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) *
// (9 * BE[j + 10] * (powf(BE[j + 10], 2) + powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// 6 * (-powf(BE[j + 10], 3) + 3 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 3)))) / (144. * powf(BE[j + 10], 2) * M_PI) +
// (12 * atanf((-BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) / ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]))) * ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9])) *
// (powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - 3 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// 3 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// (-BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) * (9 * BE[j + 10] * (BE[j + 10] + 3 * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]))) +
// 2 * (2 * powf(BE[j + 10], 2) - 6 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) + 5 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) +
// 11 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2))) +
// logf(powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) + powf(-BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) *
// (9 * BE[j + 10] * (powf(BE[j + 10], 2) + powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// 6 * (powf(BE[j + 10], 3) + 3 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 3)))) / (144. * powf(BE[j + 10], 2) * M_PI) -
// (-12 * atanf((-BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) / ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]))) *
// ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17])) * (powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) +
// 3 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) - 3 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// (-BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) * (9 * BE[j + 18] * (BE[j + 18] + 3 * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]))) -
// 2 * (2 * powf(BE[j + 18], 2) - 6 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) + 5 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// 11 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2))) +
// logf(powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) + powf(-BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) *
// (9 * BE[j + 18] * (powf(BE[j + 18], 2) + powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) - powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// 6 * (-powf(BE[j + 18], 3) - 3 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 3)))) / (144. * powf(BE[j + 18], 2) * M_PI) +
// (-12 * atanf((BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) / ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]))) *
// ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17])) * (powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) +
// 3 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) - 3 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// (BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) * (9 * BE[j + 18] * (-BE[j + 18] + 3 * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]))) -
// 2 * (2 * powf(BE[j + 18], 2) - 6 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) - 5 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// 11 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2))) +
// logf(powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) + powf(BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) *
// (9 * BE[j + 18] * (powf(BE[j + 18], 2) + powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) - powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// 6 * (powf(BE[j + 18], 3) - 3 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 3)))) / (144. * powf(BE[j + 18], 2) * M_PI);
// }
//}
//
//// cuda function calculate one term for one be coord and add it to value belong to be coord
//__global__ void CalculateNodes(double* Matrixij, double* Coeff, double* BE)
//{
// int i = blockIdx.x; // index by x axis
// int j0 = blockIdx.y; // index by be element
// int k = threadIdx.x; // index by y axis
//
// int index = (i * blockDim.x + k) * 3;
//
// double x = Matrixij[index];
// double y = Matrixij[index + 1];
//
// if (j0 % 2) {
// int j = (j0 - 1) / 2 * (19 + 7) + 19;
// double increment = Coeff[j0] * ((-6 * atanf((-BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) / ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]))) * ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6])) *
// (3 * powf(BE[j + 5], 2) + powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) - 3 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) +
// (BE[j + 5] - (-BE[j + 3] + x) * cosf(BE[j + 6]) - (-BE[j + 4] + y) * sinf(BE[j + 6])) * (-16 * powf(BE[j + 5], 2) - 6 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) +
// 5 * BE[j + 5] * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) + 11 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) -
// 3 * logf(powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) + powf(-BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) *
// (powf(BE[j + 5], 3) + 3 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) -
// powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 3) + 3 * powf(BE[j + 5], 2) * (-BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])))) / (36. * powf(BE[j + 5], 2) * M_PI) -
// (-6 * atanf((BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) / ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]))) * ((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6])) *
// (3 * powf(BE[j + 5], 2) + powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) - 3 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) +
// (-BE[j + 5] - (-BE[j + 3] + x) * cosf(BE[j + 6]) - (-BE[j + 4] + y) * sinf(BE[j + 6])) * (-16 * powf(BE[j + 5], 2) - 6 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) -
// 5 * BE[j + 5] * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) + 11 * powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) -
// 3 * logf(powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) + powf(BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 2)) *
// (-powf(BE[j + 5], 3) + 3 * powf((-BE[j + 4] + y) * cosf(BE[j + 6]) + (BE[j + 3] - x) * sinf(BE[j + 6]), 2) * ((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])) -
// powf((-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6]), 3) + 3 * powf(BE[j + 5], 2) * (BE[j + 5] + (-BE[j + 3] + x) * cosf(BE[j + 6]) + (-BE[j + 4] + y) * sinf(BE[j + 6])))) / (36. * powf(BE[j + 5], 2) * M_PI));
// atomicAdd(&(Matrixij[index + 2]), increment);
// }
// else {
// int j = j0 / 2 * (19 + 7);
// double increment = Coeff[j0] * (-(12 * atanf((BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) / ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]))) * ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9])) *
// (powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - 3 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// 3 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// (BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) * (9 * BE[j + 10] * (-BE[j + 10] + 3 * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]))) +
// 2 * (2 * powf(BE[j + 10], 2) - 6 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - 5 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) +
// 11 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2))) +
// logf(powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) + powf(BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) *
// (9 * BE[j + 10] * (powf(BE[j + 10], 2) + powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// 6 * (-powf(BE[j + 10], 3) + 3 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 3)))) / (144. * powf(BE[j + 10], 2) * M_PI) +
// (12 * atanf((-BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) / ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]))) * ((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9])) *
// (powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - 3 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// 3 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// (-BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) * (9 * BE[j + 10] * (BE[j + 10] + 3 * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]))) +
// 2 * (2 * powf(BE[j + 10], 2) - 6 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) + 5 * BE[j + 10] * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) +
// 11 * powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2))) +
// logf(powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) + powf(-BE[j + 10] + (-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) *
// (9 * BE[j + 10] * (powf(BE[j + 10], 2) + powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) - powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 2)) +
// 6 * (powf(BE[j + 10], 3) + 3 * powf((-BE[j + 6] + y) * cosf(BE[j + 9]) + (BE[j + 5] - x) * sinf(BE[j + 9]), 2) * ((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9])) -
// powf((-BE[j + 5] + x) * cosf(BE[j + 9]) + (-BE[j + 6] + y) * sinf(BE[j + 9]), 3)))) / (144. * powf(BE[j + 10], 2) * M_PI) -
// (-12 * atanf((-BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) / ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]))) *
// ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17])) * (powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) +
// 3 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) - 3 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// (-BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) * (9 * BE[j + 18] * (BE[j + 18] + 3 * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]))) -
// 2 * (2 * powf(BE[j + 18], 2) - 6 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) + 5 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// 11 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2))) +
// logf(powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) + powf(-BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) *
// (9 * BE[j + 18] * (powf(BE[j + 18], 2) + powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) - powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// 6 * (-powf(BE[j + 18], 3) - 3 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 3)))) / (144. * powf(BE[j + 18], 2) * M_PI) +
// (-12 * atanf((BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) / ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]))) *
// ((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17])) * (powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) +
// 3 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) - 3 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// (BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) * (9 * BE[j + 18] * (-BE[j + 18] + 3 * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]))) -
// 2 * (2 * powf(BE[j + 18], 2) - 6 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) - 5 * BE[j + 18] * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// 11 * powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2))) +
// logf(powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) + powf(BE[j + 18] + (-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) *
// (9 * BE[j + 18] * (powf(BE[j + 18], 2) + powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) - powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 2)) +
// 6 * (powf(BE[j + 18], 3) - 3 * powf((-BE[j + 14] + y) * cosf(BE[j + 17]) + (BE[j + 13] - x) * sinf(BE[j + 17]), 2) * ((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17])) +
// powf((-BE[j + 13] + x) * cosf(BE[j + 17]) + (-BE[j + 14] + y) * sinf(BE[j + 17]), 3)))) / (144. * powf(BE[j + 18], 2) * M_PI));
// atomicAdd(&(Matrixij[index + 2]), increment);
// }
//}
//
//// function using CUDA function "MatrixCreation" create influence matrix by pointer Matrixij[size*size]
//void CreateMatrix(double* Matrixij, double* BECoords, double* BE, int BeNumber, int BEInfoSize)
//{
// double* dev_a, * dev_b, * dev_c;
//
// cudaSetDevice(0);
//
// cudaMalloc((void**)&dev_a, BeNumber * BeNumber * sizeof(double));
// cudaMalloc((void**)&dev_b, 2 * BeNumber * sizeof(double));
// cudaMalloc((void**)&dev_c, BEInfoSize * sizeof(double));
// cudaMemcpy(dev_a, Matrixij, BeNumber * BeNumber * sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(dev_b, BECoords, BeNumber * 2 * sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(dev_c, BE, BEInfoSize * sizeof(double), cudaMemcpyHostToDevice);
//
// dim3 blockSize = dim3(BeNumber, 1, 1);
// dim3 gridSize = dim3(BeNumber, 1, 1);
//
// MatrixCreation << <gridSize, blockSize >> > (dev_a, dev_b, dev_c);
//
// cudaEvent_t syncEvent;
// cudaEventCreate(&syncEvent);
// cudaEventRecord(syncEvent, 0);
// cudaEventSynchronize(syncEvent);
//
// cudaMemcpy(Matrixij, dev_a, BeNumber * BeNumber * sizeof(double), cudaMemcpyDeviceToHost);
//}
//
//void CreateNodes(double* Matrixij, int size, double* Coeffs, int BeNumber, double* BE, int BEInfoSize)
//{
// double* dev_a, * dev_b, * dev_c;
//
// cudaSetDevice(0);
//
// cudaMalloc((void**)&dev_a, 3 * size * size * sizeof(double));
// cudaMalloc((void**)&dev_b, BeNumber * sizeof(double));
// cudaMalloc((void**)&dev_c, BEInfoSize * sizeof(double));
// cudaMemcpy(dev_a, Matrixij, 3 * size * size * sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(dev_b, Coeffs, BeNumber * sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(dev_c, BE, BEInfoSize * sizeof(double), cudaMemcpyHostToDevice);
//
// dim3 blockSize = dim3(size, 1, 1);
// dim3 gridSize = dim3(size, BeNumber, 1);
//
// CalculateNodes << < gridSize, blockSize >> > (dev_a, dev_b, dev_c);
//
// cudaEvent_t syncEvent;
// cudaEventCreate(&syncEvent);
// cudaEventRecord(syncEvent, 0);
// cudaEventSynchronize(syncEvent);
//
// cudaMemcpy(Matrixij, dev_a, 3 * size * size * sizeof(double), cudaMemcpyDeviceToHost);
//}
//
//void GetInputAndCalcInfluence()
//{
// double* Coords, * BE;
// double* Matrixij;
//
// int bediscr_array[] = { 5,10,15,20,25,30,35 };
// string input_file_name;
// string output_file_name;
//
// for (int i = 0; i < 7; i++)
// {
// int bediscr = bediscr_array[i];
// int CoordsNumber, BEInfoSize;
//
// input_file_name = "D:/Docs/article_03_20/data/shifted_coords" + to_string(bediscr) + ".txt";
// ifstream in;
// in.open(input_file_name);
//
// in >> CoordsNumber;
// Coords = new double[CoordsNumber];
//
// for (int i = 0; i < CoordsNumber; i++)
// in >> Coords[i];
//
// in.close();
//
// input_file_name = "D:/Docs/article_03_20/data/beinfo" + to_string(bediscr) + ".txt";
// in.open(input_file_name);
//
// in >> BEInfoSize;
// BE = new double[BEInfoSize];
//
// for (int i = 0; i < BEInfoSize; i++)
// in >> BE[i];
//
// in.close();
//
// int benumb = CoordsNumber / 2;
//
// Matrixij = new double[benumb * benumb];
//
// unsigned int start_time;
// unsigned int end_time;
// unsigned int search_time = 0;
//
// /////////////// CUDA method //////////////////////////
// CreateMatrix(Matrixij, Coords, BE, benumb, BEInfoSize);
// start_time = clock();
// CreateMatrix(Matrixij, Coords, BE, benumb, BEInfoSize);
// end_time = clock();
// search_time = end_time - start_time;
//
// output_file_name = "D:/Docs/article_03_20/CUDA_output/matr" + to_string(bediscr) + ".txt";
// ofstream out;
// out.open(output_file_name);
//
// for (int i = 0; i < benumb; i++)
// for (int j = 0; j < benumb; j++)
// out << Matrixij[i * benumb + j] << '\n';
// out << search_time;
// out.close();
//
// /////////////// sequential method ////////////////////
// start_time = clock();
// MatrixCreationSeq(Matrixij, Coords, BE, benumb);
// end_time = clock();
// search_time = end_time - start_time;
//
// output_file_name = "D:/Docs/article_03_20/SEQ_output/matr" + to_string(bediscr) + ".txt";
// out.open(output_file_name);
//
// for (int i = 0; i < benumb; i++)
// for (int j = 0; j < benumb; j++)
// out << Matrixij[i * benumb + j] << '\n';
// out << search_time;
// out.close();
//
// ///////////////////////////////////////////////////////
//
// delete(Matrixij);
// delete(BE);
// delete(Coords);
// }
//}
//
//void GetInputAndCalcDistr()
//{
// int bediscr_array[] = { 5,10,15,20,25,30,35 };
// int areadiscr_array[] = { 10, 20, 30, 40, 50 };
// double* BE, * Coefs;
// int CoefsNumb, BEInfoSize;
// string input_file_name;
// string output_file_name;
// int bediscr, areadiscr;
//
// for (int k = 0; k < 7; k++)
// {
// bediscr = bediscr_array[k];
// input_file_name = "D:/Docs/article_03_20/data/coefs" + to_string(bediscr) + ".txt";
// ifstream in;
// in.open(input_file_name);
//
// in >> CoefsNumb;
// Coefs = new double[CoefsNumb];
//
// for (int i = 0; i < CoefsNumb; i++)
// in >> Coefs[i];
//
// in.close();
//
// input_file_name = "D:/Docs/article_03_20/data/beinfo" + to_string(bediscr) + ".txt";
// in.open(input_file_name);
//
// in >> BEInfoSize;
// BE = new double[BEInfoSize];
//
// for (int i = 0; i < BEInfoSize; i++)
// in >> BE[i];
//
// in.close();
//
// for (int s = 0; s < 5; s++)
// {
// areadiscr = areadiscr_array[s];
// float discrx = (WIDTH - 2 * EPS) / (areadiscr - 1);
// float discry = (HEIGHT - 2 * EPS) / (areadiscr - 1);
//
// int matrixsize = areadiscr * areadiscr * 3;
// double* Matrixij = new double[matrixsize];
//
// for (int i = 0; i < areadiscr; i++)
// for (int j = 0; j < areadiscr; j++)
// {
// int idx = (i * areadiscr + j) * 3;
// Matrixij[idx] = -WIDTH / 2 + EPS + discrx * i;
// Matrixij[idx + 1] = -EPS - discry * j;
// Matrixij[idx + 2] = 0;
// }
//
// unsigned int start_time;
// unsigned int end_time;
// unsigned int search_time = 0;
//
// CreateNodes(Matrixij, areadiscr, Coefs, CoefsNumb, BE, BEInfoSize); // startup calculation to activate cuda memory
// start_time = clock();
// CreateNodes(Matrixij, areadiscr, Coefs, CoefsNumb, BE, BEInfoSize);
// end_time = clock();
// search_time = end_time - start_time;
//
// output_file_name = "D:/Docs/article_03_20/CUDA_Output/node" + to_string(bediscr) + "-" + to_string(areadiscr) + ".txt";
// ofstream out;
// out.open(output_file_name);
//
// for (int i = 0; i < matrixsize; i = i + 3)
// out << Matrixij[i] << " " << Matrixij[i + 1] << " " << Matrixij[i + 2] << '\n';
// out << search_time;
//
// out.close();
//
// /////////////// sequential method ////////////////////
// start_time = clock();
// CalculateNodesSeq(Matrixij, areadiscr, Coefs, CoefsNumb, BE);
// end_time = clock();
// search_time = end_time - start_time;
//
// output_file_name = "D:/Docs/article_03_20/SEQ_output/node" + to_string(bediscr) + "-" + to_string(areadiscr) + ".txt";
// out.open(output_file_name);
//
// for (int i = 0; i < matrixsize; i = i + 3)
// out << Matrixij[i] << " " << Matrixij[i + 1] << " " << Matrixij[i + 2] << '\n';
// out << search_time;
// out.close();
//
// ///////////////////////////////////////////////////////
//
// delete(Matrixij);
// }
//
// delete(Coefs);
// delete(BE);
// }
//} |
8,608 | #include "includes.h"
// create an image buffer. return host ptr, pass out device pointer through pointer to pointer
// Sobel edge-magnitude filter over a single-channel 8-bit image.
// Expected launch: a 2D grid/block layout covering at least iw x ih threads.
// Interior pixels (1..iw-2, 1..ih-2) receive sqrt(gx^2 + gy^2), clamped to
// 255 so it fits in an unsigned char — the raw magnitude can reach ~1443
// for 8-bit input, which previously wrapped around on the narrowing store.
// NOTE(review): border pixels are left untouched, as in the original;
// presumably callers pre-initialize `dest` — confirm.
__global__ void sobelfilter_kernel(int iw, int ih, unsigned char *source, unsigned char *dest)
{
	// Calculate our pixel's location
	int x = (blockIdx.x * blockDim.x) + threadIdx.x;
	int y = (blockIdx.y * blockDim.y) + threadIdx.y;
	// Operate only on interior pixels so the 3x3 stencil stays in bounds
	if(x > 0 && x < iw - 1 && y > 0 && y < ih - 1)
	{
		// Horizontal gradient (Sobel Gx)
		int gx = -source[iw*(y-1)+(x-1)] + source[iw*(y-1)+(x+1)] +
			-2*source[iw*(y)+(x-1)] + 2*source[iw*(y)+(x+1)] +
			-source[iw*(y+1)+(x-1)] + source[iw*(y+1)+(x+1)];
		// Vertical gradient (Sobel Gy)
		int gy = -source[iw*(y-1)+(x-1)] - 2*source[iw*(y-1)+(x)]
			-source[iw*(y-1)+(x+1)] +
			source[iw*(y+1)+(x-1)] + 2*source[iw*(y+1)+(x)] +
			source[iw*(y+1)+(x+1)];
		// sqrtf: single precision is sufficient here and avoids a double op;
		// gx,gy are bounded by +/-1020 so gx*gx + gy*gy fits easily in int.
		int mag = (int) sqrtf((float)(gx*gx + gy*gy));
		// Clamp to the unsigned char range instead of silently wrapping.
		dest[iw*y+x] = (unsigned char) min(mag, 255);
	}
}
8,609 | /* Copyright 2012 Jeffrey Blanchard and Jared Tanner
*
* GPU Accelerated Greedy Algorithms for Compressed Sensing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// ************ GLOBAL FUNCTIONS (kernels) **************
// ************ Thresholding functions *************
// Copy into vec_thres the entries of vec that belong to the selected
// support, i.e. entries whose bin index is <= k_bin. Other entries of
// vec_thres are left untouched.
// Fix: the original used the non-short-circuiting `&`, so bin[xIndex]
// was read even when xIndex >= n — an out-of-bounds access whenever the
// grid overshoots n. `&&` guards the read.
__global__ void threshold_one(float *vec, float *vec_thres, int *bin, const int k_bin, const int n)
{
  unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
  // xIndex is a value from 1 to k from the vector ind
  if ( (xIndex < n) && (bin[xIndex]<=k_bin) )
	vec_thres[xIndex]=vec[xIndex];
}
// Zero out the entries of vec that fall outside the selected support,
// i.e. entries whose bin index is strictly greater than k_bin.
// Fix: the original's non-short-circuiting `&` read bin[xIndex] even
// when xIndex >= n (out of bounds for overshooting grids); `&&`
// short-circuits so the read only happens in range.
__global__ void threshold(float *vec, int *bin, const int k_bin, const int n)
{
  unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
  // xIndex is a value from 1 to k from the vector ind
  if ( (xIndex < n) && (bin[xIndex]>k_bin) )
	vec[xIndex]=0.0f;
}
// Used in findSupportSet_sort: entries of vec with magnitude below the
// threshold T are zeroed, and the corresponding slot of `support` is
// marked with the value 2.
__global__ void threshold_and_support(float *vec, int *support, const int n, const float T)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n) return;
  if (abs(vec[idx]) < T) {
    vec[idx] = 0.0f;
    support[idx] = 2;
  }
}
// ******** Vector Definitions ************
// Fill the length-n float vector with zeros.
__global__ void zero_vector_float(float *vec, const int n)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < n) {
    vec[idx] = 0.0f;
  }
}
// Fill the length-n int vector with zeros.
__global__ void zero_vector_int(int *vec, const int n)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < n) {
    vec[idx] = 0;
  }
}
// Fill the length-n float vector with ones.
__global__ void one_vector_float(float *vec, const int n)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < n) {
    vec[idx] = 1.0f;
  }
}
// Fill the length-n int vector with ones.
__global__ void one_vector_int(int *vec, const int n)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < n) {
    vec[idx] = 1;
  }
}
// ********** Kernels for Linear Binning Operations when finding supports **************
// Assign each entry of vec to a linear-magnitude bin and count bins below
// MaxBin: entries with magnitude above a small fraction of `intercept` get
// bin = max(0, slope*(intercept - |v|)) and, when that bin is < MaxBin,
// increment its counter atomically; near-zero entries get the sentinel bin
// slope*intercept + 1.
// Fix: the original read vec[xIndex] BEFORE the `xIndex < n` bounds check,
// an out-of-bounds access for the tail threads of an overshooting grid.
// The read is now inside the guard.
__global__ void LinearBinning(float *vec, int *bin, int *bin_counters, const int num_bins, const int MaxBin, const int n, const float slope, const float intercept)
{
  unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
  if ( xIndex < n ){
    float temp = abs(vec[xIndex]);
    if ( temp > (intercept *.000001) ){
      bin[xIndex]=max(0.0f,slope * (intercept - temp));
      // only bins below MaxBin are tallied
      if (bin[xIndex]<MaxBin) atomicAdd(bin_counters+bin[xIndex],1);
    }
    else bin[xIndex] = slope * intercept + 1.0f;  // sentinel: effectively zero entry
  }
}
// ********** Kernels that deal with index sets ***************
// Convert MATLAB-style 1-based row indices to 0-based, in place.
void __global__ indexShiftDown(int * d_rows, const int m)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= m) return;
  d_rows[idx] -= 1;
}
// Union of two supports for CSMPSP: any index selected by the gradient
// support (d_bin_grad <= k_bin_grad) is forced into the primary support
// by lowering its d_bin value to k_bin.
void __global__ joinSupports(int * d_bin, int * d_bin_grad, const int k_bin, const int k_bin_grad, const int n)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < n && d_bin_grad[idx] <= k_bin_grad && d_bin[idx] > k_bin)
    d_bin[idx] = k_bin;
}
// *********** Used in results functions (results.cu) *************
// Tally support agreement between a reference signal and a recovered one
// (used by results.cu): support_counter[0] counts indices nonzero in both
// vectors, support_counter[1] counts indices zero in both.
__global__ void check_support(float * vec_input, float * vec, const int n, int * support_counter)
{
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n) return;
  const bool ref_nz = (vec_input[idx] != 0);
  const bool rec_nz = (vec[idx] != 0);
  if (ref_nz) {
    if (rec_nz) atomicAdd(support_counter, 1);      // true positive
  } else if (!rec_nz) {
    atomicAdd(support_counter + 1, 1);              // true negative
  }
}
// ************** Used in functions.cu ****************
// Copy the elementwise magnitudes of vec into mag_vec; used by
// FindSupportSet_sort so the magnitudes can be sorted separately.
__global__ void magnitudeCopy(float *mag_vec, float *vec, const int n)
{
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n) return;
  mag_vec[idx] = fabsf(vec[idx]);
}
/*
**************************************************************
** VARIOUS KERNELS USED IN DEVELOPMENT BUT NO LONGER ACTIVE **
**************************************************************
__global__ void make_bins(float *vec, int *bin, const int num_bins, const int n, const float slope, const float intercept)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( xIndex < n ){
int bin_new_val;
float temp = abs(vec[xIndex]);
if ( temp > (intercept *.000001) ){
bin_new_val=slope * (intercept - temp);
}
else bin_new_val = num_bins;
bin[xIndex]=bin_new_val;
}
}
__global__ void count_bins(int *bin, int *bin_counters, const int num_bins, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( (xIndex < n) & (bin[xIndex]<num_bins) )
atomicAdd(bin_counters+bin[xIndex],1);
}
__global__ void make_and_count_bins(float *vec, int *bin, int *bin_counters, const int num_bins, const int n, const float slope, const float intercept)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
float temp = abs(vec[xIndex]);
if ( xIndex < n ){
if ( temp > (intercept *.01) ){
bin[xIndex]=max(0.0f,slope * (intercept - temp));
atomicAdd(bin_counters+bin[xIndex],1);
}
else bin[xIndex] = slope * intercept + 1.0f;
}
}
// This is used to test behavior of skipping cudaThreadSync();
__global__ void count_zero_one(float *vec, float *data, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( (xIndex < n) ){
if (vec[xIndex] == 0)
atomicAdd(data,1);
else if (vec[xIndex] == 1)
atomicAdd(data+1,1);
}
}
__global__ void countRest(int *bin, int *bin_counters, const int num_bins, const int maxBin, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( (xIndex < n) & (bin[xIndex]<num_bins) )
if (bin[xIndex]>= maxBin) atomicAdd(bin_counters+bin[xIndex],1);
}
__global__ void make_and_count_bins_alt(float *vec, int *bin, int *bin_counters, const int num_bins, const int n, const float slope, const float intercept, const float )
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
float temp = abs(vec[xIndex]);
if ( xIndex < n ){
bin[xIndex]=max(0.0f,slope * (intercept - temp));
if ( temp > (intercept *.000001) ){
atomicAdd(bin_counters+bin[xIndex],1);
}
else bin[xIndex] = slope * intercept + 1.0f;
}
}
__global__ void update_bins(float *vec, int *bin, int *bin_counters, const int num_bins, const int n, const float slope, const float intercept)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( xIndex < n ){
int bin_new_val;
float temp = abs(vec[xIndex]);
if ( temp > (intercept *.000001) ){
bin_new_val=slope * (intercept - temp);
}
else bin_new_val = num_bins;
if ( bin[xIndex] != bin_new_val ){
if (bin[xIndex] < num_bins)
atomicAdd(bin_counters+bin[xIndex],-1);
if ( bin_new_val < num_bins )
atomicAdd(bin_counters+bin[xIndex],1);
bin[xIndex]=bin_new_val;
}
}
}
__global__ void dyadicAdd(int * counter, const int length, const int shift)
{
if (shift > 0) {
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int adds = 2*shift;
int Index = adds*(xIndex+1)-1;
if (Index < length) {
counter[Index] = counter[Index] + counter[Index-shift];
}
}
}
// Soft thresholding used in Lee and Wright's SPARSA
__global__ void __soft(float* y, const float* x, float T, int m)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
float x_e, y_e;
if(xIndex < m)
{
x_e = x[xIndex];
y_e = fmaxf(fabsf(x_e) - T, 0.f);
y[xIndex] = y_e / (y_e + T) * x_e;
}
}
__global__ void halve_bins(int *bin, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if ( xIndex < n )
bin[xIndex] = bin[xIndex]/2;
}
// Pairwise fold: vec_shorter[i] = vec[2i] + vec[2i+1] for i in [0, n).
// 'vec' must therefore hold at least 2n elements.
__global__ void add_adjacent(int *vec, int *vec_shorter, const int n)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    const int src = 2 * idx;
    vec_shorter[idx] = vec[src] + vec[src + 1];
}
// Element-wise copy of n ints from vec_from into vec_to.
__global__ void int_copy(int *vec_to, int *vec_from, const int n)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    vec_to[idx] = vec_from[idx];
}
// Serial scan returning max(|d_vec[0..length)|); returns 0 for an empty range.
__device__ float getAbsMax(float * d_vec, const int length)
{
    float best = 0;
    for (int idx = 0; idx < length; idx++) {
        const float mag = abs(d_vec[idx]);
        if (mag > best) best = mag;
    }
    return best;
}
// Serial scan returning the largest element of vec[0..length); returns 0.0f
// for an empty range (and therefore also floors the result at 0 for
// all-negative inputs, matching the original).
__host__ __device__ float getMax(float * vec, const int length)
{
    float best = 0.0f;
    for (int idx = 0; idx < length; idx++)
        if (vec[idx] > best) best = vec[idx];
    return best;
}
// One thread per segment: each thread writes the absolute max of its slice of
// d_vec into segmentMaxes[xIndex].  The vector is split unevenly: the first
// threadsHigh threads own HighSegmentLength-element slices (covering the first
// HighLength elements), the rest own LowSegmentLength-element slices.
// NOTE(review): segmentMaxes is written for every launched thread — confirm the
// launch size equals the segment count.
__global__ void segmentMax(float* d_vec, float *segmentMaxes, const int length, const int HighLength, const int HighSegmentLength, const int threadsHigh, const int LowSegmentLength)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int startIndex, SegmentLength;
// Threads past the "high" region take a low-length slice, provided that slice
// still fits inside the vector; everyone else takes a high-length slice.
if ( (xIndex*HighSegmentLength > HighLength) & ( (HighLength + (xIndex-threadsHigh+1)*LowSegmentLength) < length ) ){
startIndex = HighLength + (xIndex-threadsHigh)*LowSegmentLength;
SegmentLength = LowSegmentLength;
}
else {
startIndex = xIndex*HighSegmentLength;
SegmentLength = HighSegmentLength;
}
segmentMaxes[xIndex] = getAbsMax(d_vec+startIndex, SegmentLength);
}
// Single-thread final pass: reduces the per-segment maxima produced by
// segmentMax() into maxValue[0].  Only thread 0 does any work.
__global__ void GetSegmentMax(float * segmentMaxes, float* maxValue, const int length)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (xIndex < 1) {
        float mxVal = getMax(segmentMaxes, length);
        // FIX: the original stored the hard-coded debug value 0.5f (with the
        // intended value left in a trailing comment), so the computed maximum
        // was discarded.  Store the real result.
        maxValue[0] = mxVal;
    }
}
// ** Speed up the counting by not using atomicAdd **
// Serial per-segment binning with a PRIVATE counter (no atomics): maps each
// |segment[jj]| to bin = ceil(slope*(high - |v|)) for values in (low, high);
// values >= high go to bin 0 and values <= low to the overflow bin
// (countlength - 1).  The chosen bin is recorded per element in 'bins' and
// tallied into this segment's own segCounter slice.
__device__ void MakeCountSegment(float *segment, int *bins, const int seglength, int *segCounter, const int countlength, const float low, const float high, const float slope)
{
int bin;
float temp;
for (int jj=0; jj<seglength; jj++){
temp = abs(segment[jj]);
if ( ( temp > low ) & ( temp < high ) ) {
bin = (int)ceil(slope*abs(high-temp));
}
else if (temp >= high) {
bin = 0;
}
else bin = countlength - 1;
bins[jj]=bin;
// segCounter is private to this segment, so no atomic is needed
segCounter[bin] = segCounter[bin] + 1;
}
return;
}
// Variant of MakeCountSegment that tallies into a SHARED counter with atomics,
// then copies the shared counts out to this segment's segCounter slice.
// NOTE(review): s_counter is not zeroed here and no __syncthreads() separates
// the atomicAdd phase from the copy-out loop — presumably the calling kernel
// is responsible for initialization/ordering; verify, since as written the
// copy-out can observe partial counts from other threads in the block.
__device__ void MakeCountSegment_sharedAtomic(float *segment, int *bins, const int seglength, int *segCounter, int *s_counter, const int countlength, const float low, const float high, const float slope)
{
int bin;
float temp;
for (int jj=0; jj<seglength; jj++){
temp = abs(segment[jj]);
if ( ( temp > low ) & ( temp < high ) ) {
bin = (int)ceil(slope*abs(high-temp));
}
else if (temp >= high) {
bin = 0;
}
else bin = countlength - 1;
bins[jj]=bin;
atomicAdd(s_counter+bin,1);
}
// copy the block-shared histogram into this segment's private slice
for (int jj=0; jj<countlength; jj++) segCounter[jj]=s_counter[jj];
return;
}
// One thread per segment: bins its slice of 'vec' (same uneven high/low split
// as segmentMax) into 'bin' and tallies into its private slice of
// 'segcounter' (countlength ints per thread) via MakeCountSegment.
__global__ void make_and_count_seg(float *vec, int *bin, int *segcounter, const int length, const int countlength, const int HighLength, const int HighSegmentLength, const int threadsHigh, const int LowSegmentLength, const float low, const float high, const float slope)
{
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int startIndex, SegmentLength, startCountIndex;
// each thread owns its own countlength-wide counter slice
startCountIndex = xIndex*countlength;
if ( (xIndex*HighSegmentLength > HighLength) & ( (HighLength + (xIndex-threadsHigh+1)*LowSegmentLength) < length ) ){
startIndex = HighLength + (xIndex-threadsHigh)*LowSegmentLength;
SegmentLength = LowSegmentLength;
}
else {
startIndex = xIndex*HighSegmentLength;
SegmentLength = HighSegmentLength;
}
MakeCountSegment(vec+startIndex, bin+startIndex, SegmentLength, segcounter+startCountIndex, countlength, low, high, slope);
}
// Shared-memory variant of make_and_count_seg: tallies go through a
// dynamically-sized shared counter (launch must supply countlength*sizeof(int)
// bytes of dynamic shared memory).
// NOTE(review): the shared counter is never zeroed before use — see the note
// on MakeCountSegment_sharedAtomic; verify the launch contract.
__global__ void make_and_count_seg_sharedAtomic(float *vec, int *bin, int *segcounter, const int length, const int countlength, const int HighLength, const int HighSegmentLength, const int threadsHigh, const int LowSegmentLength, const float low, const float high, const float slope)
{
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int startIndex, SegmentLength, startCountIndex;
extern __shared__ int s_counter[];
startCountIndex = xIndex*countlength;
if ( (xIndex*HighSegmentLength > HighLength) & ( (HighLength + (xIndex-threadsHigh+1)*LowSegmentLength) < length ) ){
startIndex = HighLength + (xIndex-threadsHigh)*LowSegmentLength;
SegmentLength = LowSegmentLength;
}
else {
startIndex = xIndex*HighSegmentLength;
SegmentLength = HighSegmentLength;
}
MakeCountSegment_sharedAtomic(vec+startIndex, bin+startIndex, SegmentLength, segcounter+startCountIndex, s_counter, countlength, low, high, slope);
}
// Sums the per-segment histograms into the global one: each thread owns one
// bin 'idx' and accumulates segcounter[idx + jj*countlength] over all segments
// into counter[idx] (added on top of counter's existing contents).
__global__ void segCountSum(int *counter, int *segcounter, const int countlength)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= countlength) return;
    int total = counter[idx];
    for (int seg = 0; seg < countlength; seg++)
        total += segcounter[idx + seg * countlength];
    counter[idx] = total;
}
// Shared-memory variant of segCountSum: each thread accumulates its bin's
// column of segcounter in dynamic shared memory, then writes the sum to
// counter[xIndex] (overwriting, as in the original).
// Launch must supply at least blockDim.x*sizeof(int) dynamic shared memory.
__global__ void segCountSum_shared(int *counter, int *segcounter, const int countlength)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    extern __shared__ int s_counter[];
    if (xIndex < countlength){
        // FIX: shared memory is uninitialized — the original accumulated on top
        // of garbage.  Zero this thread's slot first.
        s_counter[xIndex] = 0;
        for (int jj=0; jj<countlength; jj++){
            s_counter[xIndex] = s_counter[xIndex] + segcounter[xIndex + jj*countlength];
        }
        // FIX: this store was outside the bounds check, so out-of-range threads
        // wrote garbage past the end of 'counter'.
        counter[xIndex] = s_counter[xIndex];
    }
}
// Replaces every element of vec[0..n) with its absolute value.
__global__ void magnitude(float *vec, const int n)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    vec[idx] = abs(vec[idx]);
}
*/
|
8,610 | #include "includes.h"
// Computes 4th-order B-spline interpolation weights (theta) and their
// derivatives (dtheta) per atom for PME-style charge spreading, along with the
// atom's integer grid cell (gix/giy/giz) and charge.  'recip' is a 3x3
// row-major reciprocal-space matrix; nfftx/y/z are the FFT grid dimensions.
// Grid-stride loop over the ncoord atoms.  Outputs are stored 4 floats per
// atom (one weight per spline node) in thetax/y/z and dthetax/y/z.
__global__ void fill_bspline_4(const float4 *xyzq, const int ncoord, const float *recip, const int nfftx, const int nffty, const int nfftz, int *gix, int *giy, int *giz, float *charge, float *thetax, float *thetay, float *thetaz, float *dthetax, float *dthetay, float *dthetaz) {
// Position to xyzq and atomgrid
unsigned int pos = blockIdx.x*blockDim.x + threadIdx.x;
while (pos < ncoord) {
float4 xyzqi = xyzq[pos];
float x = xyzqi.x;
float y = xyzqi.y;
float z = xyzqi.z;
float q = xyzqi.w;
float w;
// Map the coordinate into fractional grid units; the floorf(w+0.5f)-0.5f
// term wraps w into [-0.5, 0.5) before scaling by the grid size.
// NOTE: I don't think we need the +2.0f here..
w = x*recip[0] + y*recip[1] + z*recip[2] + 2.0f;
float frx = (float)(nfftx*(w - (floorf(w + 0.5f) - 0.5f)));
w = x*recip[3] + y*recip[4] + z*recip[5] + 2.0f;
float fry = (float)(nffty*(w - (floorf(w + 0.5f) - 0.5f)));
w = x*recip[6] + y*recip[7] + z*recip[8] + 2.0f;
float frz = (float)(nfftz*(w - (floorf(w + 0.5f) - 0.5f)));
// Integer grid cell and fractional offset within the cell.
int frxi = (int)(frx);
int fryi = (int)(fry);
int frzi = (int)(frz);
float wx = frx - (float)frxi;
float wy = fry - (float)fryi;
float wz = frz - (float)frzi;
gix[pos] = frxi;
giy[pos] = fryi;
giz[pos] = frzi;
charge[pos] = q;
float3 theta_tmp[4];
float3 dtheta_tmp[4];
// Order-2 spline initial values (linear weights).
theta_tmp[3].x = 0.0f;
theta_tmp[3].y = 0.0f;
theta_tmp[3].z = 0.0f;
theta_tmp[1].x = wx;
theta_tmp[1].y = wy;
theta_tmp[1].z = wz;
theta_tmp[0].x = 1.0f - wx;
theta_tmp[0].y = 1.0f - wy;
theta_tmp[0].z = 1.0f - wz;
// compute standard b-spline recursion
// (raise the spline order from 2 to 3)
theta_tmp[2].x = 0.5f*wx*theta_tmp[1].x;
theta_tmp[2].y = 0.5f*wy*theta_tmp[1].y;
theta_tmp[2].z = 0.5f*wz*theta_tmp[1].z;
theta_tmp[1].x = 0.5f*((wx+1.0f)*theta_tmp[0].x + (2.0f-wx)*theta_tmp[1].x);
theta_tmp[1].y = 0.5f*((wy+1.0f)*theta_tmp[0].y + (2.0f-wy)*theta_tmp[1].y);
theta_tmp[1].z = 0.5f*((wz+1.0f)*theta_tmp[0].z + (2.0f-wz)*theta_tmp[1].z);
theta_tmp[0].x = 0.5f*(1.0f-wx)*theta_tmp[0].x;
theta_tmp[0].y = 0.5f*(1.0f-wy)*theta_tmp[0].y;
theta_tmp[0].z = 0.5f*(1.0f-wz)*theta_tmp[0].z;
// perform standard b-spline differentiationa
// (derivative of an order-4 spline is a difference of order-3 weights,
// so it must be taken BEFORE the final recursion step below)
dtheta_tmp[0].x = -theta_tmp[0].x;
dtheta_tmp[0].y = -theta_tmp[0].y;
dtheta_tmp[0].z = -theta_tmp[0].z;
dtheta_tmp[1].x = theta_tmp[0].x - theta_tmp[1].x;
dtheta_tmp[1].y = theta_tmp[0].y - theta_tmp[1].y;
dtheta_tmp[1].z = theta_tmp[0].z - theta_tmp[1].z;
dtheta_tmp[2].x = theta_tmp[1].x - theta_tmp[2].x;
dtheta_tmp[2].y = theta_tmp[1].y - theta_tmp[2].y;
dtheta_tmp[2].z = theta_tmp[1].z - theta_tmp[2].z;
dtheta_tmp[3].x = theta_tmp[2].x - theta_tmp[3].x;
dtheta_tmp[3].y = theta_tmp[2].y - theta_tmp[3].y;
dtheta_tmp[3].z = theta_tmp[2].z - theta_tmp[3].z;
// one more recursion
// (raise the spline order from 3 to the final 4)
theta_tmp[3].x = (1.0f/3.0f)*wx*theta_tmp[2].x;
theta_tmp[3].y = (1.0f/3.0f)*wy*theta_tmp[2].y;
theta_tmp[3].z = (1.0f/3.0f)*wz*theta_tmp[2].z;
theta_tmp[2].x = (1.0f/3.0f)*((wx+1.0f)*theta_tmp[1].x + (3.0f-wx)*theta_tmp[2].x);
theta_tmp[2].y = (1.0f/3.0f)*((wy+1.0f)*theta_tmp[1].y + (3.0f-wy)*theta_tmp[2].y);
theta_tmp[2].z = (1.0f/3.0f)*((wz+1.0f)*theta_tmp[1].z + (3.0f-wz)*theta_tmp[2].z);
theta_tmp[1].x = (1.0f/3.0f)*((wx+2.0f)*theta_tmp[0].x + (2.0f-wx)*theta_tmp[1].x);
theta_tmp[1].y = (1.0f/3.0f)*((wy+2.0f)*theta_tmp[0].y + (2.0f-wy)*theta_tmp[1].y);
theta_tmp[1].z = (1.0f/3.0f)*((wz+2.0f)*theta_tmp[0].z + (2.0f-wz)*theta_tmp[1].z);
theta_tmp[0].x = (1.0f/3.0f)*(1.0f-wx)*theta_tmp[0].x;
theta_tmp[0].y = (1.0f/3.0f)*(1.0f-wy)*theta_tmp[0].y;
theta_tmp[0].z = (1.0f/3.0f)*(1.0f-wz)*theta_tmp[0].z;
// Store theta_tmp and dtheta_tmp into global memory
// (layout: 4 consecutive floats per atom)
int pos4 = pos*4;
thetax[pos4] = theta_tmp[0].x;
thetax[pos4+1] = theta_tmp[1].x;
thetax[pos4+2] = theta_tmp[2].x;
thetax[pos4+3] = theta_tmp[3].x;
thetay[pos4] = theta_tmp[0].y;
thetay[pos4+1] = theta_tmp[1].y;
thetay[pos4+2] = theta_tmp[2].y;
thetay[pos4+3] = theta_tmp[3].y;
thetaz[pos4] = theta_tmp[0].z;
thetaz[pos4+1] = theta_tmp[1].z;
thetaz[pos4+2] = theta_tmp[2].z;
thetaz[pos4+3] = theta_tmp[3].z;
dthetax[pos4] = dtheta_tmp[0].x;
dthetax[pos4+1] = dtheta_tmp[1].x;
dthetax[pos4+2] = dtheta_tmp[2].x;
dthetax[pos4+3] = dtheta_tmp[3].x;
dthetay[pos4] = dtheta_tmp[0].y;
dthetay[pos4+1] = dtheta_tmp[1].y;
dthetay[pos4+2] = dtheta_tmp[2].y;
dthetay[pos4+3] = dtheta_tmp[3].y;
dthetaz[pos4] = dtheta_tmp[0].z;
dthetaz[pos4+1] = dtheta_tmp[1].z;
dthetaz[pos4+2] = dtheta_tmp[2].z;
dthetaz[pos4+3] = dtheta_tmp[3].z;
// grid-stride: advance to the next atom handled by this thread
pos += blockDim.x*gridDim.x;
}
} |
8,611 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
// Tree reduction over groups of k elements spaced 'i' apart in 'a'; after the
// tree, every element whose offset is a multiple of k holds a k-wide partial
// sum, and its MEAN is written into b at the same offset.  n is the logical
// length of a; 'i' is the stride accumulated across host-side passes.
// NOTE(review): __syncthreads() only synchronizes within one block, while the
// pairing (j+l) can cross block boundaries — correctness presumably relies on
// the host launching one pass per k-fold reduction (see the caller's loop);
// verify before reusing this kernel standalone.
__global__ void reduce_kernel(float *a, float *b, int n, int k, int i)
{
int blockNum = blockIdx.y * gridDim.x + blockIdx.x;
int threadNum = threadIdx.y * blockDim.x + threadIdx.x;
int j = blockNum * (blockDim.x * blockDim.y) + threadNum;
// Pairwise tree-add: stride l doubles each step; only threads whose global
// offset j*i is aligned to 2*l*i pull in their partner at (j+l)*i.
for(unsigned int l = 1; l < k; l *= 2)
{
if (j*i %(2*l*i) == 0 && (j+l)*i < n)
a[j*i] += a[(j+l)*i];
__syncthreads();
}
// Emit the mean of each completed k-group.
if((j*i)%k == 0)
b[j*i]= a[j*i]/k;
}
// Reads T test cases; each case reduces n = 2^p floats by averaging groups of
// k = 2^q per pass, for p/q passes, then prints the resulting means.
int main(void)
{
    cudaError_t err = cudaSuccess;
    int T = 0;
    scanf("%d", &T);
    // FIX: the original 'return 0' sat INSIDE this loop, so only the first of
    // the T test cases was ever processed.
    while(T--){
        int p,q;
        scanf("%d %d",&p, &q);
        int n = 2<<(p-1);   // n = 2^p input elements
        int k = 2<<(q-1);   // k = 2^q reduction factor per pass
        size_t size = n*sizeof(float);
        float *h_a = (float*)malloc(size);
        if (h_a == NULL)
        {
            fprintf(stderr, "Failed to allocate host array!\n");
            exit(EXIT_FAILURE);
        }
        for (int i = 0; i < n; ++i)
            scanf("%f", &h_a[i]);
        float *d_a = NULL;
        err = cudaMalloc((void **)&d_a, size);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to allocate device array a (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        err = cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy array a from host to device (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        int m = 0;              // FIX: was uninitialized when p < q (pass loop never runs)
        int n2 = n;
        float *d_b = NULL;
        cudaMalloc((void **)&d_b, size);
        cudaMemset(d_b, 0, size);   // FIX: was 'n' BYTES, zeroing only a quarter of the buffer
        float *d_a_alloc = d_a;     // keep the original allocation; d_a is re-pointed below
        int i = 1;
        int nt = (int)(p/q);
        while(nt--)
        {
            m = n/k;
            dim3 blocksPerGrid(sqrt(m), sqrt(m), 1);
            dim3 threadsPerBlock(k, 1, 1);
            reduce_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, n2, k, i);
            n = m;
            d_a = d_b;   // subsequent passes reduce in place within d_b
            i *= k;
        }
        float *h_b = (float*)malloc(size);
        cudaMemcpy(h_b, d_b, size, cudaMemcpyDeviceToHost);
        for(int j=0; j<m; j+=k)
            printf("%.2f ", h_b[j]);
        // FIX: the original freed d_a after 'd_a = d_b', double-freeing d_b and
        // leaking the input buffer.
        cudaFree(d_a_alloc);
        cudaFree(d_b);
        free(h_a);              // FIX: h_a was leaked
        free(h_b);
        err = cudaDeviceReset();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }
    return 0;
}
8,612 | #include "includes.h"
// Applies a 3x3 rotation (rot_matrix, row-major, applied as a row-vector
// product) to a point cloud stored as three planes of 'total' floats each:
// z values first, then y, then x.  One thread per point, updated in place.
__global__ void rotate_3D(float* coords, size_t dim_z, size_t dim_y, size_t dim_x, float* rot_matrix){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t total = dim_x * dim_y * dim_z;
    // FIX: the original loaded coords[index .. index+2*total] BEFORE this bounds
    // check, giving out-of-bounds reads in the tail block.
    if(index < total){
        float old_z = coords[index];
        float old_y = coords[index + total];
        float old_x = coords[index + 2 * total];
        float new_z = old_z * rot_matrix[0] + old_y * rot_matrix[3] + old_x * rot_matrix[6];
        float new_y = old_z * rot_matrix[1] + old_y * rot_matrix[4] + old_x * rot_matrix[7];
        float new_x = old_z * rot_matrix[2] + old_y * rot_matrix[5] + old_x * rot_matrix[8];
        // Each thread touches only its own three slots, so no barrier is needed
        // (the original's __syncthreads() calls sat in divergent control flow,
        // which is undefined behavior, and synchronized nothing useful).
        coords[index] = new_z;
        coords[index + total] = new_y;
        coords[index + 2 * total] = new_x;
    }
}
8,613 |
/*
#include "SDSphere.cuh"
#include "cuda_runtime.h"
SDSphere::SDSphere(float radius, glm::vec3 position) : radius(radius), position(position)
{
}
inline DistancePrimitive*
SDSphere::copyToDevice()
{
SDSphere* deviceSphere;
cudaMalloc((void **)&deviceSphere, sizeof(SDSphere));
cudaMemcpy(deviceSphere, this, sizeof(SDSphere), cudaMemcpyHostToDevice);
return deviceSphere;
}
inline float
SDSphere::distanceFromPoint(glm::vec3 point)
{
return GLMUtil::length(point - position) - radius;
}
inline AABB
SDSphere::calculateBoundingVolume()
{
return AABB(glm::vec2(position.x - radius, position.y - radius), glm::vec2(position.x + radius, position.y + radius));
}
*/ |
8,614 | #include <stdlib.h>
#include <stdio.h>
// compile with:
// nvcc sgemv.cu -o sgemv
// y = a * M x for a row-major n x n matrix; one thread per output row, so each
// thread walks one contiguous row of m.
__global__ void
sgemv_rowmajor(int n, float a, float *m, float *x, float *y){
    const int r = blockIdx.x*blockDim.x + threadIdx.x;
    if (r >= n) return;
    float acc = 0.0;
    const float *rowPtr = m + r*n;   // start of this thread's row
    for (int c = 0; c < n; c++)
        acc += rowPtr[c] * x[c];
    y[r] = a*acc;
}
// y = a * M x for a column-major n x n matrix; one thread per output row.
// Adjacent threads read adjacent elements of each column, so global loads
// coalesce (unlike the row-major variant).
__global__ void
sgemv_colmajor(int n, float a, float *m, float *x, float *y){
    const int r = blockIdx.x*blockDim.x + threadIdx.x;
    if (r >= n) return;
    float acc = 0.0;
    for (int c = 0; c < n; c++)
        acc += m[c*n + r] * x[c];
    y[r] = a*acc;
}
// Times row-major vs column-major SGEMV on an n x n matrix and prints the
// first 10 entries of each result.
int main() {
    int n=2000;
    // FIX: byte count was n*sizeof(int); same size on common ABIs but the
    // buffers hold floats.
    int memSize = n*sizeof(float);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float *a, *d_a;
    a = (float*) malloc (n*sizeof(*a));
    cudaMalloc( (void**) &d_a, memSize);
    float *b, *d_b;
    b = (float*) malloc (n*sizeof(*b));
    cudaMalloc( (void**) &d_b, memSize);
    float *m, *d_m;
    m = (float*) malloc (n*n*sizeof(*m));
    cudaMalloc( (void**) &d_m, memSize*n);
    // a[j] = j, b = 0, m[j][k] = j + k (row-major fill)
    for(int j=0; j<n; j++){
        a[j] = (float) j;
        b[j] = (float) 0;
        for(int k=0; k<n; k++)
            m[j*n+k] = (float) j+k;
    }
    float p = 1.0;
    cudaMemcpy( d_a, a, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, b, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy( d_m, m, memSize*n, cudaMemcpyHostToDevice);
    dim3 block(256);
    dim3 grid((n+block.x-1)/(block.x));
    // --- row-major SGEMV ---
    cudaEventRecord(start);
    sgemv_rowmajor<<<grid,block>>>(n, p, d_m, d_a, d_b);
    cudaEventRecord(stop);
    cudaMemcpy( b, d_b, memSize, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);   // FIX: ensure the stop event completed before reading it
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("\n\nruntime row-major sgemv [s]: %f\n", milliseconds/1000.0);
    printf("\nresult:\n");
    for(int j=0; j<10; j++)
        printf("%f\n",b[j]);
    // --- column-major SGEMV (d_m still holds row-major data, so this times the
    // transposed product; behavior preserved from the original) ---
    cudaEventRecord(start);
    sgemv_colmajor<<<grid,block>>>(n, p, d_m, d_a, d_b);
    cudaEventRecord(stop);
    cudaMemcpy( b, d_b, memSize, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("\n\nruntime col-major sgemv[s]: %f\n", milliseconds/1000.0);
    printf("\nresult:\n");
    for(int j=0; j<10; j++)
        printf("%f\n",b[j]);
    cudaEventDestroy(start);   // FIX: events were leaked
    cudaEventDestroy(stop);
    cudaFree(d_a);
    free(a);
    cudaFree(d_b);
    free(b);
    cudaFree(d_m);
    free(m);
    return 0;
}
|
8,615 | // Write a CUDA program to multiply two matrices. Input: Matrix 1 size: m X n Matrix 2 size: n X p. Output: Result matrix
// Used the Error Handler function written by Dr. Rama in his Colab shared to us on google classroom
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
__managed__ int m = 5;
__managed__ int n = 5;
__managed__ int p = 5;
// Greatest common divisor (Euclid's algorithm, iterative form).
int GCD(int a, int b) {
    while (b != 0) {
        int r = a % b;
        a = b;
        b = r;
    }
    return a;
}
// Prints the CUDA error with its source location and aborts; no-op on success.
// Invoked through the HANDLE_ERROR macro, which supplies __FILE__/__LINE__.
static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
// C = A x B with A (m x n) and B (n x p), all row-major ints; m/n/p are the
// managed globals above.  One thread per output element: blockIdx.x/threadIdx.x
// index rows, blockIdx.y/threadIdx.y index columns.
__global__ void matrixMult(int *a, int *b, int *mul) {
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= m || c >= p) return;
    int acc = 0;                       // accumulate in a register, store once
    for (int k = 0; k < n; k++)
        acc += a[r * n + k] * b[k * p + c];
    mul[r * p + c] = acc;
}
// Reads m, n, p; multiplies a random m x n matrix by a random n x p matrix on
// the GPU and prints the m x p result.
int main() {
    scanf("%d %d %d", &m, &n, &p);
    puts(" ");
    srand(time(0));
    int *a;        // host A (m x n)
    int *b;        // host B (n x p)
    int *mul;      // host result (m x p)
    int *c_a;
    int *c_b;
    int *c_mul;
    a = (int *)malloc(m * n * sizeof(int));
    b = (int *)malloc(n * p * sizeof(int));
    mul = (int *)malloc(m * p * sizeof(int));
    HANDLE_ERROR(cudaMalloc((void **)&c_a, m * n * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&c_b, n * p * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&c_mul, m * p * sizeof(int)));
    // Fill A and B with random values in [0, 100).
    for (int i = 0; i < m; i++) {
        for(int j = 0; j < n; j++) {
            a[i * n + j] = rand() % 100;
        }
    }
    for (int i = 0; i < n; i++) {
        for(int j = 0; j < p; j++) {
            b[i * p + j] = rand() % 100;
        }
    }
    HANDLE_ERROR(cudaMemcpy(c_a, a, m * n * sizeof(int), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(c_b, b, n * p * sizeof(int), cudaMemcpyHostToDevice));
    // FIX: the old GCD-based configuration threads(m/gcd, p/gcd) could exceed
    // the 1024-threads-per-block hardware limit (e.g. for coprime m and p the
    // block was m x p), silently failing the launch.  Use a fixed 16x16 block
    // and a ceil-divided grid; the kernel bounds-checks the tail.
    dim3 threads(16, 16);
    dim3 blocks((m + threads.x - 1) / threads.x, (p + threads.y - 1) / threads.y);
    matrixMult<<<blocks, threads>>>(c_a, c_b, c_mul);
    cudaDeviceSynchronize();
    HANDLE_ERROR(cudaMemcpy(mul, c_mul, m * p * sizeof(int), cudaMemcpyDeviceToHost));
    for (int i = 0; i < m; i++) {
        for(int j = 0; j < p; j++) printf("%d ", mul[i * p + j]);
        puts(" ");
    }
    puts(" ");
    free(a);
    free(b);
    free(mul);
    HANDLE_ERROR(cudaFree(c_a));
    HANDLE_ERROR(cudaFree(c_b));
    HANDLE_ERROR(cudaFree(c_mul));
    return 0;
}
|
8,616 | // Created by luozhiwang (luozw1994@outlook.com)
// Date: 2019/12/27
#include <cuda.h>
#include <random>
// Prints the CUDA error with its source location and aborts; no-op on success.
// Invoked through the HANDLE_ERROR macro below.
static void HandleError(cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// CPU reference: tallies lowercase letters 'a'..'z' of array[0..max_len) into
// hist[0..26); other bytes are ignored.  hist must be zero-initialized by the
// caller (this function only increments).
void histogram_cpu(const unsigned char *array, unsigned int *hist, unsigned int max_len){
    for (unsigned int i = 0; i < max_len; ++i){
        const int bucket = array[i] - 'a';
        if (bucket >= 0 && bucket < 26)
            ++hist[bucket];
    }
}
// Returns true iff the first max_len entries of the two histograms match.
bool is_equal(const unsigned int *hist1, const unsigned int *hist2, const int &max_len){
    unsigned int i = 0;
    while (i < (unsigned int)max_len){
        if (hist1[i] != hist2[i])
            return false;
        ++i;
    }
    return true;
}
// 一些参数
const int length = 80;
const int thread_num = 32;
const int per_thread = 2;
const int hist_num = 26;
const int block_num = (length + thread_num * per_thread - 1) / (thread_num * per_thread);
__device__ unsigned int hist_global[hist_num] = {0};
// Letter histogram: each block builds a shared histogram of its window of
// per_thread*blockDim.x bytes, with each thread run-length-coalescing
// consecutive identical letters into a single atomicAdd, then the block
// histogram is merged into the device-global hist_global.
__global__ void histogram(const unsigned char *array, unsigned int max_len){
    // FIX: the shared counters were declared 'float', so tallies were
    // accumulated in floating point; use unsigned int for exact counts
    // (also matches hist_global's element type).
    __shared__ unsigned int hist_ds[hist_num];
    int pos;
    int pre = -1;   // bucket of the previous byte seen by this thread
    int acc = 0;    // length of the current run of identical buckets
    int tx = threadIdx.x;
    int bidx = blockIdx.x;
    int bdx = blockDim.x;
    int idx = tx + bidx * bdx * per_thread;
    // Cooperatively zero the shared histogram.
    for (unsigned int i = tx; i < hist_num; i += bdx){
        hist_ds[i] = 0u;
    }
    __syncthreads();
    // Strided scan of this block's window; flush a run when the bucket changes.
    for (unsigned int i = idx; i < (bidx+1) * bdx * per_thread && i < max_len; i += bdx){
        pos = array[i] - 'a';
        if (pre != pos){
            if (0 <= pre && pre < hist_num){
                atomicAdd(&hist_ds[pre], (unsigned int)acc);
            }
            acc = 1;
            pre = pos;
        }else{
            acc +=1;
        }
    }
    // Flush the final run.
    if (0 <= pre && pre < hist_num){
        atomicAdd(&hist_ds[pre], (unsigned int)acc);
    }
    __syncthreads();
    // Merge the block histogram into the global one.
    for (unsigned int i = tx; i < hist_num; i += bdx){
        atomicAdd(&hist_global[i], hist_ds[i]);
    }
}
// Generates a random lowercase string, histograms it on the GPU (timed with
// events), and verifies the result against the CPU reference.
int main(int args, char **argv){
    printf("Block num is %d\nThread num is %d\n",block_num, thread_num);
    // Definition
    float elapsed_time;
    char tmp[26];   // the alphabet 'a'..'z'
    unsigned char *array_host = new unsigned char[length];
    unsigned int *hist_host = new unsigned int [hist_num];
    // FIX: value-initialize — histogram_cpu only increments, and the original
    // accumulated onto uninitialized memory.
    unsigned int *hist_cpu = new unsigned int [hist_num]();
    unsigned char *array_dev;
    cudaEvent_t start, stop;
    HANDLE_ERROR(cudaMalloc((void**)&array_dev, sizeof(char) * length));
    HANDLE_ERROR(cudaEventCreate(&start));
    HANDLE_ERROR(cudaEventCreate(&stop));
    // Init Host ====> Dev
    for (int i = 0; i < 26; ++i){
        tmp[i] = 'a' + i;
    }
    std::default_random_engine e;
    // FIX: uniform_int_distribution bounds are INCLUSIVE; (0, 26) could yield
    // 26 and read tmp[26], one past the end of the array.
    std::uniform_int_distribution<int> distribution(0, 25);
    for (int i = 0; i < length; ++i){
        array_host[i] = tmp[distribution(e)];
    }
    HANDLE_ERROR(cudaMemcpy(array_dev, array_host, sizeof(char) * length, cudaMemcpyHostToDevice));
    // launch kernel, timed with events
    HANDLE_ERROR(cudaEventRecord(start, 0));
    histogram<<<block_num, thread_num>>>(array_dev, length);
    printf("Histogram \n");
    HANDLE_ERROR(cudaEventRecord(stop, 0));
    // elapsed time
    HANDLE_ERROR(cudaEventSynchronize(stop));
    HANDLE_ERROR(cudaEventElapsedTime(&elapsed_time ,start, stop));
    printf("Elapsed Time is %f \n",elapsed_time);
    // Dev ====> Host
    HANDLE_ERROR(cudaMemcpyFromSymbol(hist_host, hist_global, sizeof(int) * hist_num));
    // verify the output against the CPU reference
    histogram_cpu(array_host, hist_cpu, length);
    if (is_equal(hist_host, hist_cpu, hist_num)){
        printf("Answer is Correct\n");
    }else{
        printf("Answer is Wrong\n");
        for (int i = 0; i < hist_num; ++i){
            printf("%d %d %d \n", i, hist_host[i], hist_cpu[i]);
        }
    }
    // Destroy
    HANDLE_ERROR(cudaEventDestroy(start));
    HANDLE_ERROR(cudaEventDestroy(stop));
    HANDLE_ERROR(cudaFree(array_dev));
    // FIX: these buffers come from new[]; calling free() on them is undefined
    // behavior — use delete[].
    delete[] array_host;
    delete[] hist_host;
    delete[] hist_cpu;
}
8,617 |
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/***********************************************************************
*******
Demonstrates how to crack an encrypted password using a simple
"brute force" algorithm. Works on passwords that consist only of 2
uppercase
letters and a 2 digit integer. Your personalised data set is included
in the
code.
Compile with:
nvcc -o CrackAZ99-With-Cuda CrackAZ99-With-Cuda.cu
If you want to analyse the results then use the redirection operator
to send
output to a file that you can view using an editor or the less
utility:
./CrackAZ99-With-Cuda > results.txt
Dr Kevan Buckley, University of Wolverhampton, 2018
************************************************************************
******/
// Compares 'attempts' against the four target passwords; prints and returns 1
// on a match, 0 otherwise.
// FIX: the original advanced a single walking pointer 'x' across ALL four
// comparisons, so only the first candidate was compared from the start of the
// attempt — later candidates were checked against whatever suffix remained.
// It also fell through without returning 1 for the first three candidates.
// Each candidate is now compared from the beginning of the attempt.
__device__ int is_a_match(char *attempts) { // Compares each password attempt.
    char plain_passwords1[] = "IT8312";
    char plain_passwords2[] = "RB3211";
    char plain_passwords3[] = "AV7213";
    char plain_passwords4[] = "ES2114";
    const char *candidates[4] = { plain_passwords1, plain_passwords2,
                                  plain_passwords3, plain_passwords4 };
    for (int c = 0; c < 4; ++c) {
        const char *x = attempts;      // restart at the attempt's first char
        const char *p = candidates[c];
        while (*x == *p) {
            if (*x == '\0') {          // matched all the way to the terminator
                printf("Found password successfully: %s\n", candidates[c]);
                return 1;
            }
            x++;
            p++;
        }
    }
    return 0;
}
// Brute-force search over passwords of the form <letter><letter><4 digits>:
// blockIdx.x picks the first uppercase letter, threadIdx.x the second, and
// each thread enumerates all 10^4 digit suffixes, testing via is_a_match.
__global__ void kernel() {
    char password[7];
    password[6] = '\0';
    password[0] = (char)(blockIdx.x + 65);    // 'A' + block index
    password[1] = (char)(threadIdx.x + 65);   // 'A' + thread index
    for (char d0 = '0'; d0 <= '9'; d0++) {
        for (char d1 = '0'; d1 <= '9'; d1++) {
            for (char d2 = '0'; d2 <= '9'; d2++) {
                for (char d3 = '0'; d3 <= '9'; d3++) {
                    password[2] = d0;
                    password[3] = d1;
                    password[4] = d2;
                    password[5] = d3;
                    is_a_match(password);   // prints on success
                }
            }
        }
    }
}
// Computes finish - start in nanoseconds into *difference, borrowing from the
// seconds field when the nanosecond delta is negative.  Returns 0 when the
// difference is positive, 1 otherwise.
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
    long long int sec  = finish->tv_sec  - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    if (nsec < 0) {          // borrow one second
        sec  -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return !(*difference > 0);
}
// Launches the brute-force kernel (26 blocks x 26 threads cover the two
// leading letters) and reports the wall-clock time.
int main() {
    // starting kernel
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26, 26>>>();
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported equivalent.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
|
8,618 | #include <cassert>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <numeric>
#include <vector>
using namespace std;
#define TILE_SIZE 64
#define MAX_MASK_WIDTH 5
__constant__ float c_M[MAX_MASK_WIDTH];
// 1D convolution, one output per thread, mask M read from global memory.
// Elements of N outside [0, width) contribute zero (ghost cells).
__global__
void convolution1(const float* N, const float* M, float* P, int mask_width, int width)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    float Pvalue = 0.0f;
    int N_start_point = i - (mask_width/2);
    for (int j = 0; j < mask_width; ++j) {
        if (N_start_point + j >= 0 && N_start_point + j < width) {
            Pvalue += N[N_start_point + j]*M[j];
        }
    }
    // FIX: the store was unguarded — the last block's extra threads wrote past
    // the end of P.
    if ((int)i < width) P[i] = Pvalue;
}
// 1D convolution, one output per thread, mask read from the c_M constant-memory
// array (broadcast-friendly).  Out-of-range N elements contribute zero.
__global__
void convolution2(const float* N, float* P, int mask_width, int width)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    float Pvalue = 0.0f;
    int N_start_point = i - (mask_width / 2);
    for (int j = 0; j < mask_width; ++j) {
        if (N_start_point + j >= 0 && N_start_point + j < width) {
            Pvalue += N[N_start_point + j] * c_M[j];
        }
    }
    // FIX: the store was unguarded — the last block's extra threads wrote past
    // the end of P.
    if ((int)i < width) P[i] = Pvalue;
}
// Tiled 1D convolution: each block stages its TILE_SIZE elements of N plus
// left/right halos into shared memory, then convolves against the constant
// mask c_M.  Requires blockDim.x == TILE_SIZE and mask_width <= MAX_MASK_WIDTH.
__global__
void convolution3(const float* N, float* P, int mask_width, int width)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ float Nds[TILE_SIZE + MAX_MASK_WIDTH - 1];
    // load N from global memory into shared memory
    int n = mask_width/2;
    // Left halo: last n threads fetch the previous block's tail (zero at the
    // array's left edge).
    if (threadIdx.x >= blockDim.x - n) {
        int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
        Nds[threadIdx.x - (blockDim.x - n)] = (halo_index_left < 0) ? 0 : N[halo_index_left];
    }
    // Center: FIX — the load was unguarded, so the last block's extra threads
    // read past the end of N.
    Nds[n + threadIdx.x] = ((int)i < width) ? N[i] : 0.0f;
    // Right halo: first n threads fetch the next block's head (zero past end).
    if (threadIdx.x < n) {
        int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
        Nds[n + blockDim.x + threadIdx.x] = (halo_index_right >= width) ? 0 : N[halo_index_right];
    }
    __syncthreads();
    float Pvalue = 0.0f;
    for (int j = 0; j < mask_width; ++j) {
        Pvalue += Nds[threadIdx.x + j]*c_M[j];
    }
    // FIX: guard the store as well.
    if ((int)i < width) P[i] = Pvalue;
}
// Driver: prints GPU properties, builds a ramp input of argv[1] elements, and
// runs the constant-memory (version 1) or tiled (version 2) convolution.
int main(int argc, char* argv[])
{
    // FIX: argv[1]/argv[2] were dereferenced without checking argc.
    if (argc < 3) {
        cout << "Usage: " << argv[0] << " <size> <version: 1|2>" << endl;
        return 1;
    }
    // Query GPU properties
    cudaDeviceProp dev_prop;
    cudaGetDeviceProperties(&dev_prop, 0);
    cout << "---------------------------------------------" << endl;
    cout << "                GPU PROPERTIES               " << endl;
    cout << "---------------------------------------------" << endl;
    cout << "Device Name: " << dev_prop.name << endl;
    cout << "Memory Clock Rate: " << dev_prop.memoryClockRate/1.0e6 << " GHz" << endl;
    cout << "Memory Bandwidth: " << 2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6 << " GB/s" << endl;
    cout << "Number of SM: " << dev_prop.multiProcessorCount << endl;
    cout << "Max Threads per SM: " << dev_prop.maxThreadsPerMultiProcessor << endl;
    cout << "Registers per Block: " << dev_prop.regsPerBlock << endl;
    cout << "Shared Memory per Block: " << dev_prop.sharedMemPerBlock << " B" << endl;
    cout << "Total Global Memory per Block: " << dev_prop.totalGlobalMem/1.0e9 << " GB" << endl;
    cout << endl;
    int size = atoi(argv[1]);
    // creating vector on host side: ramp 0, 1, 2, ...
    vector<float> h_N(size, 1.0f);
    std::iota(h_N.begin(), h_N.end(), 0.0f);
    // Copy vector on device side
    float* d_N;
    cudaMalloc((void**)&d_N, size*sizeof(float));
    cudaMemcpy((void*)d_N, (void*)h_N.data(), size*sizeof(float), cudaMemcpyHostToDevice);
    // Create mask and send to device's constant memory
    vector<float> h_M = { 1.0f, 1.0f, 2.0f, 1.0f, 1.0f };
    int mask_width = h_M.size();
    // FIX: the bound must be inclusive — the mask has exactly MAX_MASK_WIDTH
    // taps, so the original 'mask_width < MAX_MASK_WIDTH' always fired.
    assert(mask_width <= MAX_MASK_WIDTH);
    cudaMemcpyToSymbol(c_M, (void*)h_M.data(), mask_width*sizeof(float));
    // Allocate space for solution on device
    float* d_P;
    cudaMalloc((void**)&d_P, size*sizeof(float));
    // call Kernel
    int blockDim = TILE_SIZE;
    int gridDim = ceil(size/(float)blockDim);
    int version = atoi(argv[2]);
    if(version == 1)
        convolution2<<<gridDim, blockDim>>>(d_N, d_P, mask_width, size);
    else if(version == 2)
        convolution3<<<gridDim, blockDim>>>(d_N, d_P, mask_width, size);
    else
        cout << "Wrong inputs!" << endl;
    // Recover vector from device to host
    vector<float> h_P(size);
    cudaMemcpy((void*)h_P.data(), (void*)d_P, size*sizeof(float), cudaMemcpyDeviceToHost);
    // Finalize storage
    cudaFree(d_N);
    cudaFree(d_P);
    cout << "Closing..." << endl;
    return 0;
}
|
8,619 | #include "includes.h"
// Adds sqrt(sin^2 + cos^2) (i.e. ~1.0, with rounding noise) of the element's
// global index to a[i]; 'offset' shifts the index range, letting the host
// partition work across streams.  Assumes the launch covers valid indices.
__global__ void kernel(float *a, int offset)
{
    const int idx = offset + threadIdx.x + blockIdx.x*blockDim.x;
    const float v = (float)idx;
    const float s = sinf(v);
    const float c = cosf(v);
    a[idx] += sqrtf(s*s + c*c);
}
8,620 | #include <stdio.h>
#include <math.h>
// CUDA kernel to add elements of two arrays
// Element-wise y += x for a single block; threadIdx.x is the element index,
// so the launch must use exactly one block of N threads.
__global__ void add(float* x, float* y)
{
    const int i = threadIdx.x;
    y[i] += x[i];
}
// Unified-memory demo: fills x with 1s and y with 2s, adds them on the GPU,
// and reports the maximum deviation from the expected 3.0.
int main(void)
{
    const int N = 1024;
    const int bytes = sizeof(float) * N;
    float* x;
    float* y;

    // Allocate Unified Memory -- accessible from CPU or GPU
    cudaMallocManaged(&x, bytes);
    cudaMallocManaged(&y, bytes);

    // Initialize both arrays on the host through the unified pointers.
    for (int i = 0; i < N; i++) {
        x[i] = 1.0;
        y[i] = 2.0;
    }

    // One block of N threads, matching the kernel's threadIdx-only indexing.
    add <<<1, N >>> (x, y);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Check for errors (all values should be 3.0)
    float maxError = 0.0;
    for (int i = 0; i < N; i++)
        maxError = (float)fmax(maxError, fabs(y[i] - 3.0));
    printf("Max error: %lf\n", maxError);

    // Free cuda memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
8,621 | #include "defines.cuh"
#include "function_defines.cuh"
/*__device__ __host__
void ViewPlane::BindPBOResource(cudaGraphicsResource *resource){
//this->pbo_resource = cuda_pbo_resource;
}*/
|
8,622 | __global__ void _AFFINE_KERNEL(int* ,int ,int ,int ,int ,int);
#define MIN(a,b) (((a)<(b))?(a):(b))
#include<cuda.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<time.h>
// Generated driver for _AFFINE_KERNEL: reads a MatrixMarket-style file
// (argv[4]), loads N values into q, then executes a wavefront of tiled kernel
// launches (thread/block counts from argv[1]/argv[2]) and appends timing data
// to "<input>.data".
// NOTE(review): readfile is used unconditionally but only assigned when
// argc > 4, and fopen's result is never checked — presumably the harness
// always supplies all arguments; verify.
int main(int argc, char** argv)
{
int N = 1000;
int N_NODES = 100;
int data = 1;
int _NTHREAD = 1, _NBLOCK = 1;
char* readfile, *outfile;
if(argc>1) _NTHREAD = atoi(argv[1]);
if(argc>2) _NBLOCK = atoi(argv[2]);
if(argc>3) data = atoi(argv[3]) + 1;
if(argc>4) readfile = argv[4];
int i,j;
//srand(time(NULL));
FILE* f;
f = fopen(readfile, "r");
j=0;
char c;
// Skip '%'-prefixed MatrixMarket comment lines.
while(1){
c = fgetc(f);
if(c=='\n') {
j++;
c = fgetc(f);
if(c!='%') break;
}
}
// Header: rows, cols, entry count; N becomes the problem size.
fscanf(f, "%d", &N_NODES);
//printf("---------%d ----------",N_NODES);
fscanf(f, "%d", &N_NODES);
fscanf(f, "%d", &N);
// Not enough work for the requested thread count: report and bail out.
if(2*N<_NTHREAD*_NBLOCK) {
printf("%d\n",_NTHREAD*_NBLOCK);
fclose(f);
return 0;
}
struct timespec start, end, mid_start, mid_end;
double runTime, pre_time, post_time, computeTime;
outfile = (char*)malloc(sizeof(char)*(strlen(readfile)+10));
strcpy(outfile, readfile);
strcat(outfile, ".data");
FILE* fp;
fp = fopen(outfile, "a");
// Read N (value, index) pairs; only the values are kept in q.
int q[N],x;
for (i = 0; i < N; i++)
{
fscanf(f, "%d", &q[i]);
fscanf(f, "%d", &x);
}
clock_gettime(CLOCK_MONOTONIC, &start);
int _SZ_q_1 = N;
int *_DEV_q;
cudaMalloc((void**) &_DEV_q, sizeof(int)*_SZ_q_1);
cudaMemcpy(_DEV_q, q, sizeof(int)*_SZ_q_1, cudaMemcpyHostToDevice);
// Derive threads/blocks/tiles so _NUM_THREADS logical threads are covered by
// at most _NTHREAD threads per block and _NBLOCK blocks, tiling the rest.
int _NUM_THREADS = N,_NUM_BLOCKS=1;
int _NUM_TILE=1;
dim3 _THREADS(512);
dim3 _BLOCKS(1);
if(_NUM_THREADS < _NTHREAD)
{
_THREADS.x=_NUM_THREADS;
}
else {
_THREADS.x=_NTHREAD;
_NUM_BLOCKS=(_NUM_THREADS % _NTHREAD == 0)?(_NUM_THREADS/_NTHREAD):((_NUM_THREADS/_NTHREAD)+1);
if(_NUM_BLOCKS<_NBLOCK)
_BLOCKS.x=_NUM_BLOCKS;
else {
_BLOCKS.x=_NBLOCK;
int temp=_NUM_BLOCKS;
_NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1);
}
}
int ID_1, ID_2, START[1];
int _CUDA_TILE;
// Phi is the dependence-distance vector of the transformed loop nest.
int Phi[1]={2};
int loopUpperLimits[1]={N};
clock_gettime(CLOCK_MONOTONIC, &mid_start);
// Wavefront loop: each ID_1 step processes one 2-element slice, launching one
// kernel per tile and synchronizing to honor the loop-carried dependence.
for(ID_1=1;ID_1<=N/2+1;ID_1++)
{
for(ID_2=0;ID_2<1;ID_2++)
{
if(Phi[ID_2]>=0)
START[ID_2]=(ID_1-1)*Phi[ID_2];
else
START[ID_2]=loopUpperLimits[ID_2]+(ID_1-1)*Phi[ID_2];
}
for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++)
{
_AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_q, _SZ_q_1, START[0], MIN(START[0]+2, N), _CUDA_TILE, N);
cudaDeviceSynchronize();
}
}
clock_gettime(CLOCK_MONOTONIC, &mid_end);
cudaMemcpy(q, _DEV_q, sizeof(int)*_SZ_q_1, cudaMemcpyDeviceToHost);
cudaFree(_DEV_q);
clock_gettime(CLOCK_MONOTONIC, &end);
// Timing breakdown: setup (pre), kernel wavefront (compute), teardown (post).
pre_time = (double) ((((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000;
post_time = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec)) / 1000000000;
computeTime = (double) ((((&mid_end)->tv_sec * 1000000000) + (&mid_end)->tv_nsec) - (((&mid_start)->tv_sec * 1000000000) + (&mid_start)->tv_nsec)) / 1000000000;
runTime = (double) ((((&end)->tv_sec * 1000000000) + (&end)->tv_nsec) - (((&start)->tv_sec * 1000000000) + (&start)->tv_nsec)) / 1000000000;
printf("********************************\n");
fprintf(fp,"%d,%d,%d,%d,%d,%.14f,%.14f,%.14f,%.14f,%d\n",N,_NTHREAD*_NBLOCK,_THREADS.x,_BLOCKS.x,data,pre_time,computeTime,post_time,runTime,_CUDA_TILE);
//fprintf(fp,"%d,%d,%.14f\n",N_EDGES,data,runTime);
//fclose(fp);
printf("RUN TIME: %.14f\n", runTime);
fclose(fp);
fclose(f);
return 0;
}
// One tile of the affine loop body q[N-i] = q[N-i+2], executed for every
// global thread id i that falls inside the inclusive range [CUDA_L_i, CUDA_U_i]
// supplied by the host driver loop.
__global__ void _AFFINE_KERNEL(int* q, int _SZ_q_1, int CUDA_L_i, int CUDA_U_i, int _CUDA_TILE, int N)
{
    // Global index across tiles: each tile spans gridDim.x*blockDim.x threads.
    int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x;
    // Inclusive bounds check (note: upper bound is <=, not <).
    if((CUDA_L_i<=i)&&(i<=CUDA_U_i)){
        // NOTE(review): when i == 0 this reads q[N+2], which is only in bounds
        // if _SZ_q_1 >= N+3 — confirm against the host-side allocation of q.
        q[N-i]=q[N-i+2];
    }}
|
8,623 | #ifdef __cplusplus
extern "C" {
#endif
// Mandelbrot escape-time kernel for one horizontal band of the image.
// A holds one int per pixel of the band (rows start_hauteur..end_hauteur-1,
// row-major with stride `largeur`); each pixel gets its iteration count
// (capped at N).
__global__ void mandelbrot(int* A, const int N, const int largeur, const int hauteur, const int start_hauteur, const int end_hauteur){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Row-major decomposition of the flat thread index.
    // Fix: the row index and the output offset previously used `hauteur`
    // (the image height) where the row stride is `largeur` (the width) —
    // inconsistent with `x = idx - y*largeur`, and wrong for non-square images.
    int y = idx / largeur;
    int x = idx - (y * largeur);
    if (y < (end_hauteur - start_hauteur) && x < largeur)
    {
        int cpt = 0;
        float x1 = 0.0f;
        float y1 = 0.0f;
        float x2 = 0.0f;
        float y2 = 0.0f;
        // Map the pixel into the complex window [-2, 2] x [-2, 2].
        // Fix: float suffixes avoid implicit double-precision math on device.
        float a = 4.0f * x / largeur - 2.0f;
        float b = 4.0f * (y + start_hauteur) / hauteur - 2.0f;
        float val = x1 * x1 + y1 * y1;
        // Iterate z -> z^2 + c until escape (|z|^2 > 4) or the cap N.
        while (cpt < N && val <= 4.0f)
        {
            cpt++;
            x2 = x1 * x1 - y1 * y1 + a;
            y2 = 2.0f * x1 * y1 + b;
            x1 = x2;
            y1 = y2;
            val = x1 * x1 + y1 * y1;
        }
        A[y * largeur + x] = cpt;
    }
}
#ifdef __cplusplus
}
#endif
|
8,624 | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include "cuda.h"
__global__ void foo(int* p) {
    // GPUVerify benchmark kernel (expected verdict "pass"; see the flag
    // comments at the top of the file). The pointer computations are
    // deliberately unused: the tool only needs to analyze the address
    // arithmetic, so do not remove these seemingly dead locals.
    __shared__ int x[32];
    int *ptr_p = p + threadIdx.x;
    int *ptr_x = x + threadIdx.x;
}
|
8,625 | #include <stdio.h>
#include <iostream>
#include <unistd.h>
#include <sys/time.h>
using namespace std;
// Shorthand for formatting and printing usage options to stderr
#define fpe(msg) fprintf(stderr, "\t%s\n", msg);
// Shorthand for handling CUDA errors.
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )
/**
* DEFINED VALUES HERE
*/
#define TILE_WIDTH 32
#define TILE_HEIGHT 4
#define TILE_DEPTH 1
#define PER_THREAD_X 1
#define PER_THREAD_Y 1
#define PER_THREAD_Z 1
/*****************
* CUDA Utilites *
*****************/
void HandleError(cudaError_t err, const char *file, int line) {
    //
    // Abort the process with a diagnostic when a CUDA runtime call failed;
    // no-op on cudaSuccess. Used via the HANDLE_ERROR() macro.
    //
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
void checkCUDAError(const char *msg, bool exitOnError) {
    //
    // Report the most recent CUDA error (if any) to stderr, tagged with msg,
    // and optionally terminate the process.
    //
    const cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    if (exitOnError)
        exit(-1);
}
void cleanupCuda(void) {
    //
    // Clean up CUDA resources associated with this process.
    //
    // Fix: cudaThreadExit() has been deprecated since CUDA 4.0;
    // cudaDeviceReset() is its documented replacement and explicitly destroys
    // all device allocations and state owned by the calling process.
    //
    HANDLE_ERROR(cudaDeviceReset());
}
/*********************
* End CUDA Utilites *
*********************/
// Parsed command-line configuration. `Args args = Args();` in
// parse_arguments value-initializes every field to false/0 before parsing.
struct Args {
    bool debug;       // -D: dump the result matrix after the run
    bool sequential;  // -S: run the sequential CPU version
    bool blocked;     // -B: run the blocked sequential CPU version
    bool overlapped;  // -O: run the sequential overlapped-tiling CPU version
    // Data attributes
    int size, dimensions, alloc_size;  // alloc_size = size^dimensions (element count)
    int xSize, ySize, zSize;
    int xBlockSize, yBlockSize, zBlockSize, tBlockSize;
    // Run attributes
    int grid_size, block_count, thread_count, iterations;
};
// Print an optional error message followed by the option summary, then exit.
void usage(char *prog_name, string msg) {
    if (!msg.empty())
        fputs(msg.c_str(), stderr);
    fprintf(stderr, "%s\n", prog_name);
    fprintf(stderr, "Options are:\n");
    fpe("-n<size> Set data size (default: 1024)");
    fpe("-d<dims> Set number of data dimensions (1, 2, or 3) (default: 2)");
    fpe("-g<size> Set grid size");
    fpe("-b<num> Set block count");
    fpe("-t<num> Set thread count");
    fpe("-i<iter> Number of iterations to perform (default: 1000)");
    fpe("-x<size> X Dimension");
    fpe("-y<size> Y Dimension");
    fpe("-z<size> Z Dimension");
    fpe("-T<size> T Dimension");
    fpe("-S Execute sequential, CPU version");
    fpe("-B Execute blocked sequential, CPU version");
    fpe("-O Execute sequential overlapped tiling, CPU version");
    fpe("-D Print debug info");
    fpe("-h Print usage info (this message)");
    exit(EXIT_FAILURE);
}
// Parse the command line into an Args record, apply the defaults documented
// in usage(), and derive alloc_size / block_count / thread_count.
// Exits via usage() on an unknown option and directly on invalid sizes.
Args parse_arguments(int argc, char *argv[]) {
    Args args = Args();  // value-initializes every field to false/0
    // Fix: usage() advertises these defaults, but they were never applied, so
    // e.g. running without -n always aborted with "Data size must be larger
    // than 0" and -i-less runs performed zero iterations.
    args.size = 1024;
    args.dimensions = 2;
    args.iterations = 1000;
    int opt;
    // Parse args
    // Fix: 'X', 'Y' and 'Z' were handled in the switch but missing from the
    // optstring, so those options were unreachable.
    while ((opt = getopt(argc, argv, "n:d:g:b:t:i:x:X:y:Y:z:Z:T:hSBOD")) != -1) {
        switch (opt) {
        case 'D':
            args.debug = true;
            break;
        case 'S':
            args.sequential = true;
            break;
        case 'B':
            args.blocked = true;
            break;
        case 'O':
            args.overlapped = true;
            break;
        case 'n':
            args.size = atoi(optarg);
            break;
        case 'd':
            args.dimensions = atoi(optarg);
            break;
        case 'g':
            args.grid_size = atoi(optarg);
            break;
        case 'b':
            args.block_count = atoi(optarg);
            break;
        case 't':
            args.thread_count = atoi(optarg);
            break;
        case 'i':
            args.iterations = atoi(optarg);
            break;
        case 'x':
            args.xBlockSize = atoi(optarg);
            break;
        case 'X':
            args.xSize = atoi(optarg);
            break;
        case 'y':
            args.yBlockSize = atoi(optarg);
            break;
        case 'Y':
            args.ySize = atoi(optarg);
            break;
        case 'z':
            args.zBlockSize = atoi(optarg);
            break;
        case 'Z':
            args.zSize = atoi(optarg);
            break;
        case 'T':
            args.tBlockSize = atoi(optarg);
            break;
        case 'h':
            usage(argv[0], "");
            break;
        default:
            usage(argv[0], "Unrecognized option\n");
        }
    }
    // check sizes
    if (args.size <= 0) {
        cout << "Data size must be larger than 0" << endl;
        exit(EXIT_FAILURE);
    }
    if (args.dimensions <= 0 || args.dimensions >= 4) {
        cerr << "Data must be 1, 2, or 3 dimensions" << endl;
        exit(EXIT_FAILURE);
    }
    // Total number of elements = size^dimensions.
    if (args.dimensions == 1) {
        args.alloc_size = args.size;
    } else if (args.dimensions == 2) {
        args.alloc_size = args.size * args.size;
    } else {
        args.alloc_size = args.size * args.size * args.size;
    }
    // Derive whichever of thread/block count was not given (default 16 threads).
    if (args.thread_count > 0) {
        args.block_count = args.alloc_size / args.thread_count;
    } else if (args.block_count > 0) {
        args.thread_count = args.alloc_size / args.block_count;
    } else {
        args.thread_count = 16;
        args.block_count = args.alloc_size / args.thread_count;
    }
    return args;
}
// Dense 3-D float array plus its geometry. `elements` may be a host (malloc)
// or device (cudaMalloc) pointer depending on which helper created it;
// storage is x-fastest: elements[z*width*height + y*width + x].
typedef struct {
    int dimensions;  // number of logical dimensions (1, 2, or 3)
    int height;
    int width;
    int depth;
    float *elements; // width * height * depth floats
} Matrix;
// Allocate a width x height x depth host matrix, zero-filled, with every
// boundary plane (x, y and z faces) set to 1.0 as Dirichlet conditions for
// the stencil. Only dimensions == 3 with all extents > 1 is supported; any
// other request aborts the program.
Matrix initialize_matrix(int dimensions, int width, int height = 1, int depth =
        1) {
    Matrix data;
    if (dimensions == 3 && width > 1 && height > 1 && depth > 1) {
        data.dimensions = dimensions;  // fix: field was left uninitialized
        data.width = width;
        data.height = height;
        data.depth = depth;
        // Fix: calloc zero-fills the interior. The original malloc left all
        // non-boundary cells uninitialized, so the stencil consumed garbage
        // on its first iteration.
        data.elements = (float *) calloc(
                (size_t) width * height * depth, sizeof(float));
        if (data.elements == NULL) {
            fprintf(stderr, "Failed to allocate matrix.");
            exit(1);
        }
        for (int z = 0; z < depth; z++) {
            // X = 0 & N planes
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x += width - 1) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
            // Y = 0 & N planes
            for (int y = 0; y < height; y += height - 1) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
        // Z = 0 & N planes
        for (int z = 0; z < depth; z += depth - 1) {
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
    } else {
        fprintf(stderr, "Improper dimension or size.");
        exit(1);
    }
    return data;
}
/****************
* CUDA KERNELS *
****************/
// 7-point 3-D Jacobi-style stencil step with shared-memory tiling:
// each interior cell of `result` becomes the average of the corresponding
// `data` cell and its six face neighbours; boundary cells are copied through.
// Each block handles a TILE_DEPTH x TILE_HEIGHT x TILE_WIDTH tile plus a
// one-cell halo; each thread covers PER_THREAD_Z*PER_THREAD_Y*PER_THREAD_X
// cells. Launch contract (see callKernel): blockDim == (TILE_WIDTH, TILE_HEIGHT, 1).
__global__ void cached_plane_shared_mem(Matrix data, Matrix result) {
    int threadCol = threadIdx.x;
    int threadRow = threadIdx.y;
    int threadDep = threadIdx.z;
    int blockCol = blockIdx.x;
    int blockRow = blockIdx.y;
    int blockDep = blockIdx.z;
    // Indexes so we don't have to recompute them.
    int globalIndex[PER_THREAD_Z][PER_THREAD_Y][PER_THREAD_X];
    int globalX[PER_THREAD_X];
    int globalY[PER_THREAD_Y];
    int globalZ[PER_THREAD_Z];
    int sharedX[PER_THREAD_X];
    int sharedY[PER_THREAD_Y];
    int sharedZ[PER_THREAD_Z];
    // Shared and local data arrays; the +2 per dimension leaves room for the halo.
    __shared__ float shared[TILE_DEPTH + 2][TILE_HEIGHT + 2][TILE_WIDTH + 2];
    float local[PER_THREAD_Z][PER_THREAD_Y][PER_THREAD_X];
    /*
     * Calculate indexes into the global and shared arrays.
     * Shared indices are offset by +1 so index 0 is the halo layer.
     */
    // X shared and global
    #pragma unroll
    for (int x = 0; x < PER_THREAD_X; x++) {
        sharedX[x] = threadCol + blockDim.x * x + 1;
        globalX[x] = blockCol * TILE_WIDTH + sharedX[x] - 1;
    }
    // Y shared and global
    #pragma unroll
    for (int y = 0; y < PER_THREAD_Y; y++) {
        sharedY[y] = threadRow + blockDim.y * y + 1;
        globalY[y] = blockRow * TILE_HEIGHT + sharedY[y] - 1;
    }
    // Z shared and global
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        sharedZ[z] = threadDep + blockDim.z * z + 1;
        globalZ[z] = blockDep * TILE_DEPTH + sharedZ[z] - 1;
    }
    //
    // Global absolute (flattened, x-fastest) index per covered cell.
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        int zTemp = globalZ[z] * data.width * data.height;
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            int yTemp = globalY[y] * data.width;
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                globalIndex[z][y][x] = globalX[x] + yTemp + zTemp;
            }
        }
    }
    /*
     * Copy the tile body into shared memory.
     */
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[z][y][x]];
            }
        }
    }
    // Copy below-block dependencies (y-1 halo row) into shared memory.
    if (threadRow == 0 && blockRow > 0) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][0][sharedX[x]] =
                        data.elements[globalIndex[z][0][x] - data.width];
            }
        }
    }
    // Copy above-block dependencies (y+1 halo row) into shared memory.
    // NOTE(review): this bound uses `data.height - 1` while the x/z halo
    // guards below use `data.width` / `data.depth` — confirm the asymmetry
    // is intentional.
    if (threadRow == blockDim.y - 1
            && (blockRow + 1) * TILE_HEIGHT < data.height - 1) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[sharedZ[z]][TILE_HEIGHT + 1][sharedX[x]] =
                        data.elements[globalIndex[z][PER_THREAD_Y - 1][x]
                                + data.width];
            }
        }
    }
    // Copy left-of-block dependencies (x-1 halo column) into shared memory.
    if (threadCol == 0 && blockCol > 0) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int y = 0; y < PER_THREAD_Y; y++) {
                shared[sharedZ[z]][sharedY[y]][0] =
                        data.elements[globalIndex[z][y][0] - 1];
            }
        }
    }
    // Copy right-of-block dependencies (x+1 halo column) into shared memory.
    if (threadCol == blockDim.x - 1
            && (blockCol + 1) * TILE_WIDTH < data.width) {
        #pragma unroll
        for (int z = 0; z < PER_THREAD_Z; z++) {
            #pragma unroll
            for (int y = 0; y < PER_THREAD_Y; y++) {
                shared[sharedZ[z]][sharedY[y]][TILE_WIDTH + 1] =
                        data.elements[globalIndex[z][y][PER_THREAD_X - 1] + 1];
            }
        }
    }
    // Copy in-front-of-block dependencies (z-1 halo plane) into shared memory.
    if (threadDep == 0 && blockDep > 0) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[0][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[0][y][x]
                                - data.width * data.height];
            }
        }
    }
    // Copy behind-block dependencies (z+1 halo plane) into shared memory.
    if (threadDep == blockDim.z - 1
            && (blockDep + 1) * TILE_DEPTH < data.depth) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                shared[TILE_DEPTH + 1][sharedY[y]][sharedX[x]] =
                        data.elements[globalIndex[PER_THREAD_Z - 1][y][x]
                                + data.width * data.height];
            }
        }
    }
    // Barrier: the full tile + halo must be resident before any thread reads it.
    __syncthreads();
    /*
     * Calculate Values - we are only using the z dimension as the
     * x and y should be set to a value of 1 to calculate the data for the current plane of threads
     * Z goes from 0 to the size of the block of threads to make sure we have enough shared memory to
     * do the current set of calculations.
     */
    for (int z = 0; z < PER_THREAD_Z; z++) {
        int globZ = globalZ[z];
        int sharZ = sharedZ[z];
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            int globY = globalY[y];
            int sharY = sharedY[y];
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                int globX = globalX[x];
                int sharX = sharedX[x];
                if (globX > 0 && globX < data.width - 1 && globY > 0
                        && globY < data.height - 1 && globZ > 0
                        && globZ < data.depth - 1) {
                    // Interior cell: average of self and the six face neighbours.
                    local[z][y][x] = (shared[sharZ][sharY][sharX]
                            + shared[sharZ][sharY][sharX - 1]
                            + shared[sharZ][sharY][sharX + 1]
                            + shared[sharZ][sharY - 1][sharX]
                            + shared[sharZ][sharY + 1][sharX]
                            + shared[sharZ - 1][sharY][sharX]
                            + shared[sharZ + 1][sharY][sharX]) / 7;
                } else if (globX == 0 || globX == data.width - 1 || globY == 0
                        || globY == data.height - 1 || globZ == 0
                        || globZ == data.depth - 1) {
                    // On the edge: pass the boundary value through unchanged.
                    local[z][y][x] = shared[sharZ][sharY][sharX];
                } else {
                    // Beyond the edge, shouldn't ever hit this unless we messed something up
                }
            }
        }
    }
    // Barrier before writing results (shared is re-read above until here).
    __syncthreads();
    #pragma unroll
    for (int z = 0; z < PER_THREAD_Z; z++) {
        #pragma unroll
        for (int y = 0; y < PER_THREAD_Y; y++) {
            #pragma unroll
            for (int x = 0; x < PER_THREAD_X; x++) {
                result.elements[globalIndex[z][y][x]] = local[z][y][x];
            }
        }
    }
}
/********************
* END CUDA KERNELS *
********************/
//
// Initialize the dimensions of the Matrix object that contain the data
//
// Create a device-resident mirror of A's geometry; when copyToDevice is set,
// upload A's contents as well. The returned Matrix owns a cudaMalloc'd buffer.
Matrix initialize_device(Matrix A, bool copyToDevice) {
    Matrix deviceA = A;  // copy width/height/depth/dimensions metadata
    size_t sizeA = A.width * A.height * A.depth * sizeof(float);
    HANDLE_ERROR(cudaMalloc((void **) &deviceA.elements, sizeA));
    if (copyToDevice)
        HANDLE_ERROR(cudaMemcpy(deviceA.elements, A.elements, sizeA,
                cudaMemcpyHostToDevice));
    return deviceA;
}
// Run args.iterations stencil steps on the GPU, ping-ponging between two
// device buffers, then copy the final state back into B's host storage.
void callKernel(Args args, Matrix A, Matrix B) {
    Matrix deviceA, deviceB;
    deviceA = initialize_device(A, true);
    deviceB = initialize_device(B, false);
    // cudaDeviceSetLimit(cudaLimitPrintfFifoSize,size); - include this to control behavior of L1 cache
    dim3 blocks(max(args.size / TILE_WIDTH, 1), max(args.size / TILE_HEIGHT, 1),
            args.size / TILE_DEPTH);
    dim3 threads(TILE_WIDTH, TILE_HEIGHT, 1);
    for (int t = 0; t < args.iterations; t++) {
        cached_plane_shared_mem<<<blocks, threads>>>(deviceA, deviceB);
        checkCUDAError("cached_plane_shared_mem", true);
        swap(deviceA, deviceB);  // latest result is always in deviceA
    }
    HANDLE_ERROR(
            cudaMemcpy(B.elements, deviceA.elements,
                    A.width * A.height * A.depth * sizeof(float),
                    cudaMemcpyDeviceToHost));
    // Fix: both device buffers were previously leaked on every call.
    HANDLE_ERROR(cudaFree(deviceA.elements));
    HANDLE_ERROR(cudaFree(deviceB.elements));
}
// Data output
// Dump the matrix contents to stdout; refuses anything wider than 20 per axis.
void print_data(float *data, int size, int dimensions) {
    if (size > 20) {
        cerr << "Data too big to print\n" << endl;
        return;
    }
    switch (dimensions) {
    case 1:
        for (int x = 0; x < size; x++)
            printf("%.3f ", data[x]);
        break;
    case 2:
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++)
                printf("%.3f ", data[y * size + x]);
            cout << endl;
        }
        break;
    case 3:
        for (int z = 0; z < size; z++) {
            for (int y = 0; y < size; y++) {
                for (int x = 0; x < size; x++)
                    printf("%.3f ", data[z * size * size + y * size + x]);
                cout << endl;
            }
            cout << endl;
        }
        break;
    }
    cout << endl << endl;
}
// Main
// Entry point: parse options, build the two host matrices, run the GPU
// stencil, report wall-clock time, optionally dump the result.
int main(int argc, char *argv[]) {
    Args args = parse_arguments(argc, argv);
    float runtime;
    struct timeval start, end;
    Matrix A, B;
    A = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    B = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    // atexit(cleanupCuda);
    gettimeofday( &start, NULL );
    callKernel(args, A, B);
    gettimeofday( &end, NULL );
    runtime = ( ( end.tv_sec - start.tv_sec ) * 1000.0 ) + ( ( end.tv_usec - start.tv_usec ) / 1000.0 );
    printf( "Processing Time: %4.4f milliseconds\n", runtime );
    if (args.debug) {
        print_data(B.elements, args.size, args.dimensions);
    }
    // Fix: release the host matrices and return an explicit status.
    free(A.elements);
    free(B.elements);
    return 0;
}
|
8,626 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define _EPSILON 0.001
#define _ABS(x) ( x > 0.0f ? x : -x )
// Element-wise comparison of two float arrays: returns 0 when every pair
// differs by at most _EPSILON, -1 as soon as any pair differs by more.
__host__ int allclose(float* A, float* B, int len)
{
    for (int i = 0; i < len; i++)
    {
        float diff = A[i] - B[i];
        // Fix: avoid the _ABS() macro — its unparenthesized `-x` mis-expands
        // on expressions (`-A[i] - B[i]` instead of `-(A[i] - B[i])`).
        if (diff < 0.0f)
            diff = -diff;
        // Fix: the original had an unconditional `break` as the loop's last
        // statement, so only element 0 was ever checked.
        if (diff > _EPSILON)
            return -1;
    }
    return 0;
}
// row-column dot-product for matrix multiplication.
__device__ float rowcol_dot(float * matrix_a, float * matrix_b, int row, int col, int N)
{
float val = 0;
for (int k = 0; k < N; k++)
{
val += matrix_a[row*N + k] * matrix_b[col + k*N];
}
return val;
}
// Matrix multiplication kernel that is parallelized over row/column tuples.
__global__ void matrix_mult_ker(float * matrix_a, float * matrix_b, float * output_matrix, int N)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
output_matrix[col + row * N] = rowcol_dot(matrix_a, matrix_b, row, col, N);
}
// Multiply two fixed 4x4 matrices on the GPU and verify the result against
// the precomputed product. Returns 0 on match, -1 on mismatch.
__host__ int main()
{
    cudaSetDevice(0);
    const int N = 4;
    const int num_bytes = sizeof(float) * N * N;
    float h_A[] = {1.0, 2.0, 3.0, 4.0,
                   1.0, 2.0, 3.0, 4.0,
                   1.0, 2.0, 3.0, 4.0,
                   1.0, 2.0, 3.0, 4.0};
    float h_B[] = {14.0, 13.0, 12.0, 11.0,
                   14.0, 13.0, 12.0, 11.0,
                   14.0, 13.0, 12.0, 11.0,
                   14.0, 13.0, 12.0, 11.0};
    // Expected product of h_A * h_B.
    float h_AxB[] = {140.0, 130.0, 120.0, 110.0,
                     140.0, 130.0, 120.0, 110.0,
                     140.0, 130.0, 120.0, 110.0,
                     140.0, 130.0, 120.0, 110.0};
    float *d_A, *d_B, *d_output;
    cudaMalloc((float **) &d_A, num_bytes);
    cudaMalloc((float **) &d_B, num_bytes);
    cudaMemcpy(d_A, h_A, num_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, num_bytes, cudaMemcpyHostToDevice);
    cudaMalloc((float **) &d_output, num_bytes);
    float *h_output = (float *) malloc(num_bytes);
    // 2x2 blocks of 2x2 threads = one thread per output element.
    dim3 block(2, 2, 1);
    dim3 grid(2, 2, 1);
    matrix_mult_ker <<< grid, block >>> (d_A, d_B, d_output, N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_output, d_output, num_bytes, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_output);
    cudaDeviceReset();
    if (allclose(h_AxB, h_output, N * N) < 0)
    {
        printf("Error! Output of kernel does not match expected output.\n");
        free(h_output);
        return -1;
    }
    printf("Success! Output of kernel matches expected output.\n");
    free(h_output);
    return 0;
}
|
8,627 | #include <iostream>
using namespace std;
// Element-wise sum: dest[i] = a[i] + b[i] for the first `size` elements.
// All three buffers must hold at least `size` ints; dest may not alias a/b.
void addVectors(int size, int * a, int * b, int * dest) {
    int idx = 0;
    while (idx < size) {
        dest[idx] = a[idx] + b[idx];
        ++idx;
    }
}
// Sanity-check addVectors on two large sequences: a[i] = b[i] = i*i, so
// sum[10] must be 200. Returns 0 on success, 1 on a semantic error.
int main() {
    int N = 100000000;
    int * a = new int[N]; // declared on heap so no segfault
    int * b = new int[N];
    int * sum = new int[N];
    for (int i = 0; i < N; ++i) {
        a[i] = i*i;
        b[i] = i*i;
    }
    addVectors(N, a, b, sum);
    // Fix: report the outcome before the early return and release the
    // buffers; the failing branch previously fell off main (implicitly
    // returning 0, i.e. success).
    int status = 0;
    if (sum[10] != 200) {
        cout << "SEMANTIC ERROR" << endl;
        status = 1;
    }
    delete [] a;
    delete [] b;
    delete [] sum;
    return status;
}
8,628 | #include <iostream>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
// Element-wise c = a + b. Assumes the launch configuration covers exactly the
// array length (no bounds guard), as in main's add<<<24,24>>> over N = 576.
__global__ void add(int *a, int *b, int *c)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// Vector-addition demo: add two length-576 arithmetic sequences on the GPU
// and print the result as a comma-separated list.
int main(int argc, char *argv[])
{
    const int N = 2*3*4*2*3*4; // 576;
    const int nbytes = N * sizeof(int);
    // Host buffers, initialised to a simple arithmetic sequence.
    int *a = new int[N];
    int *b = new int[N];
    int *c = new int[N];
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i;
    }
    // Device buffers.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, nbytes);
    cudaMalloc((void **)&d_b, nbytes);
    cudaMalloc((void **)&d_c, nbytes);
    cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, nbytes, cudaMemcpyHostToDevice);
    // 24 blocks x 24 threads covers exactly N elements. No explicit
    // cudaDeviceSynchronize is needed before the copy-back: operations on the
    // same (default) stream always serialize.
    add<<<24,24>>>(d_a, d_b, d_c);
    cudaMemcpy(c, d_c, nbytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        std::cout << c[i] << ',';
    std::cout << '\n';
    // Cleanup.
    delete [] a; delete [] b; delete [] c;
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
8,629 | #include "includes.h"
// Transpose the N x N matrix d_a into d_b. N is expected to be a
// compile-time constant supplied by includes.h — TODO confirm.
// Both loops stride by the total grid extent in their dimension, so any 2-D
// launch shape covers the whole matrix.
__global__ void transposedMatrixKernel(int* d_a, int* d_b) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    while (i < N) {
        // Re-anchor the column cursor for each new row this thread handles.
        j = threadIdx.y + blockDim.y * blockIdx.y;
        while (j < N) {
            d_b[i * N + j] = d_a[j * N + i];
            j += blockDim.y * gridDim.y;
        }
        i += blockDim.x * gridDim.x;
    }
}
8,630 | #include<stdio.h>
#include<iostream>
using namespace std;
// Each thread adds the element two slots to its right into its own slot.
__global__ void add_neighbour(int *A) {
    int tid = threadIdx.x;
    /* int a = A[tid + offset];
    printf("%d, %d\n", tid, a);
    __syncthreads();
    A[tid + offset] = A[tid];
    __syncthreads();
    A[tid] = A[tid + offset];
    printf("%d, %d\n",tid ,A[tid]);*/
    // NOTE(review): main() launches this with 2 threads over a 2-element
    // buffer, so A[tid + 2] reads past the end of the allocation for every
    // thread — confirm the intended offset and/or allocation size.
    int a = A[tid + 2];
    // Barrier: all reads must complete before any thread writes its slot.
    __syncthreads();
    A[tid] += a;//A[tid + offset];
}
const int N = 2;
const int threadsPerBlock = 2;
const int blockPerGrid = 1;
// Host driver: fill A with 0..N-1, run add_neighbour once, print the result.
int main(){
    int* A, *devA;
    A = new int[N];
    cudaMalloc((void **) &devA, sizeof(int) * N);
    for (int i = 0; i < N; i++)
        A[i] = i;
    cudaMemcpy(devA, A, N * sizeof(int), cudaMemcpyHostToDevice);
    add_neighbour<<<blockPerGrid, threadsPerBlock>>>(devA);
    cudaMemcpy(A, devA, N * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0 ; i < N; i++)
        printf("%d \n", A[i]);
    // Fix: release device and host memory (previously leaked) and return 0 —
    // the original returned 1, which shells interpret as failure.
    cudaFree(devA);
    delete[] A;
    return 0;
}
|
// Probe every key of S against a direct-mapped hash table:
// OUT[i] = S[i] on a hit, -1 on a miss. Grid-stride loop over S.
extern "C" __global__ void probe_hashtable(int *S, int S_size, int *hash_table, int ht_size,int* OUT) {//OUT and S have the same size
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // NOTE(review): the `i < ht_size` clause also terminates the probe loop,
    // so keys at indices >= ht_size are never probed when S_size > ht_size
    // (their OUT slots stay untouched) — confirm this is intentional.
    for (int i = offset; i < S_size && i < ht_size; i += stride) {
        int key = S[i];
        // Masking assumes ht_size is a power of two — confirm at the call site.
        int hash = key & (ht_size - 1);
        if (key == hash_table[hash]) {
            OUT[i] = key;
        }else {
            OUT[i] = -1;
        }
    }
}
8,632 | #include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <thrust/gather.h>
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include <stdio.h>
using namespace thrust::placeholders;
/****************************************************/
/* POWER DIFFERENCE FUNCTOR FOR EUCLIDEAN DISTANCES */
/****************************************************/
struct PowerDifference {
    // Squared difference of two ratings, treating a zero on either side as
    // "no rating": such pairs contribute nothing to the distance.
    __host__ __device__ float operator()(const float& a, const float& b) const {
        if ( a == 0.0f || b == 0.0f) {
            return 0.0f;
        } else {
            // Fix: (a-b)*(a-b) stays in single precision; pow(a - b, 2)
            // promoted to a double-precision pow call on the device.
            const float d = a - b;
            return d * d;
        }
    }
};
struct countIfNoZeros {
    // Yields 1.0f when both inputs are strictly positive (a co-rated pair),
    // 0.0f otherwise; summed per row to count overlapping ratings.
    __host__ __device__ float operator()(const float& a, const float& b) const {
        return (a > 0.0f && b > 0.0f) ? 1.0f : 0.0f;
    }
};
/*******************/
/* EXPAND OPERATOR */
/*******************/
// expand(counts, values): repeat values[i] counts[i] times into `output`,
// e.g. counts = [3, 2], values = [A, B] -> [A, A, A, B, B].
// Returns an iterator one past the last element written.
template <typename InputIterator1, typename InputIterator2, typename OutputIterator>
OutputIterator expand(InputIterator1 first1,
                      InputIterator1 last1,
                      InputIterator2 first2,
                      OutputIterator output)
{
    typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
    difference_type input_size = thrust::distance(first1, last1);
    // Total output length = sum of the counts.
    difference_type output_size = thrust::reduce(first1, last1);
    // scan the counts to obtain output offsets for each input element
    thrust::device_vector<difference_type> output_offsets(input_size, 0);
    thrust::exclusive_scan(first1, last1, output_offsets.begin());
    // scatter the nonzero counts into their corresponding output positions
    thrust::device_vector<difference_type> output_indices(output_size, 0);
    thrust::scatter_if(thrust::counting_iterator<difference_type>(0), thrust::counting_iterator<difference_type>(input_size),
                       output_offsets.begin(), first1, output_indices.begin());
    // compute max-scan over the output indices, filling in the holes
    thrust::inclusive_scan(output_indices.begin(), output_indices.end(), output_indices.begin(), thrust::maximum<difference_type>());
    // gather input values according to index array (output = first2[output_indices])
    OutputIterator output_end = output; thrust::advance(output_end, output_size);
    thrust::gather(output_indices.begin(), output_indices.end(), first2, output);
    // return output + output_size
    thrust::advance(output, output_size);
    return output;
}
/********/
/* MAIN */
/********/
// Compute a zero-aware squared distance between corresponding rows of two
// rating matrices (sum of squared differences over co-rated items, divided by
// the co-rated count), then report the user whose row is closest.
int main()
{
    /**************************/
    /* SETTING UP THE PROBLEM */
    /**************************/
    const int N = 20;    // --- Number of vector elements
    const int Nvec = 3;  // --- Number of vectors for each matrix
    // --- Matrix initialization (row-major, Nvec rows of N ratings; 0 = unrated).
    //     Same values as before, written as arrays instead of 120 assignments.
    const float h_matrix1[Nvec * N] = {
        0, 1, 2, 3, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 2, 2, 3, 0, 5, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
        5, 5, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    const float h_matrix2[Nvec * N] = {
        0, 1, 2, 3, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 1, 2, 3, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 1, 2, 3, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    thrust::device_vector<float> d_matrix1(h_matrix1, h_matrix1 + Nvec * N);
    thrust::device_vector<float> d_matrix2(h_matrix2, h_matrix2 + Nvec * N);
    printf("\n\nFirst matrix\n");
    for(int i = 0; i < Nvec; i++) {
        std::cout << " [ ";
        for(int j = 0; j < N; j++)
            std::cout << d_matrix1[i * N + j] << " ";
        std::cout << "]\n";
    }
    printf("\n\nSecond matrix\n");
    for(int i = 0; i < Nvec; i++) {
        std::cout << " [ ";
        for(int j = 0; j < N; j++)
            std::cout << d_matrix2[i * N + j] << " ";
        std::cout << "]\n";
    }
    /****************************************************************************/
    /* CALCULATING THE EUCLIDEAN DISTANCES BETWEEN THE ROWS OF THE TWO MATRICES */
    /****************************************************************************/
    // --- Row index for every element ([0 x N, 1 x N, ...]) so reduce_by_key
    //     can collapse each row to a scalar.
    thrust::device_vector<int> d_sequence(Nvec);
    thrust::device_vector<int> d_indices(Nvec * N);
    thrust::device_vector<int> d_counts(Nvec, N);
    thrust::sequence(d_sequence.begin(), d_sequence.begin() + Nvec);
    expand(d_counts.begin(), d_counts.end(), d_sequence.begin(), d_indices.begin());
    printf("\n\nIndex matrix\n");
    for(int i = 0; i < Nvec; i++) {
        std::cout << " [ ";
        for(int j = 0; j < N; j++)
            std::cout << d_indices[i * N + j] << " ";
        std::cout << "]\n";
    }
    // Per-row sum of squared differences over co-rated items.
    thrust::device_vector<float> d_devnull(Nvec);
    thrust::device_vector<float> d_squared_differences(Nvec * N);
    thrust::transform(d_matrix1.begin(), d_matrix1.end(), d_matrix2.begin(), d_squared_differences.begin(), PowerDifference());
    thrust::device_vector<float> d_norms(Nvec);
    thrust::reduce_by_key(d_indices.begin(), d_indices.end(), d_squared_differences.begin(), d_devnull.begin(), d_norms.begin());
    // Per-row count of co-rated items (both ratings > 0).
    thrust::device_vector<float> d_cuenta(Nvec * N);
    thrust::transform(d_matrix1.begin(), d_matrix1.end(), d_matrix2.begin(), d_cuenta.begin(), countIfNoZeros());
    thrust::device_vector<float> d_dividendo(Nvec);
    thrust::reduce_by_key(d_indices.begin(), d_indices.end(), d_cuenta.begin(), d_devnull.begin(), d_dividendo.begin());
    // Normalized distance per row.
    thrust::device_vector<float> d_distancias_euclidianas(Nvec);
    thrust::transform(d_norms.begin(), d_norms.end(), d_dividendo.begin(), d_distancias_euclidianas.begin(), thrust::divides<float>());
    printf("\n\nDistancia Euclidiana \n");
    for(int i = 0; i < Nvec; i++) {
        std::cout << d_norms[i] << "/" << d_dividendo[i] << "=" << d_distancias_euclidianas[i] << " \n";
    }
    thrust::device_vector<int> user_index(Nvec);
    thrust::sequence(user_index.begin(), user_index.end(), 0, 1);
    // Fix: sort by the DISTANCES, carrying the user ids along as values. The
    // original passed the (already sorted) user indices as the keys, so
    // nothing was reordered and the reported "nearest user" was arbitrary.
    thrust::sort_by_key(d_distancias_euclidianas.begin(), d_distancias_euclidianas.end(), user_index.begin());
    // Element [0] is user 0's zero distance to itself; [1] is the nearest other user.
    std::cout << "La menor distancias es :" << d_distancias_euclidianas[1] << " del usuario " << user_index[1]<< " \n";
    return 0;
}
|
8,633 | #include "stdlib.h"
#include "stdio.h"
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include "math.h"
#define BLOCK_SIZE 32
__global__
// Swap the entries a[n*k + col1_idx] and a[n*k + col2_idx] for every
// k in [0, n), using a 1-D grid-stride loop.
void swap_cols_kernel(double *a, int col1_idx, int col2_idx, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = gridDim.x * blockDim.x;
    // Fix: the loop body previously indexed with `idx` instead of the loop
    // variable `row`, so any launch whose grid did not cover all n entries in
    // a single pass would re-swap the same entry (an even number of swaps is
    // a no-op) and leave the remaining entries untouched.
    for (int row = idx; row < n; row += offset) {
        double tmp = a[n*row+col1_idx];
        a[n*row+col1_idx] = a[n*row+col2_idx];
        a[n*row+col2_idx] = tmp;
    }
}
__global__
// Elimination update of the trailing submatrix:
// a[n*j + i] -= coefs[i] * a[n*j + fst_col_idx - 1]
// for every i >= fst_col_idx and j >= fst_row_idx (both < n).
// 2-D grid-stride loops, so any launch shape covers the submatrix.
void coef_mul_and_sub_kernel(double *a, int fst_col_idx, int fst_row_idx, double *coefs, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int offset_x = gridDim.x * blockDim.x;
    int offset_y = gridDim.y * blockDim.y;
    for (int i = fst_col_idx + idx; i < n; i+=offset_x) {
        for (int j = fst_row_idx+idy; j < n; j+=offset_y) {
            // Pivot entry for slice j: one position before fst_col_idx.
            int diag_elem_idx = n*j+fst_col_idx-1;
            a[n*j + i] -=coefs[i]*a[diag_elem_idx];
        }
    }
}
// Orders doubles by magnitude, ignoring sign; used to pick the pivot element.
struct compare_abs_value
{
    __host__ __device__
    bool operator()(double a, double b) {
        double mag_a = a < 0 ? -a : a;
        double mag_b = b < 0 ? -b : b;
        return mag_a < mag_b;
    }
};
// Print the n x n matrix in scientific notation; element (row i, col j) is
// stored at a[j*n + i].
void print_matrix(double *a, int n) {
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++)
            printf("%.10e ", a[col * n + row]);
        printf("\n");
    }
}
__global__
// Compute elimination coefficients for one slice: for every col >= fst_col_idx,
// divide a[n*row + col] by the pivot a[n*row + fst_col_idx - 1], storing the
// quotient both back into the matrix and into coefs[] for the update kernel.
// 1-D grid-stride loop, so any launch shape covers the slice.
void set_cur_row_elements_kernel(double* a, double* coefs, int n, int row, int fst_col_idx) {
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    int offset = blockDim.x*gridDim.x;
    for (int col = idx+fst_col_idx; col < n; col+=offset) {
        coefs[col] = a[n*row+col]/a[n*row+fst_col_idx-1];
        a[n*row+col] = coefs[col];
    }
}
// Read an n x n matrix from stdin (stored a[j*n+i]), run GPU elimination with
// pivoting, then print the factored matrix and the permutation vector.
int main() {
    int n;
    scanf("%d", &n);
    double* a = (double*)malloc(sizeof(double)*n*n);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            scanf("%lf", &a[j*n+i]);
    }
    int size = sizeof(double)*n*n;
    double* d_a;
    cudaMalloc(&d_a, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    int* swap_vector = (int*)malloc(sizeof(int)*n);
    double *d_coefs;
    double *coefs = (double*)malloc(sizeof(double)*n);
    cudaMalloc(&d_coefs, n*sizeof(double));
    for (int row = 0; row < n-1; row++) {
        // Pick the pivot: the entry of largest magnitude in the current slice.
        thrust::device_ptr<double> d_ptr = thrust::device_pointer_cast(&d_a[row*n+row]);
        thrust::device_ptr<double> d_row_begin_ptr = thrust::device_pointer_cast(&d_a[row*n]);
        thrust::device_ptr<double> max_elem_ptr = thrust::max_element(d_ptr, d_row_begin_ptr + n, compare_abs_value());
        int max_elem_idx = max_elem_ptr - d_row_begin_ptr;
        swap_vector[row] = max_elem_idx;
        swap_cols_kernel<<<(n+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>>(d_a, row, max_elem_idx, n);
        // Scale the trailing entries by the pivot, then update the submatrix.
        set_cur_row_elements_kernel<<<((n-row-1)+BLOCK_SIZE-1)/BLOCK_SIZE, BLOCK_SIZE>>>(d_a, d_coefs, n, row, row+1);
        cudaMemcpy(coefs, d_coefs, sizeof(double)*n, cudaMemcpyDeviceToHost);
        dim3 dimGrid = dim3(((n-row-1)+BLOCK_SIZE-1)/BLOCK_SIZE, ((n-row-1)+BLOCK_SIZE-1)/BLOCK_SIZE);
        dim3 dimBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
        coef_mul_and_sub_kernel<<<dimGrid, dimBlock>>>(d_a, row+1, row+1, d_coefs, n);
    }
    swap_vector[n-1] = n-1;
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    print_matrix(a, n);
    int* p = (int*)malloc(sizeof(int)*n);
    for (int i = 0; i < n; i++)
        p[i] = i;
    // Replay the recorded swaps in reverse to build the permutation.
    for (int i = n-1; i >= 0; i--) {
        int tmp = p[i];
        p[i] = p[swap_vector[i]];
        p[swap_vector[i]] = tmp;
    }
    for (int i = 0; i < n; i++)
        printf("%d ", p[i]);
    // Fix: `coefs` is host memory — release it with free(), not cudaFree();
    // and the device buffer d_coefs was never released at all.
    cudaFree(d_a);
    cudaFree(d_coefs);
    free(a);
    free(coefs);
    free(p);
    free(swap_vector);
    return 0;
}
|
// Shared-memory tiled matrix multiply: column j of `a` accumulates
// b * c(:,j), consuming c in 32-element tiles staged through shared memory.
// Launch contract: blockDim.x == 32; p is assumed to be a multiple of 32.
extern "C" __global__ void
mmkernel( float* a, float* b, float* c,
          int pitch_a, int pitch_b, int pitch_c,
          int n, int m, int p )
{
    // This is a shared memory version of k1.
    int tx = threadIdx.x;
    int i = blockIdx.x*32 + tx;
    int j = blockIdx.y;
    __shared__ float cb[32];
    float sum = 0.0f;
    for( int ks = 0; ks < p; ks += 32 ){
        cb[tx] = c[ks+tx+pitch_c*j];
        // Fix: barrier so every thread sees the freshly loaded tile before
        // reading it — without this the loop races on cb (and the implicit
        // warp-synchrony this relied on no longer holds on Volta+).
        __syncthreads();
        for( int k = ks; k < ks+32; ++k )
            sum += b[i+pitch_b*k] * cb[k-ks];
        // Fix: barrier so no thread overwrites cb for the next tile while
        // others are still reading the current one.
        __syncthreads();
    }
    a[i+pitch_a*j] = sum;
}
|
8,635 | #include<stdio.h>
#define N (1024*1024)
// Block-level tree reduction: each 1024-thread block sums its 1024-element
// slice of g_A in shared memory and writes the total to g_B[blockIdx.x].
__global__ void local_sum(int *g_A, int *g_B){
    __shared__ int partial[1024];
    const unsigned int tid = threadIdx.x;
    partial[tid] = g_A[tid + 1024 * blockIdx.x];
    __syncthreads();
    // Halve the number of active threads each round; barrier after every
    // round so the next one reads completed sums.
    for (int stride = 512; stride > 0; stride >>= 1) {
        if (tid < stride)
            partial[tid] += partial[tid + stride];
        __syncthreads();
    }
    if (tid == 0)
        g_B[blockIdx.x] = partial[0];
}
// Sum N ones on the GPU via a two-level block reduction and compare the
// device total against the host-computed answer.
int main()
{
    int *h_A = (int*)malloc(N*sizeof(int));
    int *h_C = (int*)malloc(sizeof(int));
    int ans = 0;
    for (int i = 0; i < N; i++){
        h_A[i] = 1;
        ans += h_A[i];
    }
    int *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, N*sizeof(int));
    cudaMalloc((void**)&d_B, 1024*sizeof(int));
    cudaMalloc((void**)&d_C, sizeof(int));
    cudaMemcpy(d_A, h_A, N*sizeof(int), cudaMemcpyHostToDevice);
    local_sum<<<1024, 1024>>>(d_A, d_B);  // 1024 per-block partial sums
    local_sum<<<   1, 1024>>>(d_B, d_C);  // reduce the partials to one value
    cudaMemcpy(h_C, d_C, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d %d\n", ans, h_C[0]);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_C);
}
8,636 | #include "includes.h"
__device__ double caculateValueOfWeight(double parameter, int sign, double alpha)
{
    // Weight delta: the parameter scaled by the sign and the step size alpha.
    return parameter * sign * alpha;
}
__global__ void updateWeights(double* weights, double* parameters,double* otherp, int sign, double alpha)
{
    // One thread per weight; only threadIdx.x is used, so this expects a
    // single-block launch (TODO confirm with the caller).
    // NOTE(review): `otherp` is accepted but never read -- confirm intent.
    int idx = threadIdx.x;
    weights[idx] += caculateValueOfWeight(parameters[idx], sign, alpha);
} |
8,637 | // includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime_api.h>
#define cudaCheckError(code) \
{ \
if((code) != cudaSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n",__FILE__,__LINE__ , \
cudaGetErrorString(code)); \
} \
}
__global__ void kernel_1d(){
    // Each thread of a 1-D launch prints its flat global index
    // (blockDim is printed in hex via %x).
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    printf("1D indexing demonstration");
    printf("block %d,blockdim %x,thread %d,index %d\n",blockIdx.x,blockDim.x,threadIdx.x,index);
}
__global__ void kernel_2d(){
    // Each thread of a 2-D launch prints its (x, y) global coordinates
    // (blockDim values are printed in hex via %x).
    int gx = threadIdx.x + blockDim.x * blockIdx.x;
    int gy = threadIdx.y + blockDim.y * blockIdx.y;
    printf("2D indexing demonstration");
    printf("blockidx.x %d blockidx.y %d\n ",blockIdx.x,blockIdx.y);
    printf("blockdim.x %d blockdim.y %d\n ",blockDim.x,blockDim.y);
    printf("block.x %d,blockdim.x %x,thread.x %d,x %d\n",blockIdx.x,blockDim.x,threadIdx.x,gx);
    printf("block.y %d,blockdim.y %x,thread.y %d,y %d\n",blockIdx.y,blockDim.y,threadIdx.y,gy);
}
int main(){
    // 4 blocks x 8 threads: 32 threads print their flat 1-D index.
    kernel_1d<<<4,8>>>();
    // BUG FIX: "<<<(2,3),(3,4)>>>" used the C comma operator, so it collapsed
    // to a 1-D <<<3,4>>> launch. dim3 is required for a real 2x3 grid of 3x4
    // blocks.
    dim3 grid(2, 3);
    dim3 block(3, 4);
    kernel_2d<<<grid, block>>>();
    cudaCheckError(cudaDeviceSynchronize());
}
|
8,638 | // This example code is from "http://blog.naver.com/PostView.nhn?blogId=sogangori&logNo=220580711355"
// Reduction code to find the max value from array
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void FindMaxCUDA(int *src, int *max_dst, int length){
    // Block-local max reduction over src; result written to max_dst[0].
    extern __shared__ int sm[]; // dynamic shared memory: blockDim.x ints (3rd launch arg)
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    if (i < length) sm[tid] = src[i];
    // BUG FIX: __syncthreads() must be reached by ALL threads of the block,
    // so the barriers moved out of the divergent `if (i < length)` body.
    __syncthreads();
    for(int s = 1; s < blockDim.x; s *= 2){
        // BUG FIX: guard tid+s / i+s so a non-power-of-two blockDim (main
        // launches 5 threads) no longer reads past the shared array or past
        // slots that were never loaded.
        if (tid%(2*s)==0 && tid + s < blockDim.x && i + s < length){
            if(sm[tid] < sm[tid+s]) sm[tid] = sm[tid+s];
        }
        __syncthreads();
    }
    if(tid==0 && i < length) max_dst[0] = sm[0]; // If you want to get max value using `divide-and-conquer`,
    // the index for max_dst should be changed to `blockIdx.x`.
    // And you may call the kernel once again for finding the max value from max_dst array.
}
int main()
{
    // Find the max of a small array on the GPU using a shared-memory reduction.
    const int arraySize = 5;
    const int a[arraySize] = {1,2,5,4,3};
    int max[1] = {0};
    int *src_d;
    int *max_d;
    cudaMalloc((void**)&src_d, sizeof(int)*arraySize);
    cudaMalloc((void**)&max_d, sizeof(int)*1);
    cudaMemcpy(src_d, a, sizeof(int)*arraySize, cudaMemcpyHostToDevice);
    // NOTE: the third parameter is used for dynamically allocating shared memory in the kernel.
    FindMaxCUDA<<<1, arraySize, arraySize*sizeof(int)>>>(src_d, max_d, arraySize);
    cudaMemcpy(max, max_d, sizeof(int), cudaMemcpyDeviceToHost); // blocking copy synchronizes
    printf("max = %d\n", max[0]);
    // BUG FIX: both device allocations were leaked.
    cudaFree(src_d);
    cudaFree(max_d);
    return 0;
}
|
8,639 | #include<stdio.h>
__global__
void cpTest(int *x) {
    // Debug helper: every thread prints its own element (no trailing newline).
    int tid = threadIdx.x;
    printf( "At id: %d, Val: %d", tid, x[tid] );
}
int main(void) {
    // Copy 5 ints to the device and have each thread print one of them.
    int foo[5] = { 1, 2, 3, 4, 5 };
    int *deviceFoo;
    cudaMalloc( (void **)&deviceFoo, 5 * sizeof(int) );
    cudaMemcpy( deviceFoo, foo, 5 * sizeof(int), cudaMemcpyHostToDevice );
    cpTest<<<1, 5>>>(deviceFoo);
    // BUG FIX: kernel launches are asynchronous and device-side printf output
    // is only delivered at a synchronization point; without this the process
    // could exit before anything is printed (host fflush does not help).
    cudaDeviceSynchronize();
    fflush(stdout);
    cudaFree(deviceFoo);  // was leaked
}
|
8,640 | #include "includes.h"
// 3x3 convolution over an I-by-J image stored in A with row stride J, writing
// to B. One thread per element; border elements (first/last row, first/last
// column) are skipped and left untouched in B.
__global__ void Convolution(double* A, double* B, int I, int J)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;  // flat element index
double c11, c12, c13, c21, c22, c23, c31, c32, c33;
// 3x3 filter coefficients; the pairing of cRC with neighbour offsets is
// fixed by the taps below (e.g. c11 multiplies A[i-J-1]).
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.1;
// Interior test: past the first row (i>J), before the last row (i<I*J-J),
// not in the first column (i%J!=0) nor the last column ((i+1)%J!=0).
// This also bounds i for over-provisioned launches.
if (i>J && i<I*J-J && (i%J!=0) && ((i+1)%J!=0)) {
B[i] = c11 * A[i-J-1] + c12 * A[i-1] + c13 * A[i+J-1]
+ c21 * A[i-J] + c22 * A[i] + c23 * A[i+J]
+ c31 * A[i-J+1] + c32 * A[i+1] + c33 * A[i+J+1];
}
} |
8,641 | #include <iostream>
__device__ int counter = 0;
__global__ void increment()
{
    // BUG FIX: `counter++` is a non-atomic read-modify-write; with ~1M
    // concurrent threads nearly all updates are lost and the result is
    // indeterminate. atomicAdd makes the final count exactly the number of
    // threads launched.
    atomicAdd(&counter, 1);
}
// Print the device-global `counter`. Launch after the increment kernel on the
// same stream so launches serialize and the value is final.
__global__ void print()
{
printf("counter = %d\n", counter);
}
int main()
{
    // Increment a __device__ counter from 1024*1024 threads, then print it.
    const int blockSize=1024;
    const int gridSize=1024;
    // BUG FIX: the launch parameters were passed as <<<blockSize, gridSize>>>.
    // The first argument is the GRID, the second the BLOCK; the swap was only
    // harmless because both happen to be 1024.
    increment<<<gridSize, blockSize>>>();
    print<<<1,1>>>();
    cudaDeviceSynchronize();  // flush device printf before the program exits
}
|
8,642 | // Created by luozhiwang (luozw1994@outlook.com)
// Date: 2020/1/3
#include <cuda.h>
#include <random>
const int m = 1000;
const int n = 1048;
static void HandleError(cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
bool verify_output(int *array1, int *array2, int len){
    // True iff the first `len` elements of the two arrays match exactly.
    for (int idx = 0; idx < len; ++idx)
        if (array1[idx] != array2[idx])
            return false;
    return true;
}
__device__ __host__
void merge(int *array1, int len1, int *array2, int len2, int *output){
    // Two-pointer merge of two sorted arrays into `output`.
    // Stable with respect to the inputs: ties are taken from array1 first.
    int i = 0, j = 0, k = 0;
    while (i < len1 && j < len2)
        output[k++] = (array1[i] <= array2[j]) ? array1[i++] : array2[j++];
    while (i < len1)
        output[k++] = array1[i++];
    while (j < len2)
        output[k++] = array2[j++];
}
// The algorithm used in the book: each step moves a lower bound.
// co_rank: given merged rank k over sorted A (len m) and B (len n), return i,
// the number of elements of A that land before position k in the merged
// output; the B-side contribution is then j = k - i.
__device__ __host__
int co_rank_aux(int k, int *A, int m, int *B, int n){
int i = k < m ? k : m ;                // start with the largest feasible i
int j = k - i;                         // invariant: i + j == k
int i_low = (k - n) < 0 ? 0 : k - n;   // smallest feasible i
int j_low = (k - m) < 0 ? 0 : k - m;   // smallest feasible j
int delta;
while (true){
if(i > 0 && j < n && A[i-1]>B[j]){
// i is too large: move half the remaining range from A's side to B's
delta = (i - i_low + 1) >> 1;
j_low = j;
i -= delta;
j += delta;
}else if(j > 0 && i < m && A[i] <= B[j-1]){
// i is too small: move half the remaining range from B's side to A's
delta = (j - j_low + 1) >> 1;
i_low = i;
i += delta;
j -= delta;
}else{
break;  // neither boundary inequality is violated: (i, j) is a valid split
}
}
return i;
}
// The book steers every step with a delta, always moving the lower bound of i
// or j; that seems unnecessary since k = i + j, so a standard binary search
// over i is used here instead. (Author's own variant, not the book's.)
// NOTE(review): the bounds differ subtly from co_rank_aux (i_max starts at
// min(k, m-1), i_min at max(0, k-n)); confirm it matches the reference for
// edge ranks such as k == m + n.
__device__ __host__
int co_rank(int k, int *A, int m, int *B, int n){
int i_max = k < m - 1? k : m - 1;  // upper bound on candidate i
int i_min = k < n ? 0 : k - n ;    // lower bound on candidate i
while (i_min < i_max){
int i = (i_max + i_min + 1) / 2;   // midpoint, biased high
int j = k - i;
if (i > 0 && j < n && A[i - 1] > B[j]){
i_max = i - 1;   // too many elements taken from A
}else if (j > 0 && i < m && A[i] <= B[j - 1]){
i_min = i + 1;   // too few elements taken from A
}else{
break;           // valid split found
}
}
return (i_max + i_min + 1) / 2;
}
// Parallel merge: every thread claims one equal-sized section of the merged
// output, locates the corresponding sub-ranges of the two sorted inputs via
// co_rank, and merges those sub-ranges sequentially.
__global__ void merge_co_rank(int *array1, int m, int *array2, int n, int *output){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int section_size = (m + n - 1) / (blockDim.x * gridDim.x) + 1;  // per-thread output share
int start_k = tid * section_size;
int end_k = min((tid + 1) * section_size, m + n);  // clamp the final section
int start_i = co_rank(start_k, array1, m, array2, n);
int end_i = co_rank(end_k, array1, m, array2, n);
int start_j = start_k - start_i;  // B-side range follows from k = i + j
int end_j = end_k - end_i;
merge(&array1[start_i], end_i - start_i, &array2[start_j], end_j - start_j, &output[start_k]);
}
void show(int *array, int num, std::string str=""){
    // Print an optional label line followed by the first `num` elements.
    printf("%s\n", str.c_str());
    for (int idx = 0; idx < num; ++idx)
        printf("%d ", array[idx]);
    printf("\n");
}
void init_order(int *array, int num, int seed = 1){
    // Fill `array` with `num` strictly increasing values: a counter advances
    // every round and is recorded with probability ~0.5, yielding a sorted
    // sequence with pseudo-random gaps (deterministic for a given seed).
    std::default_random_engine engine;
    engine.seed(seed);
    std::uniform_real_distribution<float> coin(0, 1);
    int filled = 0;
    for (int value = 0; filled < num; ++value){
        if (coin(engine) < 0.5){
            array[filled++] = value;
        }
    }
}
int main(int args, char **argv){
// Merge two sorted arrays on both CPU and GPU, time the GPU path with CUDA
// events, and verify the device result against the host reference.
int *array1 = new int [m];
int *array2 = new int [n];
int *merge_cpu = new int [m + n];   // host reference output
int *output_cpu = new int [m + n];  // device result copied back
init_order(array1, m, 1);
init_order(array2, n,2);
int *array1_dev, *array2_dev, *output_dev;
cudaEvent_t start, end;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&end));
HANDLE_ERROR(cudaMalloc((void**)&array1_dev, sizeof(int) * m));
HANDLE_ERROR(cudaMalloc((void**)&array2_dev, sizeof(int) * n));
HANDLE_ERROR(cudaMalloc((void**)&output_dev, sizeof(int) * (m + n)));
HANDLE_ERROR(cudaMemcpy(array1_dev, array1, sizeof(int) * m, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(array2_dev, array2, sizeof(int) * n, cudaMemcpyHostToDevice));
dim3 grid(2);   // 2 blocks x 16 threads = 32 merge sections
dim3 block(16);
merge(array1, m, array2, n, merge_cpu);  // host reference (merge is __host__ too)
HANDLE_ERROR(cudaEventRecord(start, 0));
merge_co_rank<<<grid, block>>>(array1_dev, m, array2_dev, n, output_dev);
// merge_co_rank<<<grid, block>>>(array2_dev, n, array1_dev, m, output_dev);
HANDLE_ERROR(cudaEventRecord(end, 0));
HANDLE_ERROR(cudaEventSynchronize(end));
float elapsed_time;
HANDLE_ERROR(cudaEventElapsedTime(&elapsed_time, start, end));
printf("Elapsed Time is %f \n",elapsed_time);
show(array1, m,"array1 ===>" );
show(array2, n,"array2 ===>");
HANDLE_ERROR(cudaMemcpy(output_cpu, output_dev, sizeof(int) * (m+n), cudaMemcpyDeviceToHost));
if (verify_output(output_cpu, merge_cpu, m + n)){
printf("Answer is Correct\n");
} else{
printf("Answer is Wrong\n");
show(merge_cpu, m+n, "output_cpu ===>");
show(output_cpu, m+n, "output_device ===>");
}
delete []array1;
delete []array2;
delete []output_cpu;
delete []merge_cpu;
HANDLE_ERROR(cudaFree(array1_dev));
HANDLE_ERROR(cudaFree(array2_dev));
HANDLE_ERROR(cudaFree(output_dev));
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(end));
} |
8,643 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <chrono>
#include <stdio.h>
using namespace std;
const int N = 10000;
__global__ void addition(int *c, int *a, int *b, const int N)
{
    // Element-wise c = a + b; one thread per element, tail-guarded.
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
// Return a pseudo-random non-negative int: 0..9 normally, 0..99 when `bigger`.
// (Comments fixed: rand() % k yields values in [0, k-1].)
int generateRandomNum(bool bigger)
{
if (!bigger)
{
return rand() % 10; // range from 0 to 9
}
else
{
return rand() % 100; // range from 0 to 99
}
}
void initVec(int a[N], const int N, bool bigger=false)
{
    // Fill `a` with N random values (0..9, or 0..99 when `bigger`) and print
    // a banner before/after.
    cout << "input array: ";
    for (int idx = 0; idx < N; idx++)
    {
        a[idx] = generateRandomNum(bigger);
    }
    cout << endl << "------------" << endl;
}
void showArray(int *a, const int N)
{
    // Print the N elements of `a`, comma-separated, with a banner.
    cout << "output array: ";
    for (size_t idx = 0; idx < N; idx++)
    {
        cout << a[idx] << ", ";
    }
    cout << endl << "------------" << endl;
}
void arrayAdditionOnCPU(int *c, int *a, int *b, const int N)
{
    // Host reference: element-wise c = a + b over N elements.
    for (size_t idx = 0; idx < N; ++idx)
        c[idx] = a[idx] + b[idx];
}
int main()
{
    // Add two random N-element vectors on the GPU and on the CPU, timing both.
    // host data
    int a[N], b[N], c[N], d[N];
    initVec(a, N);
    initVec(b, N, true);
    int size = N * sizeof(int);
    // device data
    int *d_a, *d_b, *d_c;
    // allocate space for device data
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // copy data to device from host
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // launch config: 2 threads per block. BUG FIX: the grid size now uses a
    // ceiling division, so the tail of the array is still covered when N is
    // not a multiple of the block size (N / threadPerBlock.x silently dropped
    // up to blockDim-1 elements).
    dim3 threadPerBlock(2);
    dim3 blockSize((N + threadPerBlock.x - 1) / threadPerBlock.x);
    cudaEvent_t start, stop;
    float deviceTimeCost;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    addition<<<blockSize, threadPerBlock>>>(d_c, d_a, d_b, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&deviceTimeCost, start, stop); // milliseconds
    // destroy all events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cout << "Time Consumed on GPU: " << deviceTimeCost << " ms." << endl;
    auto hostStart = chrono::steady_clock::now();
    arrayAdditionOnCPU(d, a, b, N);
    auto hostEnd = chrono::steady_clock::now();
    float hostTimeCost = chrono::duration_cast<chrono::duration<float> >(hostEnd - hostStart).count(); // seconds
    cout << "Time Consumed on Host: " << hostTimeCost << " s." << endl;
    // BUG FIX: the device time is in ms and the host time in s, so the ratio
    // needs one factor of 1000, not 1000*1000.
    cout << "who is faster: " << deviceTimeCost / (1000 * hostTimeCost) << endl;
    //showArray(c, N);
    system("pause");
    return 0;
} |
8,644 |
#include <type_traits>
int main(int argc, char** argv)
{
    // Regression check for CMake issue #17519: setting CXX_STANDARD must not
    // break CUDA_STANDARD selection via cxx_std_11.
    return std::integral_constant<int, 0>::value;
}
|
8,645 | #include "includes.h"
__global__ void rectify(unsigned char * original_img, unsigned char* new_img, unsigned int num_thread, unsigned int size) {
    // Clamp every pixel up to a floor of 127. Threads stride by num_thread;
    // only threadIdx.x seeds the loop, so this assumes a single-block launch
    // with blockDim.x == num_thread -- TODO confirm with the caller.
    for (unsigned int px = threadIdx.x; px < size; px += num_thread) {
        unsigned char v = original_img[px];
        new_img[px] = (v < 127) ? 127 : v;
    }
} |
8,646 | #include "includes.h"
__global__ void sqr_mag_kernel(const float *data, float *result, int total)
{
    // Squared magnitude of interleaved complex pairs: each thread owns one
    // (re, im) pair, writes |z|^2 into the real slot and zeroes the imag slot.
    int pair = blockIdx.x * blockDim.x + threadIdx.x;
    if (pair < total) {
        int re = 2 * pair;
        float x = data[re];
        float y = data[re + 1];
        result[re] = x * x + y * y;
        result[re + 1] = 0;
    }
} |
8,647 | // nvcc suma.cu -o v && ./v
#include <bits/stdc++.h>
using namespace std;
#define THREADS_PER_BLOCK 1024 //depende de la arquitectura
//#define THREADS_PER_BLOCK 16
#define threadsPB 8
void random_ints(int **&M, int rows, int cols){
    // Fill every cell of the rows x cols matrix with a value in [1, 10].
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            M[r][c] = 1 + rand() % 10;
}
void resize(int **&M,int rows, int cols){
    // Allocate a rows x cols matrix as an array of malloc'd row pointers
    // (matching the file's C-style allocation convention).
    M = (int **) malloc(rows * sizeof(int*));
    for (int r = 0; r < rows; ++r)
        M[r] = (int *) malloc(cols * sizeof(int));
}
void imprimir(int **&M, int rows, int cols){
    // Print the matrix row by row, followed by a blank line.
    for (int r = 0; r < rows; ++r){
        for (int c = 0; c < cols; ++c){
            cout << M[r][c] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
//void createMatrixHostCUDA(int**& host, int**& device, int **& aux, int size, int rows, int cols ){
// NOTE(review): this only cudaMallocs the row-POINTER table
// (rows * sizeof(int*)); it allocates no row storage and copies nothing, so
// the resulting device matrix is not usable by itself. The commented-out code
// below is the abandoned version that did more; the caller must provision the
// actual element storage.
void createMatrixHostCUDA(int**& device, int rows, int cols ){
//aux =(int **)malloc(rows*sizeof(int*));
//cudaMalloc((void **)&aux[0],size);
cudaMalloc((void **)&device,rows*sizeof(int*));
//for (int i=1; i<rows;++i){
// aux[i]=aux[i-1]+cols;
//}
//cudaMemcpy(device, aux, rows*sizeof(int*), cudaMemcpyHostToDevice);
}
//=================cuda=================
__global__ void sum(int **A, int **B, int **R, int rows, int cols){
    // R = A + B element-wise; 2-D launch, one thread per cell, tail-guarded.
    int r = threadIdx.y + blockIdx.y * blockDim.y;
    int c = threadIdx.x + blockIdx.x * blockDim.x;
    if (r >= rows || c >= cols)
        return;
    R[r][c] = A[r][c] + B[r][c];
}
void cuda_suma(int **h_A, int **h_B, int **h_R, int rows, int cols ){
    // Add two host matrices on the GPU, writing into the caller's h_R rows.
    // BUG FIX: the old version cudaMalloc'd only empty row-pointer tables,
    // never allocated element storage, never copied the inputs to the device
    // (the kernel dereferenced garbage pointers), and finally cudaMemcpy'd
    // device bytes over h_R's HOST row pointers, corrupting them.
    // Strategy: one flat data buffer per matrix plus a device-resident table
    // of row pointers into it, so the int** kernel signature still works.
    size_t dataBytes  = (size_t)rows * cols * sizeof(int);
    size_t tableBytes = (size_t)rows * sizeof(int*);
    int *dataA, *dataB, *dataR;  // flat device element storage
    cudaMalloc((void **)&dataA, dataBytes);
    cudaMalloc((void **)&dataB, dataBytes);
    cudaMalloc((void **)&dataR, dataBytes);
    // copy the host matrices row by row into the flat buffers
    for (int i = 0; i < rows; ++i) {
        cudaMemcpy(dataA + (size_t)i * cols, h_A[i], cols * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dataB + (size_t)i * cols, h_B[i], cols * sizeof(int), cudaMemcpyHostToDevice);
    }
    // build row-pointer tables on the host, then upload them to the device
    int **rowsA = (int **) malloc(tableBytes);
    int **rowsB = (int **) malloc(tableBytes);
    int **rowsR = (int **) malloc(tableBytes);
    for (int i = 0; i < rows; ++i) {
        rowsA[i] = dataA + (size_t)i * cols;
        rowsB[i] = dataB + (size_t)i * cols;
        rowsR[i] = dataR + (size_t)i * cols;
    }
    int **d_A, **d_B, **d_R;
    cudaMalloc((void **)&d_A, tableBytes);
    cudaMalloc((void **)&d_B, tableBytes);
    cudaMalloc((void **)&d_R, tableBytes);
    cudaMemcpy(d_A, rowsA, tableBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, rowsB, tableBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_R, rowsR, tableBytes, cudaMemcpyHostToDevice);
    // launch: grid x spans COLUMNS and y spans ROWS, matching the kernel's
    // indexing (the old code had them swapped, under-covering when rows != cols)
    dim3 threadPerBlock(threadsPB, threadsPB);
    dim3 blockPerGrid((cols + threadPerBlock.x - 1) / threadPerBlock.x,
                      (rows + threadPerBlock.y - 1) / threadPerBlock.y);
    sum<<<blockPerGrid, threadPerBlock>>>(d_A, d_B, d_R, rows, cols);
    // copy the result back into the caller's existing rows (blocking copy syncs)
    for (int i = 0; i < rows; ++i) {
        cudaMemcpy(h_R[i], dataR + (size_t)i * cols, cols * sizeof(int), cudaMemcpyDeviceToHost);
    }
    free(rowsA); free(rowsB); free(rowsR);
    cudaFree(dataA); cudaFree(dataB); cudaFree(dataR);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_R);
}
//======================================
int main(){
// Demo: add two random 8x8 matrices on the GPU and print A, B, and A+B.
// (srand is never called, so the "random" inputs repeat on every run.)
int rows = 8;
int cols = 8;
int **A, **B, **R;
resize(A,rows,cols);
resize(B,rows,cols);
resize(R,rows,cols);
random_ints(A,rows,cols);
random_ints(B,rows,cols);
cuda_suma(A,B,R,rows,cols);
imprimir(A,rows,cols);
imprimir(B,rows,cols);
imprimir(R,rows,cols);
}
/*
int main(){
int rows=row;
int cols=column;
//srand (time(NULL));
int **h_A, **h_B, **h_R;
int **d_A, **d_B, **d_R;
int **a_aux, **b_aux, **c_aux;
int size = row* column * sizeof(int*);
createMatrixHostCUDA(h_A,d_A,a_aux,size,row,column);
createMatrixHostCUDA(h_B,d_B,b_aux,size,row,column);
createMatrixHostCUDA(h_R,d_R,c_aux,size,row,column);
random_ints(h_A,rows,cols);
random_ints(h_B,rows,cols);
cudaMemcpy(a_aux[0], h_A[0], size, cudaMemcpyHostToDevice);
cudaMemcpy(b_aux[0], h_B[0], size, cudaMemcpyHostToDevice);
dim3 threadPerBlock(threadsPB, threadsPB);
dim3 blockPerGrid((rows+threadPerBlock.x-1)/threadPerBlock.x,(cols+threadPerBlock.y-1)/threadPerBlock.y);
//scalarMult<<<blockPerGrid,threadPerBlock>>>(d_A,2,d_R);
Multi<<<blockPerGrid,threadPerBlock>>>(d_A,d_B,d_R);
cudaMemcpy(h_R[0],c_aux[0], size, cudaMemcpyDeviceToHost);
print(h_A,rows,cols);
print(h_B,rows,cols);
print(h_R,rows,cols);
free(h_A); free(h_B); free(h_R);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_R);
cudaFree(a_aux[0]);cudaFree(c_aux[0]);
return 0;
}*/ |
8,648 | #include "includes.h"
__device__ unsigned int getGid3d3d(){
    // Flatten 3-D (block, thread) coordinates into one global linear id.
    // Within the block the sum is tx + ty*bx + tz*bx*by, kept exactly as the
    // original so the produced ordering is unchanged.
    unsigned int block = blockIdx.x
                       + blockIdx.y * gridDim.x
                       + blockIdx.z * gridDim.x * gridDim.y;
    unsigned int local = threadIdx.x
                       + threadIdx.y * blockDim.x
                       + threadIdx.z * blockDim.x * blockDim.y;
    return block * (blockDim.x * blockDim.y * blockDim.z) + local;
}
__global__ void scalarMult(double2* in, double factor, double2* out){
    // out[g] = in[g] * factor, applied to both components of the double2 pair.
    unsigned int g = getGid3d3d();
    double2 scaled;
    scaled.x = in[g].x * factor;
    scaled.y = in[g].y * factor;
    out[g] = scaled;
} |
8,649 |
__device__ __constant__ int3 c_ImageSize;
__device__ __constant__ float3 c_ImageOrigin;
__device__ __constant__ float3 c_DetectorOrigin;
__device__ __constant__ float2 c_DetectorSize;
// Compute a perspective-projection position field over the image grid.
// 2-D launch: tid_x in [0, c_ImageSize.x), tid_y in [0, c_ImageSize.z).
// NOTE(review): the z_index loop never uses z_index -- every iteration
// computes identical values (z_prime is built from tid_x, not z_index) and
// writes to the same out_index, so only the last (identical) result survives.
// This looks like a bug (z_prime presumably should involve z_index, and
// out_index should advance per slice); confirm intent before changing.
__global__ void tt_perspective_positionField_gpu_kernel(float4 *positionFieldArray)
{
const int tid_x= blockIdx.x*blockDim.x + threadIdx.x;
const int tid_y= blockIdx.y*blockDim.y + threadIdx.y;
if(tid_x<c_ImageSize.x)
{
if(tid_y<c_ImageSize.z)
{
int3 imageSize = c_ImageSize;          // local copies of constant-memory params
float3 imageOrigin = c_ImageOrigin;
float3 detectorOrigin = c_DetectorOrigin;
float2 detectorSize = c_DetectorSize;  // NOTE(review): never read below
short3 voxelIndex;                     // NOTE(review): never used below
int out_index = imageSize.x * tid_y + tid_x;
//int out_index = imageSize.z * tid_x + tid_y;
/* The transformation is applied */
float4 position;
for(int z_index=0; z_index<imageSize.y; z_index++)
{
float z_prime = tid_x + imageOrigin.z;       // presumably should use z_index -- verify
float z_ratio = z_prime / detectorOrigin.z;  // perspective scaling factor
float x_prime = tid_x + detectorOrigin.x * z_ratio;
float y_prime = tid_y + detectorOrigin.y * z_ratio;
position.x = x_prime;
position.y = y_prime;
position.z = z_prime;
position.w = 0.0f;
/* the deformation field (real coordinates) is stored */
positionFieldArray[out_index] = position;
}
}
}
}
|
8,650 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
__global__ void itop_kernel(const uint8_t* const in, uint8_t* out, const int data_len, const int thread_len)
{
    // De-interleave y,b,r triplets into planar layout. Each thread consumes
    // `thread_len` interleaved input bytes and emits thread_len/3 bytes per
    // plane (data_len must be a multiple of 3; the host wrapper checks this).
    // BUG FIX: the offset's second term used blockIdx.x where threadIdx.x was
    // intended (compare ptoi_kernel), so all threads of a block wrote the
    // same region.
    int offset = ((blockDim.x * blockIdx.x) + threadIdx.x) * thread_len;
    // BUG FIX: plane pointers must advance one SAMPLE per 3 interleaved input
    // bytes (offset/3), not one byte per input byte as before.
    int plane_len = data_len / 3;
    uint8_t* out_y = out + offset / 3;
    uint8_t* out_b = out + plane_len + offset / 3;
    uint8_t* out_r = out + 2 * plane_len + offset / 3;
    for(int i = offset; i < thread_len + offset && i < data_len;)
    {
        *out_y++ = in[i++];
        *out_b++ = in[i++];
        *out_r++ = in[i++];
    }
}
__global__ void ptoi_kernel(const uint8_t * const in_y,
                            const uint8_t* const in_b,
                            const uint8_t* const in_r,
                            uint8_t* out, int data_len, int thread_len)
{
    // Re-interleave planar y/b/r back into y,b,r triplets. `i` walks
    // interleaved output bytes, `j` walks per-plane samples.
    int oset_main=(blockDim.x*blockIdx.x*thread_len)+(threadIdx.x*thread_len);
    // BUG FIX: the plane index advances one sample per 3 interleaved bytes,
    // so it must START at oset_main/3 as well; it previously started at
    // oset_main and read from the wrong part of each plane.
    for(int i = oset_main, j = oset_main / 3; i < thread_len + oset_main && i < data_len; ++j)
    {
        out[i] = in_y[j];
        ++i;
        out[i] = in_b[j];
        ++i;
        out[i] = in_r[j];
        ++i;
    }
}
// Convert an interleaved y/b/r byte buffer to planar layout on the GPU.
// Returns a malloc'd host buffer the caller must free, or NULL on bad input.
// NOTE(review): unchecked launch math -- num_threads is 0 when
// channel_len < 256 (invalid launch configuration), can exceed the 1024
// threads-per-block limit for large inputs, and the integer divisions drop
// tail bytes when channel_len is not a multiple of 256. There is also no
// CUDA error checking. Confirm the expected input sizes before relying on it.
uint8_t* cuda_interstitial_to_planar(uint8_t* data, int data_len)
{
if(data_len%3)
{
printf("Error: data_len must be a multiple of 3\n");
return NULL;
}
int channel_len = data_len/3;
int num_threads = channel_len/256;
uint8_t* h_out = (uint8_t*)malloc(data_len);
uint8_t* d_in, *d_out;
cudaMalloc((void**)&d_in,data_len);
cudaMalloc((void**)&d_out,data_len);
cudaMemcpy(d_in,data,data_len,cudaMemcpyHostToDevice);
printf("3 blocks and %d threads\n",num_threads);
printf("thread len: %d\n",data_len/(3*num_threads));
itop_kernel<<<3,num_threads>>>(d_in,d_out,data_len,data_len/(3*num_threads));
cudaMemcpy(h_out,d_out,data_len,cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
return h_out;
}
#ifdef __cplusplus
}
#endif
|
8,651 | #include <fstream>
#include <iostream>
#include <vector>
using namespace std;
// A candidate number paired with its primality result.
struct myNumber {
long value;  // the number read from the input file
bool prime;  // set by kernel(): true iff value is prime
};
__global__ void kernel(myNumber *numbers, int size);
__device__ bool isPrime(long number);
void printResults(vector<myNumber> numbers);
vector<myNumber> readNumbers(ifstream &inputFile);
int main(int argc, char **argv) {
    // Read longs from the file named in argv[1], test each for primality on
    // the GPU (one single-thread block per number), and print the results and
    // the elapsed GPU time.
    if (argc != 2 ) {
        cout << "Invalid number of arguments!" << endl;
        return EXIT_FAILURE;
    }
    ifstream inputFile(argv[1]);
    if (!inputFile.is_open()) {
        cout << "File doesn't exist: " << argv[1] << endl;
        return EXIT_FAILURE;
    }
    vector <myNumber> numbers = readNumbers(inputFile);
    inputFile.close();
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    myNumber *dev_numbers;
    // allocating memory on GPU
    cudaMalloc( (void**) &dev_numbers, numbers.size() * sizeof(struct myNumber) );
    // copying data to GPU
    cudaMemcpy( dev_numbers, numbers.data(), numbers.size() * sizeof(struct myNumber), cudaMemcpyHostToDevice );
    cudaEventRecord( start, 0 );
    // doing calculation on GPU
    kernel<<< numbers.size(), 1>>>(dev_numbers, numbers.size());
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsedTime, start, stop );
    // copying results from GPU (blocking copy also synchronizes the kernel)
    cudaMemcpy(numbers.data(), dev_numbers, numbers.size() * sizeof(struct myNumber), cudaMemcpyDeviceToHost );
    // freeing memory from GPU
    cudaFree(dev_numbers);
    // BUG FIX: the events were created but never destroyed (resource leak)
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    cout << "Time: " << elapsedTime << "ms" << endl;
    printResults(numbers);
    return EXIT_SUCCESS;
}
__global__ void kernel(myNumber *numbers, int size) {
    // One block per number (single-thread blocks): fill in the prime flag.
    int idx = blockIdx.x;
    if (idx >= size)
        return;
    numbers[idx].prime = isPrime(numbers[idx].value);
}
/**
* Reads numbers from the file.
*
* @param inputFile file to be read from
* @return list of numbers
*/
/**
 * Reads whitespace-separated longs from the stream until EOF/parse failure.
 * The prime flag of each entry is left unset; the GPU kernel fills it in.
 *
 * @param inputFile file to be read from
 * @return list of numbers
 */
vector<myNumber> readNumbers(ifstream &inputFile) {
    vector<myNumber> result;
    long value;
    while (inputFile >> value) {
        myNumber entry;
        entry.value = value;
        result.push_back(entry);
    }
    return result;
}
/**
* Tests primality (naive approach with some optimization)
*
* @param number number to be tested
* @return true if the number is pirme, otherwise false
*/
/**
 * Tests primality (trial division over 6k+-1 candidates up to sqrt(number)).
 *
 * @param number number to be tested
 * @return true if the number is prime, otherwise false
 */
__device__ bool isPrime(long number) {
    if (number == 2 || number == 3) {
        return true;
    } else if (number < 2 || number % 2 == 0 || number % 3 == 0) {
        return false;
    }
    // BUG FIX: the loop counter was an int, so i*i overflowed (undefined
    // behavior) for long inputs beyond the int range; use long to match the
    // argument.
    int step = 4;
    for (long i = 5; i * i <= number; i += step) { //NOTICE: sqrt() is not allowed
        if (number % i == 0) {
            return false;
        }
        step = 6 - step; //HACK: toggles the stride between 4 and 2 (6k-1, 6k+1)
    }
    return true;
}
/**
* Prints results of the primality tests.
*
* @param numbers list of numbers
*/
/**
 * Prints "<value>: prime" or "<value>: composite" for every entry.
 *
 * @param numbers list of numbers (with prime flags already filled in)
 */
void printResults(vector<myNumber> numbers) {
    for (uint idx = 0; idx < numbers.size(); ++idx) {
        const char *label = numbers[idx].prime ? "prime" : "composite";
        cout << numbers[idx].value << ": " << label << endl;
    }
}
|
8,652 | #include <iostream>
#include <math.h>
#include <stdio.h>
#include<stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <assert.h>
int main(int argc, char *argv[])
{
    // Allocate <argv[1]> MB of device memory and hold it for 15 minutes
    // (useful for occupying GPU memory, e.g. in scheduling experiments).
    // BUG FIX: argv[1] was dereferenced without checking argc, crashing when
    // the size argument was omitted.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <size-in-MB>\n", argv[0]);
        return 1;
    }
    float *MB_ptr;
    int size_char_format = atoi(argv[1]);
    printf("size is %d MB\n", size_char_format );
    // 1024*256 floats == 1 MB, scaled by the requested size
    cudaMalloc((void **) &MB_ptr, sizeof(float) * 1024*256 * size_char_format);
    sleep(900);
    cudaFree(MB_ptr);
    return 0;
}
|
8,653 | //
// Created by rafa on 2/5/21.
//
#include "grid.cuh"
// Bin `npart` particles (xyz triplets in `positions`) into an nside^3 grid of
// cells over a box of side `boxsize`. Two passes: count per cell, allocate
// per-cell storage, then scatter the positions. Caller owns the result.
// NOTE(review): assumes coordinates lie in [0, boxsize); for negative inputs
// the % nside would stay negative (C truncating modulo) and index out of range.
Grid *getGrid(double boxsize, int nside, long long npart, const double *positions) {
    Grid *grid;
    grid = (Grid *) malloc(nside * nside * nside * sizeof(Grid));
    // zero all cell counts
    for (int i = 0; i < nside * nside * nside; i++) {
        grid[i].np = 0;
    }
    // pass 1: count particles per cell
    for (long long ii = 0; ii < npart; ii++) {
        int i, j, k;
        int s;
        i = (int) (positions[3 * ii + 0] / boxsize * nside);
        j = (int) (positions[3 * ii + 1] / boxsize * nside);
        k = (int) (positions[3 * ii + 2] / boxsize * nside);
        i %= nside;  // wrap coordinates that land exactly at/above boxsize
        j %= nside;
        k %= nside;
        s = nside * nside * k + nside * j + i;  // flat cell index (k-major)
        grid[s].np++;
    }
    // allocate per-cell storage, then reset counts to reuse them as cursors
    for (int i = 0; i < nside * nside * nside; i++) {
        if (grid[i].np > 0) {
            grid[i].pos = (double *) malloc(3 * grid[i].np * sizeof(double));
        }
        grid[i].np = 0;
    }
    // pass 2: scatter each particle's xyz into its cell
    for (long long ii = 0; ii < npart; ii++) {
        int i, j, k;
        int s;
        long long offset;
        i = (int) (positions[3 * ii + 0] / boxsize * nside);
        j = (int) (positions[3 * ii + 1] / boxsize * nside);
        k = (int) (positions[3 * ii + 2] / boxsize * nside);
        i %= nside;
        j %= nside;
        k %= nside;
        s = nside * nside * k + nside * j + i;
        offset = 3 * grid[s].np;  // np doubles as the write cursor here
        grid[s].pos[offset + 0] = positions[3 * ii + 0];
        grid[s].pos[offset + 1] = positions[3 * ii + 1];
        grid[s].pos[offset + 2] = positions[3 * ii + 2];
        grid[s].np++;
    }
    return grid;
}
void
gridToOrderedArray(Grid *grid, int nside, double *orderedPositions, long long *numParticlesInGrid, long long *offset) {
    // Flatten the per-cell particle lists into one contiguous position array,
    // recording each cell's count and its start (exclusive prefix sum of counts).
    int ncells = nside * nside * nside;
    for (int s = 0; s < ncells; s++) {
        numParticlesInGrid[s] = grid[s].np;
    }
    offset[0] = 0;
    for (int s = 1; s < ncells; s++) {
        offset[s] = offset[s - 1] + grid[s - 1].np;
    }
    for (int s = 0; s < ncells; s++) {
        double *dst = orderedPositions + 3 * offset[s];
        for (long long i = 0; i < 3 * grid[s].np; i++) {
            dst[i] = grid[s].pos[i];
        }
    }
} |
8,654 | //#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
//
//
// __global__ void GPU_MATHEMATICS::copy(unsigned* s, const unsigned* ranks, unsigned order, const unsigned *s_LD, const unsigned* m1, const unsigned* m1_LD) {
//
// unsigned store_index = 0;
// unsigned m1_index = 0;
//
// if (order == 1) {
// copy<<<256,256>>>(s, m1, ranks[order-1]);
//
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// copy<<<256,256>>>(&s[store_index], ranks, order-1, s_LD, &m1[m1_index], m1_LD);
// store_index += s_LD[order-1];
// m1_index += m1_LD[order-1];
// }
// }
//}
//
// __global__ void GPU_MATHEMATICS::fill(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, unsigned m1) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1;
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// fill<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, m1);
// }
// }
//}
//
//
////----
//
// __global__ void GPU_MATHEMATICS::power(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1, const unsigned* m1_LD,
// const unsigned* m2, const unsigned* m2_LD) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// //s[i] = pow(m1[i], m2[i]);
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// power<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, &m2[m2_LD[order - 1] * i], m2_LD);
// }
// }
//}
//
//
// __global__ void GPU_MATHEMATICS::multiply(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1, const unsigned* m1_LD,
// const unsigned* m2, const unsigned* m2_LD) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] * m2[i];
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// multiply<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, &m2[m2_LD[order - 1] * i], m2_LD);
// }
// }
//}
//
//
// __global__ void GPU_MATHEMATICS::divide(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1, const unsigned* m1_LD,
// const unsigned* m2, const unsigned* m2_LD) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] / m2[i];
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// divide<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, &m2[m2_LD[order - 1] * i], m2_LD);
// }
// }
//}
//
//
// __global__ void GPU_MATHEMATICS::add(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1, const unsigned* m1_LD,
// const unsigned* m2, const unsigned* m2_LD) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] + m2[i];
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// add<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, &m2[m2_LD[order - 1] * i], m2_LD);
// }
// }
//}
//
//
//
// __global__ void GPU_MATHEMATICS::subtract(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1, const unsigned* m1_LD,
// const unsigned* m2, const unsigned* m2_LD) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] - m2[i];
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// subtract<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, &m2[m2_LD[order - 1] * i], m2_LD);
// }
// }
//}
//
////-----------------------------------------------Scalar Methods ----------------------------------------------------//
//
//__global__ void GPU_MATHEMATICS::power (unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD,
// const unsigned* m1, const unsigned* m1_LD, const unsigned scal) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// // s[i] = pow(m1[i], scal);
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// power<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, scal);
// }
// }
//}
//
//__global__ void GPU_MATHEMATICS::multiply(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD,
// const unsigned* m1, const unsigned* m1_LD, const unsigned scal) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] * scal;
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// multiply<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, scal);
// }
// }
//}
//
//__global__ void GPU_MATHEMATICS::divide(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1,
// const unsigned* m1_LD, const unsigned scal) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] / scal;
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// divide<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, scal);
// }
// }
//}
//
//__global__ void GPU_MATHEMATICS::add(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1,
// const unsigned* m1_LD, const unsigned scal) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] + scal;
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// add<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, scal);
// }
// }
//}
//
//__global__ void GPU_MATHEMATICS::subtract(unsigned* s, const unsigned* s_ranks, unsigned order, const unsigned *s_LD, const unsigned* m1,
// const unsigned* m1_LD, const unsigned scal) {
// if (order == 1) {
// for (unsigned i = 0; i < s_ranks[0]; ++i) {
// s[i] = m1[i] - scal;
// }
// } else {
// for (unsigned i = 0; i < s_ranks[order - 1]; ++i) {
// subtract<<<256,256>>>(&s[s_LD[order - 1] * i], s_ranks, order - 1, s_LD, &m1[m1_LD[order - 1] * i], m1_LD, scal);
// }
// }
//}
//
|
8,655 | #include "includes.h"
// Tiled matrix multiply C = A * B using SIZE x SIZE shared-memory tiles.
// A is N x P (row-major), B is P x M, C is N x M. SIZE comes from includes.h
// and must equal blockDim.x == blockDim.y.
__global__ void Kernel11(int N, int M, int P, float *A, float *B, float *C) {
__shared__ float sA[SIZE][SIZE];   // tile of A
__shared__ float sB[SIZE][SIZE];   // tile of B
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int row = by * SIZE + ty;
int col = bx * SIZE + tx;
int m, k, iter;
float tmp = 0.0;
iter = P%SIZE;  // leftover depth when P is not a multiple of SIZE
if (iter == 0) {
// fast path: P divides evenly into full tiles, no guards needed
for (m=0; m < P; m=m+SIZE) {
sA[ty][tx] = A[row*P + m + tx];
sB[ty][tx] = B[col + (m + ty)*M];
__syncthreads();   // tile fully loaded before use
for (k=0; k<SIZE; k++)
tmp += sA[ty][k] * sB[k][tx];
__syncthreads();   // all lanes done before the tile is overwritten
}
}
else {
// full tiles first ...
for (m=0; m < P-iter; m=m+SIZE) {
sA[ty][tx] = A[row*P + m + tx];
sB[ty][tx] = B[col + (m + ty)*M];
__syncthreads();
for (k=0; k<SIZE; k++)
tmp += sA[ty][k] * sB[k][tx];
__syncthreads();
}
// ... then the partial last tile, zero-padded where out of range.
// NOTE(review): these guards compare col/row (global output coordinates)
// against P, mixing dimensions; a guard like (m + tx) < P would be the
// conventional form -- verify edge-tile results against a reference.
if (col < P && row < N) sA[ty][tx] = A[row*P + m + tx]; else sA[ty][tx] = 0.0;
if (row < P && col < M) sB[ty][tx] = B[col + (m + ty)*M]; else sB[ty][tx] = 0.0;
__syncthreads();
for (k=0; k<iter; k++)
tmp += sA[ty][k] * sB[k][tx];
}
if ((row < N) && (col < M)) C[row*M+col] = tmp;
} |
8,656 | // Copyright (c) 2013-2019 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file mul_by_veff.cu
*
* \brief CUDA kernels to multiply wave-functions by effective potential.
*/
//#include "gpu/acc_common.hpp"
//#include "gpu/acc_runtime.hpp"
//
//template <typename T>
//__global__ void
//mul_by_veff_real_real_gpu_kernel(int nr__, T const* in__,T const* veff__, T* out__)
//{
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < nr__) {
// out__[i] = in__[i] * veff__[i];
// }
//}
//
//template <typename T>
//__global__ void
//mul_by_veff_complex_real_gpu_kernel(int nr__, gpu_complex_type<T> const* in__, T const* veff__,
// gpu_complex_type<T>* out__)
//{
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < nr__) {
// out__[i] = mul_accNumbers(veff__[i], in__[i]);
// }
//}
//
//template <typename T>
//__global__ void
//mul_by_veff_complex_complex_gpu_kernel(int nr__, gpu_complex_type<T> const* in__, T pref__, T const* vx__,
// T const* vy__, gpu_complex_type<T>* out__)
//{
// int i = blockDim.x * blockIdx.x + threadIdx.x;
// if (i < nr__) {
// out__[i] = mul_accNumbers(in__[i], make_accComplex(vx__[i], pref__ * vy__[i]));
// }
//}
//
//extern "C" {
//
//void
//mul_by_veff_real_real_gpu_float(int nr__, float const* in__, float const* veff__, float* out__)
//{
// dim3 grid_t(64);
// dim3 grid_b(num_blocks(nr__, grid_t.x));
//
// accLaunchKernel((mul_by_veff_real_real_gpu_kernel<float>), dim3(grid_b), dim3(grid_t), 0, 0,
// nr__, in__, veff__, out__);
//}
//
//void
//mul_by_veff_real_real_gpu_double(int nr__, double const* in__, double const* veff__, double* out__)
//{
// dim3 grid_t(64);
// dim3 grid_b(num_blocks(nr__, grid_t.x));
//
// accLaunchKernel((mul_by_veff_real_real_gpu_kernel<double>), dim3(grid_b), dim3(grid_t), 0, 0,
// nr__, in__, veff__, out__);
//}
//
//void
//mul_by_veff_complex_real_gpu_float(int nr__, gpu_complex_type<float> const* in__, float const* veff__,
// gpu_complex_type<float>* out__)
//{
// dim3 grid_t(64);
// dim3 grid_b(num_blocks(nr__, grid_t.x));
//
// accLaunchKernel((mul_by_veff_complex_real_gpu_kernel<float>), dim3(grid_b), dim3(grid_t), 0, 0,
// nr__, in__, veff__, out__);
//}
//
//void
//mul_by_veff_complex_real_gpu_double(int nr__, gpu_complex_type<double> const* in__, double const* veff__,
// gpu_complex_type<double>* out__)
//{
// dim3 grid_t(64);
// dim3 grid_b(num_blocks(nr__, grid_t.x));
//
// accLaunchKernel((mul_by_veff_complex_real_gpu_kernel<double>), dim3(grid_b), dim3(grid_t), 0, 0,
// nr__, in__, veff__, out__);
//}
//
//void
//mul_by_veff_complex_complex_gpu_float(int nr__, gpu_complex_type<float> const* in__, float pref__,
// float const* vx__, float const* vy__, gpu_complex_type<float>* out__)
//{
// dim3 grid_t(64);
// dim3 grid_b(num_blocks(nr__, grid_t.x));
//
// accLaunchKernel((mul_by_veff_complex_complex_gpu_kernel<float>), dim3(grid_b), dim3(grid_t), 0, 0,
// nr__, in__, pref__, vx__, vy__, out__);
//}
//
//void
//mul_by_veff_complex_complex_gpu_double(int nr__, gpu_complex_type<double> const* in__, double pref__,
// double const* vx__, double const* vy__, gpu_complex_type<double>* out__)
//{
// dim3 grid_t(64);
// dim3 grid_b(num_blocks(nr__, grid_t.x));
//
// accLaunchKernel((mul_by_veff_complex_complex_gpu_kernel<double>), dim3(grid_b), dim3(grid_t), 0, 0,
// nr__, in__, pref__, vx__, vy__, out__);
//}
//}
|
8,657 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#define THREAD 128
#define T int
__global__ void gemv(int m, int n, T *adim, T *b, T *d_ans);
void cgemv(int m, int n, T *adim, T *b, T *d_ans);
// Wall-clock time in seconds since the epoch, with microsecond resolution.
double gettime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + now.tv_usec * 1.0e-6;
}
// Compares a CPU reference GEMV against the GPU kernel on an all-ones
// m x n matrix and all-ones vector (every result entry should equal n),
// and reports the kernel time measured with CUDA events.
int main(int argc, char **argv)
{
    /* host-side data */
    int i, j;
    int *bdim, *c, *ans, *h_ans;
    int n = 8192;    /* dot-product length (columns) */
    int m = 20480;   /* number of rows / result length */
    bdim = (T*)malloc(sizeof(T) *m*n);
    c = (T*)malloc(sizeof(T) *n);
    /* FIX: cgemv() accumulates into its output, so the reference buffer must
       start zeroed; previously it summed into uninitialized malloc memory. */
    ans = (T*)calloc(m, sizeof(T));
    h_ans = (T*)malloc(sizeof(T) *m);
    /* device-side data */
    T *d_bdim, *d_c, *d_ans;
    cudaMalloc((void **)&d_bdim, sizeof(T)*m*n);
    cudaMalloc((void **)&d_c, sizeof(T)*n);
    cudaMalloc((void **)&d_ans, sizeof(T)*m);
    /* fill matrix and vector with ones */
    for(i = 0; i < n; i++)
    {
        c[i] = 1;
        for(j = 0; j < m; j++)
            bdim[i*m+j] = 1;
    }
    cgemv(m, n, bdim, c, ans);
    /* events used to time the kernel */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float time1 = 0;
    cudaMemcpy(d_bdim, bdim, sizeof(T)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, sizeof(T)*n, cudaMemcpyHostToDevice);
    cudaEventRecord( start, 0 );
    gemv<<<m, THREAD>>>(m, n, d_bdim, d_c, d_ans);   /* one block per row */
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time1, start, stop );
    cudaMemcpy(h_ans, d_ans, sizeof(T)*m, cudaMemcpyDeviceToHost);
    for(i = 0; i < m; i++)
        printf("%d -- %d\n", ans[i], h_ans[i]);
    printf("Execution time = %f ms\n", time1);
    /* FIX: destroy the timing events (previously leaked). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(bdim);
    free(c);
    free(ans);
    free(h_ans);
    cudaFree(d_bdim);
    cudaFree(d_c);
    cudaFree(d_ans);
    return 0;
}
// Matrix-vector product: one block per row of adim (m x n, row-major).
// Each thread accumulates a strided partial dot product of row blockIdx.x
// with b, then the block reduces the THREAD partials in shared memory.
__global__ void gemv(int m, int n, T* adim, T* b, T* d_ans)
{
    int i;
    int div = n / THREAD;        // full THREAD-wide chunks of the row
    __shared__ T tmp[THREAD];
    T sum = 0;
    for(i = 0; i < div; i++)
        sum += adim[blockIdx.x*n + i*THREAD + threadIdx.x] * b[i*THREAD + threadIdx.x];
    /* FIX: the tail covers the remainder of the row length n, not of the
       row count m — the original tested `threadIdx.x < m%THREAD`, which
       drops (or adds garbage) elements whenever n%THREAD != m%THREAD. */
    if(threadIdx.x < n % THREAD)
        sum += adim[blockIdx.x*n + THREAD*div + threadIdx.x] * b[THREAD*div + threadIdx.x];
    tmp[threadIdx.x] = sum;
    __syncthreads();
    /* FIX: reduce all the way to one element with a barrier per step. The
       original switched to an unsynchronized sub-warp phase for the last 32
       partials without `volatile` or __syncwarp(), which is a data race on
       GPUs with independent thread scheduling (Volta and newer). */
    for(i = THREAD / 2; i > 0; i >>= 1)
    {
        if(threadIdx.x < i)
            tmp[threadIdx.x] += tmp[threadIdx.x + i];
        __syncthreads();
    }
    if(threadIdx.x == 0)
        d_ans[blockIdx.x] = tmp[0];
}
/* CPU reference GEMV: d_ans[i] = sum_j adim[i][j] * b[j] for an m x n
   row-major matrix.
   FIX: the output is now zeroed before accumulation — previously the
   function summed into whatever the caller's buffer happened to contain
   (undefined results for a malloc'd buffer). */
void cgemv(int m, int n, T *adim, T *b, T *d_ans)
{
    int i, j;
    for(i = 0; i < m; i++)
    {
        d_ans[i] = 0;
        for(j = 0; j < n; j++)
            d_ans[i] += adim[i*n+j] * b[j];
    }
}
|
8,658 | #include<iostream>
// Element-wise copy kernel: each thread moves exactly one value.
// Caller must launch one thread per element (no bounds check here).
__global__ void copy(double* a_in, double *a_out){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    a_out[idx] = a_in[idx];
}
// Naive transpose: thread (block, thread) writes element (block, thread) of
// the output from element (thread, block) of the input. Assumes a square
// launch where blockDim.x == gridDim.x == matrix side.
__global__ void simple_transpose(double* a_in, double *a_out){
    const int dst = blockIdx.x * blockDim.x + threadIdx.x;  // row-major output slot
    const int src = threadIdx.x * blockDim.x + blockIdx.x;  // mirrored input slot
    a_out[dst] = a_in[src];
}
// CPU transpose: writes the xDim x yDim output row-major, reading the input
// as its yDim x xDim transpose (a_out[i][j] = a_in[j][i]).
void transpose(double* a_in, double*a_out, int xDim, int yDim){
    for (int j = 0; j < yDim; ++j){
        for (int i = 0; i < xDim; ++i){
            a_out[i*yDim + j] = a_in[j*xDim + i];
        }
    }
}
// Prints an xDim x yDim row-major array, tab-separated, one row per line.
void print_array(double *a, int xDim, int yDim){
    for (int row = 0; row < xDim; ++row){
        for (int col = 0; col < yDim; ++col){
            std::cout << a[row*yDim + col];
            if (col + 1 < yDim){
                std::cout << '\t';
            }
        }
        std::cout << '\n';
    }
}
// Demonstrates the naive GPU transpose on an 8x8 matrix: prints the input,
// transposes it on the device, and prints the result.
int main(){
    double *a_in, *a_out;
    double *da_in, *da_out;
    unsigned int xDim = 8;
    unsigned int yDim = 8;
    unsigned int gSize = xDim*yDim;
    // One block per row, one thread per column (square launch).
    dim3 grid = {yDim, 1, 1};
    dim3 threads = {xDim, 1, 1};
    a_in = (double *)malloc(sizeof(double)*gSize);
    a_out = (double *)malloc(sizeof(double)*gSize);
    cudaMalloc((void**)&da_in, sizeof(double)*gSize);
    cudaMalloc((void**)&da_out, sizeof(double)*gSize);
    for (unsigned int i = 0; i < gSize; ++i){
        a_in[i] = i;
    }
    cudaMemcpy(da_in, a_in, sizeof(double)*gSize, cudaMemcpyHostToDevice);
    print_array(a_in, xDim, yDim);
    std::cout << '\n';
    //transpose(a_in, a_out, xDim, yDim);
    simple_transpose<<<grid, threads>>>(da_in, da_out);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(a_out, da_out, sizeof(double)*gSize, cudaMemcpyDeviceToHost);
    print_array(a_out, xDim, yDim);
    // FIX: release host and device buffers (previously leaked).
    free(a_in);
    free(a_out);
    cudaFree(da_in);
    cudaFree(da_out);
    return 0;
}
|
8,659 | //
// Created by root on 2020/12/3.
//
#include "thrust/device_vector.h"
#include "thrust/inner_product.h"
#include "math.h"
#include "stdio.h"
#define N (1024 * 1024)
// Computes the Euclidean (L2) norm of a constant device vector with Thrust.
int main() {
    // N elements, all 1.2f, living in device memory.
    thrust::device_vector<float> vec(N, 1.2f);
    // inner_product(first1, last1, first2, init) forms the dot product of the
    // vector with itself; the second range's end is implied by the first.
    const float dot = thrust::inner_product(vec.begin(), vec.end(), vec.begin(), 0.0f);
    const float norm = sqrt(dot);
    printf("norm = %.2f\n", norm);
    return 0;
}
8,660 |
/************************************************************************
Source Code : partitionCamping.cu
Objective : To demonstrate the difference in bandwidth achieved when
blocks access global memory with and without partition
camping. This Program measures the bandwidth of global
memory for the initialization operation [a(i) = value]
using NVIDIA GPU
Input : None
Output : Bandwidth achieved and timing (average)
Modified : Aug 2011
Author : RarchK
*************************************************************************/
#include <stdio.h>
#include <cuda.h>
#define ARRAY_SIZE 2195264
#define BLOCK_SIZE 64
#define NTIMES 2
#define HLINE "--------------------------------------------------------------\n"
void printResults();
void printDeviceDetails();
void cudaSafeMalloc(void ** , size_t );
void CudaGetDeviceProperties(cudaDeviceProp *, int);
void CudaGetDevice(int *);
void checkCudaErrors();
// Average kernel time in ms per scheme: [0] = without, [1] = with partition camping.
float avgTime[2] = {0};
// Row labels for the results table (second entry padded to align columns).
char *label[] = {"Without Partition Camping","With Partition Camping   "};
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Kernel for initializing the array without Partition Camping
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Fills array[0..size) with `value` using the natural block order, so that
// consecutive blocks touch consecutive memory regions (no partition camping).
__global__ void initializationWithoutPartitionCamping(float *array, float value, int size)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= size)
        return;
    array[tid] = value;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Kernel for initializing the array with Partition Camping
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Fills array[0..size) with `value`, but remaps the block index by striding
// through blocks 6 apart (mod gridDim.x) so that concurrently scheduled
// blocks concentrate on the same memory partitions (partition camping).
__global__ void initializationWithPartitionCamping(float *array, float value, int size)
{
    const int remappedBlock = (blockIdx.x * 6) % gridDim.x;
    const int tid = remappedBlock * blockDim.x + threadIdx.x;
    if (tid < size)
        array[tid] = value;
}
// Times both initialization kernels NTIMES each (skipping the first run as
// warm-up when averaging) and prints the achieved bandwidths.
int main()
{
    float *d_array;
    size_t size = ARRAY_SIZE * sizeof(float);
    int i,j;
    float elapsedTimes[2][NTIMES];
    cudaEvent_t start,stop;
    // event creation, which will be used for timing the code
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaSafeMalloc((void **) &d_array, size);
    // one thread per element, rounded up to whole blocks
    int gridSize = ARRAY_SIZE / BLOCK_SIZE;
    if(ARRAY_SIZE % BLOCK_SIZE != 0) gridSize += 1;
    dim3 grid, block;
    block.x = BLOCK_SIZE;
    grid.x = gridSize;
    for(i=0; i<NTIMES; i++)
    {
        // timing the initialization without partition camping
        cudaEventRecord(start,0);
        initializationWithoutPartitionCamping<<<grid, block>>>(d_array, 1.0f, ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[0][i],start,stop);
        checkCudaErrors();
        // timing the initialization with partition camping
        cudaEventRecord(start,0);
        initializationWithPartitionCamping<<< grid, block>>>(d_array, 1.0f, ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[1][i],start,stop);
        checkCudaErrors();
    }
    // average over all runs except the first (warm-up)
    for(i=0; i<2; i++)
    {
        for(j=1; j<NTIMES; j++)
        {
            avgTime[i] += elapsedTimes[i][j];
        }
        avgTime[i] = avgTime[i]/(NTIMES-1);
    }
    printResults();
    // FIX: free the device buffer (previously leaked) before destroying events.
    cudaFree(d_array);
    checkCudaErrors();
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Host Function to print the results
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Prints the benchmark banner, device info, problem parameters, and the
// achieved initialization bandwidth (GB/s) plus average time per scheme.
void printResults()
{
    printf("\n\n");
    printf(HLINE);
    printf("PARTITION CAMPING DEMONSTRATION\n");
    printf(HLINE);
    printDeviceDetails();
    printf(HLINE);
    printf("Array Size = %llu\n",(unsigned long long)ARRAY_SIZE);
    printf("Block Size = %d\n",(int)BLOCK_SIZE);
    printf(HLINE);
    printf("Initialization Rate (GB/s) Avg time \n");
    for (int scheme = 0; scheme < 2; scheme++)
    {
        // bytes / (1e6 * ms) == GB/s
        double rate = 1.0E-06 * (ARRAY_SIZE * sizeof(float)) / avgTime[scheme];
        printf("%s%11.4f %11.4f \n", label[scheme], rate, avgTime[scheme]);
    }
    printf(HLINE);
}
// Queries the current device and prints its name, compute capability,
// total global memory, and warp size.
void printDeviceDetails()
{
    int deviceId;
    cudaDeviceProp prop;
    CudaGetDevice(&deviceId);
    CudaGetDeviceProperties(&prop, deviceId);
    printf("Device Name is %s\n", prop.name);
    printf("Compute Capability of this device is %d.%d\n",prop.major,prop.minor);
    printf("Total Global Memory available = %f GB\n",(float)prop.totalGlobalMem * 1.0E-09);
    printf("Warp Size in threads = %d\n",prop.warpSize);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Wrapper Fuctions for error checking
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// cudaMalloc wrapper: allocates `size` bytes of device memory into
// *devicePtr and aborts the program via checkCudaErrors() on failure.
void cudaSafeMalloc(void ** devicePtr, size_t size)
{
    cudaMalloc(devicePtr, size);
    checkCudaErrors();
}
// cudaGetDeviceProperties wrapper with error checking (aborts on failure).
void CudaGetDeviceProperties(cudaDeviceProp *devicePropPtr, int deviceId)
{
    cudaGetDeviceProperties(devicePropPtr, deviceId);
    checkCudaErrors();
}
// cudaGetDevice wrapper with error checking (aborts on failure).
void CudaGetDevice(int *deviceIdPtr)
{
    cudaGetDevice(deviceIdPtr);
    checkCudaErrors();
}
// Checks the CUDA runtime's sticky error state; on error, prints the
// message, tears down the device context, and exits the process.
void checkCudaErrors()
{
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        printf("Cuda Error: %s\n",cudaGetErrorString(error));
        /* FIX: cudaThreadExit() has been deprecated since CUDA 4.0;
           cudaDeviceReset() is the documented replacement. */
        cudaDeviceReset();
        exit(-1);
    }
}
|
8,661 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include <cstdlib>
#define checkCudaErrors(val) __check( (val), #val, __FILE__, __LINE__)
// Aborts with file:line context when a CUDA API call did not succeed.
// Invoked through the checkCudaErrors(...) macro, which captures the
// call-site expression, file, and line.
template<typename T>
void __check(T err, const char* const func, const char* const file, const int line) {
    if (err == cudaSuccess)
        return;
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
    exit(1);
}
// Minimal demonstration of the checkCudaErrors macro on a device allocation.
int main() {
    int *data;
    checkCudaErrors(cudaMalloc((void **)&data, 4));
    // FIX: release the allocation before exit (previously leaked).
    checkCudaErrors(cudaFree(data));
    return 0;
}
|
8,662 | #include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <vector>
// Terminates the program with a diagnostic when a CUDA call returns an
// error. Used through the checkCudaErrors macro, which supplies the
// stringified expression plus file and line of the call site.
template<typename ErrorType>
void check(ErrorType err, const char* const func, const char* const file, const int line)
{
    if (err == cudaSuccess)
        return;
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
    exit(1);
}
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
/**************************************************************************************
* Skeletons for variable-sized grids
**************************************************************************************/
// Streams LOADS_PER_THREAD elements per thread from `source` to `dest`
// through registers. Each CTA handles a contiguous chunk of
// LOADS_PER_THREAD * blockDim.x elements; successive loads of one thread
// are blockDim.x apart, so each warp access stays coalesced.
// Precondition: grid * block * LOADS_PER_THREAD == element count
// (there is no bounds check).
template<typename T, int LOADS_PER_THREAD>
__global__
void MoveInOutKernel(T* const dest, const T* const source)
{
    // First element owned by this thread within its CTA's chunk.
    const int id = LOADS_PER_THREAD*(blockDim.x * blockIdx.x) + threadIdx.x;
    // Register storage
    T a[LOADS_PER_THREAD];
    // Load from global memory
    #pragma unroll
    for (int i = 0; i < LOADS_PER_THREAD; ++i)
        a[i] = source[id + i*blockDim.x];
    // Store to global memory
    #pragma unroll
    for (int i = 0; i < LOADS_PER_THREAD; ++i)
        dest[id + i*blockDim.x] = a[i];
}
// Compile-time description of one data-movement configuration:
// element type, CTA (thread block) size, and loads per thread.
// Benchmark skeletons derive from this to expose the parameters.
template
<
    typename T,
    int CTA_SIZE_,
    int LOADS_PER_THREAD_
>
struct MoveData
{
    typedef T ElementType;
    static const int CTA_SIZE = CTA_SIZE_;
    static const int LOADS_PER_THREAD = LOADS_PER_THREAD_;
};
// Launchable skeleton: invoking the functor launches MoveInOutKernel with
// this configuration's CTA size on the given grid.
template
<
    typename T,
    int CTA_SIZE_,
    int LOADS_PER_THREAD_
>
struct MoveDataInOut : public MoveData<T, CTA_SIZE_, LOADS_PER_THREAD_>
{
    // Launch the copy kernel; caller sizes `grid` to cover the data exactly.
    inline void operator() (T* const dest, const T* const source, const dim3& grid)
    {
        MoveInOutKernel<T, LOADS_PER_THREAD_><<<grid, CTA_SIZE_>>>(dest, source);
    }
};
/**************************************************************************************
* Skeletons for fixed-sized grids
**************************************************************************************/
const int CTAs = 64;
//TODO Fixed-size grid performance analysis
// Times one copy-skeleton launch over `bytes` bytes with CUDA events and
// prints effective bandwidth next to the theoretical peak.
// The grid is sized so that grid * CTA_SIZE * LOADS_PER_THREAD covers the
// data exactly (assumes bytes divides evenly by element size, CTA size,
// and loads per thread).
template<typename MovingSkeleton>
void MeasureInOutVariableGrid(
    MovingSkeleton skeleton,
    float* const d_dest,
    const float* const d_source,
    size_t bytes,
    float peakBandwidth)
{
    typedef typename MovingSkeleton::ElementType ElementType;
    const int N = bytes/sizeof(ElementType);   // elements to move
    const int T = MovingSkeleton::CTA_SIZE;
    const int E = MovingSkeleton::LOADS_PER_THREAD;
    const dim3 grid((N/T)/E);                  // one chunk per CTA
    // printf("Bytes: %d\n", bytes);
    // printf("T size: %d\n", sizeof(ElementType));
    // printf("Problem: %d\n", N);
    // printf("ThBlock: %d\n", T);
    // printf("Grid: %d\n", grid.x);
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    checkCudaErrors(cudaEventRecord(start, 0));
    skeleton((ElementType*)d_dest, (ElementType*)d_source, grid);
    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize(stop));
    float totalTimeMsec = 0.0f;
    checkCudaErrors(cudaEventElapsedTime(&totalTimeMsec, start, stop));
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    // Each byte is read once and written once.
    const size_t loadedBytes = bytes;
    const size_t storedBytes = bytes;
    const float effectiveBandwidth = (loadedBytes + storedBytes)/totalTimeMsec/1.0e6;
    printf("IN-OUT-%d %4d [CTASIZE] %5d [GRID] %2zd [Bytes/element] "
    "%f [ms] %7.3f [GB/s] %7.3f [GB/s] %7.3f %% of peak\n",
    E, T, grid.x, sizeof(ElementType),
    totalTimeMsec, peakBandwidth, effectiveBandwidth, (effectiveBandwidth / peakBandwidth) * 100);
}
// Times a plain device-to-device cudaMemcpy of `bytes` bytes as the
// bandwidth baseline, printed in the same format as the skeleton rows.
void MeasureCudaMemcpy(
    float* const d_dest,
    const float* const d_source,
    size_t bytes,
    float peakBandwidth)
{
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));
    checkCudaErrors(cudaEventRecord(start, 0));
    // FIX: the timed copy itself was the only unchecked CUDA call here.
    checkCudaErrors(cudaMemcpy(d_dest, d_source, bytes, cudaMemcpyDeviceToDevice));
    checkCudaErrors(cudaEventRecord(stop, 0));
    checkCudaErrors(cudaEventSynchronize(stop));
    float totalTimeMsec = 0.0f;
    checkCudaErrors(cudaEventElapsedTime(&totalTimeMsec, start, stop));
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    // Each byte is read once and written once.
    const size_t loadedBytes = bytes;
    const size_t storedBytes = bytes;
    const float effectiveBandwidth = (loadedBytes + storedBytes)/totalTimeMsec/1.0e6;
    printf("MEMCPY %4d [CTASIZE] %5d [GRID] %2zd [Bytes/element] "
    "%f [ms] %7.3f [GB/s] %7.3f [GB/s] %7.3f %% of peak\n",
    0, 0, sizeof(float),
    totalTimeMsec, peakBandwidth, effectiveBandwidth, (effectiveBandwidth / peakBandwidth) * 100);
}
// Fills `array` in place with pseudo-random values in [0, 100),
// reseeding the C PRNG from the current time on each call.
template<typename ElementType>
void CreateSample(std::vector<ElementType>& array)
{
    std::srand(time(0));
    for (size_t idx = 0; idx < array.size(); ++idx)
    {
        const int sample = std::rand() % 100;
        array[idx] = static_cast<ElementType>(sample);
    }
}
// Returns the theoretical peak global-memory bandwidth [GB/s] of the
// current device (DDR: 2 transfers/clock * bus width in bytes * clock),
// printing its name and compute capability along the way.
// NOTE(review): the devID argument is immediately overwritten by
// cudaGetDevice(), so the passed-in value is effectively unused — confirm
// whether callers rely on selecting a specific device here.
float PeakBandwidth(int devID)
{
    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
    }
    error = cudaGetDeviceProperties(&deviceProp, devID);
    if (deviceProp.computeMode == cudaComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, "
        "no threads can use ::cudaSetDevice().\n");
        exit(1);
    }
    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
    }
    else
    {
        printf("\nGPUDevice %d: %s\nCompute cap: %d.%d\n",
        devID,
        deviceProp.name,
        deviceProp.major,
        deviceProp.minor);
    }
    const int clockRate = deviceProp.memoryClockRate; // [KHz]
    const int memWidth = deviceProp.memoryBusWidth; // [bits]
    // 2 transfers per clock (DDR), bus width in bytes, KHz -> GB/s via 1e6.
    return 2.0 * clockRate * (memWidth/8.0) / 1.0e6; // [GB/s];
}
// Benchmark driver: measures effective device-memory bandwidth of the
// MoveDataInOut skeleton across element types (float/float2/float4),
// CTA sizes (128..1024) and loads-per-thread (1/2/4), then compares the
// best against a raw device-to-device cudaMemcpy.
// Optional argv[1] sets tilesPerCTA, scaling the problem size.
int main(int argc, char** argv)
{
    int tilesPerCTA = 400;
    if (argc > 1)
    {
        tilesPerCTA = atoi(argv[1]);
    }
    int devID = 0;
    const float peakBandwidth = PeakBandwidth(devID);
    // CTAs * 256 elements per tile * tiles; divisible by every tested
    // CTA size and loads-per-thread combination below.
    const size_t ARRAY_SIZE = CTAs * 256 * tilesPerCTA;
    const size_t bytes = sizeof(float) * ARRAY_SIZE;
    std::vector<float> h_source(ARRAY_SIZE);
    CreateSample(h_source);
    printf("Problem size: %zd\n", ARRAY_SIZE);
    float* d_source;
    float* d_dest;
    checkCudaErrors(cudaMalloc((void**) &d_source, bytes));
    checkCudaErrors(cudaMalloc((void**) &d_dest, bytes));
    checkCudaErrors(cudaMemcpy(d_source, h_source.data(), bytes, cudaMemcpyHostToDevice));
    printf("======================================================================"
    "===================================================================\n");
    printf("Skeletons for variable-sized grids\n");
    printf("======================================================================"
    "===================================================================\n");
    // 1 load per thread, all element types and CTA sizes.
    MeasureInOutVariableGrid(MoveDataInOut< float, 128, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 128, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 128, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 256, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 256, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 256, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 512, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 512, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 512, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 1024, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 1024, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 1024, 1 >(), d_dest, d_source, bytes, peakBandwidth);
    printf("======================================================================"
    "===================================================================\n");
    // 2 loads per thread.
    MeasureInOutVariableGrid(MoveDataInOut< float, 128, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 128, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 128, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 256, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 256, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 256, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 512, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 512, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 512, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 1024, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 1024, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 1024, 2 >(), d_dest, d_source, bytes, peakBandwidth);
    printf("======================================================================"
    "===================================================================\n");
    // 4 loads per thread.
    MeasureInOutVariableGrid(MoveDataInOut< float, 128, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 128, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 128, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 256, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 256, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 256, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 512, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 512, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 512, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float, 1024, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float2, 1024, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    MeasureInOutVariableGrid(MoveDataInOut< float4, 1024, 4 >(), d_dest, d_source, bytes, peakBandwidth);
    printf("======================================================================"
    "===================================================================\n");
    // Baseline: raw device-to-device copy.
    MeasureCudaMemcpy(d_dest, d_source, bytes, peakBandwidth);
    printf("======================================================================"
    "===================================================================\n");
    printf("Skeletons for fixed-sized grids\n");
    printf("======================================================================"
    "===================================================================\n");
    // TODO Fixed-size grid performance analysis
    printf("======================================================================"
    "===================================================================\n");
    checkCudaErrors(cudaMemcpy(h_source.data(), d_dest, bytes, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_source));
    checkCudaErrors(cudaFree(d_dest));
    checkCudaErrors(cudaDeviceReset());
}
|
8,663 | /*
* squaring map kernel that runs in 1 block
*/
/*
* runs on and callable from the device
*/
// Returns x squared; device-only helper callable from kernels.
__device__ float square(float x) {
    return x*x;
}
/*
* runs on device, callable from anywhere
*/
// Applies square() element-wise: out[i] = in[i]^2.
// Indexes by threadIdx.x only, so it must be launched with a single block.
__global__ void map(float* out, float* in, int size) {
    const int i = threadIdx.x;
    if (i < size) {
        out[i] = square(in[i]);
    }
}
|
8,664 | #include "includes.h"
// Adds an integer constant to every element of a float array of length _size.
__global__ void kernelVector_suma_constante(float* array, int _size, int _constant){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= _size)
        return;
    array[tid] += _constant;   // int operand is promoted to float
}
8,665 | #include <stdio.h>
#include <stdlib.h>
#define _size 3 //Define size of matrix to be 3 by 3;
//__shared__ int result[_size*_size];
// Square matrix multiply, one thread per output element:
// result[row][col] += sum_i A[row][i] * B[i][col], where row = blockIdx.x
// and col = threadIdx.x. Launch as <<<_size, _size>>>; the caller must
// zero-initialize `result` because the kernel accumulates into it.
__global__ void multiply(int *result, int *A, int *B)
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    int acc = 0;
    for (int i = 0; i < _size; i++)
    {
        acc += A[row*blockDim.x + i] * B[i*blockDim.x + col];
    }
    result[row*blockDim.x + col] += acc;
}
// Reads two _size x _size integer matrices from stdin, multiplies them on
// the GPU, and prints A, B, and the product C.
int main(int argc, char const *argv[])
{
    int *d_c,*d_a,*d_b;
    int size = _size*sizeof(int)*_size;   // bytes per matrix
    int *a = reinterpret_cast<int*>(malloc(size));
    int *b = reinterpret_cast<int*>(malloc(size));
    int *c = reinterpret_cast<int*>(malloc(size));
    // Read matrix A (row-major)
    printf("Enter matrix A\n");
    for (int i = 0; i < _size; ++i)
    {
        for (int j = 0; j < _size; ++j)
        {
            scanf("%d",&*(a+i*_size+j) );
        }
    }
    printf("A is: \n");
    for (int i = 0; i < _size; ++i)
    {
        for (int j = 0; j < _size; ++j)
        {
            printf("%d ",*(a+i*_size+j) );
        }
        printf("\n");
    }
    // Read matrix B (row-major)
    printf("Enter matrix B\n");
    for (int i = 0; i < _size; ++i)
    {
        for (int j = 0; j < _size; ++j)
        {
            scanf("%d",&*(b+i*_size+j));
        }
    }
    printf("B is: \n");
    for (int i = 0; i < _size; ++i)
    {
        for (int j = 0; j < _size; ++j)
        {
            printf("%d ",*(b+i*_size+j) );
        }
        printf("\n");
    }
    // The kernel accumulates into the result, so it must start zeroed.
    memset(c,0,size);
    cudaMalloc((void **)&d_a,size);
    cudaMalloc((void **)&d_b,size);
    cudaMalloc((void **)&d_c,size);
    cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_c,c,size,cudaMemcpyHostToDevice);
    multiply<<<_size,_size>>>(d_c,d_a,d_b);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported replacement.
    cudaDeviceSynchronize();
    cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
    printf("C is: \n");
    for (int i = 0; i < _size; ++i)
    {
        for (int j = 0; j < _size; ++j)
        {
            printf("%d ",*(c+i*_size+j) );
        }
        printf("\n");
    }
    // FIX: release host and device memory (previously leaked).
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
8,666 | #include "kernel.cuh"
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
// Element-wise vector add: c[i] = a[i] + b[i].
// Indexes by threadIdx.x only, so launch with a single block sized to the data.
__global__ void add_cuda_kernel(int *c, const int *a, const int *b)
{
    const int tid = threadIdx.x;
    c[tid] = a[tid] + b[tid];
}
|
8,667 |
/*
#define DIM 3
#define P_SCALE 30.0f
#define RIGHT P_SCALE
#define LEFT -P_SCALE
#define UP (2*P_SCALE)
#define DOWN 0
#define FRONT P_SCALE
#define BACK -P_SCALE
#define OFFSET 0.01
#define CONE_HEIGHT 1.0
#define CONE_RADIUS 0.5
__device__ bool Cone_ConeTest(float c1_x,float c1_y,float c1_z,float c1_size,
float c2_x,float c2_y,float c2_z,float c2_size)
{
if(c1_y > c2_y){
float height_dif = c1_y - c2_y;
if( height_dif > c1_size*CONE_HEIGHT)
return 0;
else{
float new_radius = (c1_size*CONE_RADIUS * height_dif)/(c1_size*CONE_HEIGHT);
float dist =
(c1_x - c2_x) * (c1_x - c2_x) +
(c1_z - c2_z) * (c1_z - c2_z);
float minDist = new_radius + c2_size*CONE_RADIUS;
return dist <= minDist * minDist;
}
}else{
float height_dif = c2_y - c1_y;
if( height_dif > c2_size*CONE_HEIGHT)
return 0;
else{
float new_radius = (c2_size*CONE_RADIUS * height_dif)/(c2_size*CONE_HEIGHT);
float dist =
(c1_x - c2_x) * (c1_x - c2_x) +
(c1_z - c2_z) * (c1_z - c2_z);
float minDist = new_radius + c1_size*CONE_RADIUS;
return dist <= minDist * minDist;
}
}
return 1;
}
__global__ void launch_Cone(float* cone_poz_d,
float* cone_speed_d,
float* cone_size_d,
int NR_CONES
);
// Kernelul ce se executa pe device-ul CUDA
__global__ void launch_Cone(float* cone_poz_d,
float* cone_speed_d,
float* cone_size_d,
int NR_CONES
)
{
//calculate position
//unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if(cone_poz_d[y*DIM] >= (RIGHT -OFFSET) || cone_poz_d[y*DIM] <= (LEFT + OFFSET))
cone_speed_d[y*DIM] = -cone_speed_d[y*DIM];
if(cone_poz_d[1+y*DIM] >= (UP - OFFSET) || cone_poz_d[1+y*DIM] <= (DOWN + OFFSET))
cone_speed_d[1+y*DIM] = -cone_speed_d[1+y*DIM];
if(cone_poz_d[2+y*DIM] >= (FRONT - OFFSET) || cone_poz_d[2+y*DIM] <= (BACK + OFFSET))
cone_speed_d[2+y*DIM] = -cone_speed_d[2+y*DIM];
for(int j = (y+1)*DIM ; j < NR_CONES ; j=j+DIM) {
if(Cone_ConeTest(
cone_poz_d[y*DIM],cone_poz_d[1+y*DIM],cone_poz_d[2+y*DIM],cone_size_d[y*DIM],
cone_poz_d[j], cone_poz_d[1+j], cone_poz_d[2+j], cone_size_d[j])) {
cone_speed_d[j] = -cone_speed_d[j];
cone_speed_d[1+j] = -cone_speed_d[1+j];
cone_speed_d[2+j] = -cone_speed_d[2+j];
}
}
cone_poz_d[y*DIM] += cone_speed_d[y*DIM];
cone_poz_d[1+y*DIM] += cone_speed_d[1+y*DIM];
cone_poz_d[2+y*DIM] += cone_speed_d[2+y*DIM];
}
extern "C"
cudaError_t launch_Cone(float* cone_poz_d,
float* cone_speed_d,
float* cone_size_d,
int NR_CONES,
dim3 DIM_GRID,
dim3 DIM_BLOCK)
{
launch_Cone <<<DIM_GRID, DIM_BLOCK>>> (cone_poz_d,
cone_speed_d,
cone_size_d,
NR_CONES);
return cudaGetLastError();
}
*/ |
8,668 | #include <stdio.h>
#include <assert.h>
#include <math.h>
#include <vector>
#include <queue>
#include <ctime>
// CUDA runtime
#include <cuda_runtime.h>
using namespace std;
// Thread block size
#define TBS 512
// Warp size
#define WS 32
// Graph vertex shared by the host and device BFS code. Children are stored as
// node indices (ints) in a host-allocated array created by
// initializeChildren(); `explored` is the CPU-side visit state
// (0 = unvisited, 1 = visited, 2 = fully expanded — see exploreChild/bfs).
class Node {
private:
    int value;        // node id (index into the owning node array)
    int* children;    // host-allocated array of child node ids
    int numChildren;  // number of valid entries in `children`
    int explored;     // CPU-side visit state
public:
    Node();
    Node(int);
    __host__ __device__ int getValue();
    void addChild(Node*);
    __host__ __device__ int* getChildren();
    __host__ __device__ int getNumChildren();
    void printNode();
    void initializeChildren(int);
    __host__ __device__ int getExplored();
    void setExplored(int);
    __device__ int parallelSetExplored(int);
};
// Pull-style BFS step over the CSR parent (reverse-edge) arrays.
// Mask states: 0 = unvisited, 1 = in the current wave, 2 = finished.
// Each unvisited node scans its parents; if any parent is in the current
// wave, the node joins the next wave and records its cost.
__global__ void parentListBackwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_parent, int *d_parentPtr, int *d_cost, int *d_size) {
    const int node = blockIdx.x * TBS + threadIdx.x;
    if (node >= *d_size)
        return;
    if (d_waveMask[node] == 0) {
        for (int e = d_parentPtr[node]; e < d_parentPtr[node + 1]; e++) {
            const int p = d_parent[e];
            if (d_waveMask[p] == 1) {
                atomicCAS(&d_nextWaveMask[node], 0, 1);
                d_cost[node] = d_cost[p] + 1;
                break;
            }
        }
    } else if (d_waveMask[node] == 2) {
        // Finished nodes stay finished in the next wave.
        d_nextWaveMask[node] = 2;
    }
}
// Pull-style BFS step over the dense [size x maxChildren] child table (no
// parent CSR available): each unvisited node scans the whole table looking
// for a slot holding its own id, derives the owning parent from the slot
// index, and joins the next wave if that parent is in the current wave.
// Mask states: 0 = unvisited, 1 = current wave, 2 = finished.
__global__ void backwardsWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
    const int node = blockIdx.x * TBS + threadIdx.x;
    if (node >= *d_size)
        return;
    if (d_waveMask[node] == 0) {
        const int totalSlots = *d_size * *d_maxChildren;
        for (int slot = 0; slot < totalSlots; slot++) {
            if (d_children[slot] != node)
                continue;
            const int parent = slot / *d_maxChildren;
            if (d_waveMask[parent] == 1) {
                atomicCAS(&d_nextWaveMask[node], 0, 1);
                d_cost[node] = d_cost[parent] + 1;
                break;
            }
        }
    } else if (d_waveMask[node] == 2) {
        // Finished nodes stay finished in the next wave.
        d_nextWaveMask[node] = 2;
    }
}
// Push-style BFS step: each node in the current wave (mask == 1) pushes its
// children into the next wave and writes their tentative cost.
// Mask states: 0 = unvisited, 1 = current wave, 2 = finished.
// NOTE(review): the d_cost write is unguarded; it appears benign because all
// current-wave parents write the same depth value — confirm.
__global__ void childListExploreWave(int *d_waveMask, int *d_nextWaveMask, int *d_children, int *d_numChildren, int *d_cost, int *d_size, int *d_maxChildren) {
    const int node = blockIdx.x * TBS + threadIdx.x;
    if (node >= *d_size)
        return;
    if (d_waveMask[node] == 1) {
        const int degree = d_numChildren[node];
        const int base = node * *d_maxChildren;
        for (int c = 0; c < degree; c++) {
            const int child = d_children[base + c];
            atomicCAS(&d_nextWaveMask[child], 0, 1);
            if (d_waveMask[child] == 0)
                d_cost[child] = d_cost[node] + 1;
        }
    } else if (d_waveMask[node] == 2) {
        // Finished nodes stay finished in the next wave.
        d_nextWaveMask[node] = 2;
    }
}
// Push-style BFS step that reads the node's degree from the device-resident
// Node copy instead of the d_numChildren array. The child ids still come from
// the flat child table: the Node's own `children` member is a host pointer
// (the objects were bitwise-copied with cudaMemcpy) and must not be
// dereferenced here.
// Mask states: 0 = unvisited, 1 = current wave, 2 = finished.
__global__ void exploreWave(int *d_waveMask, int *d_nextWaveMask, Node *d_graph, int *d_children, int *d_cost, int *d_size, int *d_maxChildren) {
    const int node = blockIdx.x * TBS + threadIdx.x;
    if (node >= *d_size)
        return;
    if (d_waveMask[node] == 1) {
        Node self = d_graph[node];
        const int degree = self.getNumChildren();
        const int base = node * *d_maxChildren;
        for (int c = 0; c < degree; c++) {
            const int child = d_children[base + c];
            atomicCAS(&d_nextWaveMask[child], 0, 1);
            if (d_waveMask[child] == 0)
                d_cost[child] = d_cost[node] + 1;
        }
    } else if (d_waveMask[node] == 2) {
        // Finished nodes stay finished in the next wave.
        d_nextWaveMask[node] = 2;
    }
}
// Marks every node that was in the just-processed wave (mask == 1) as
// finished (mask == 2) in the next-wave mask.
__global__ void setPreviousExplored(int *d_waveMask, int *d_nextWaveMask, int *d_size){
    const int idx = blockIdx.x * TBS + threadIdx.x;
    if (idx < *d_size && d_waveMask[idx] == 1)
        d_nextWaveMask[idx] = 2;
}
// Randomly wires up to maxEdgesPerNode outgoing edges per node, filling both
// the Node objects and a flat [nNodes x maxEdgesPerNode] child table that the
// kernels use. Returns the heap-allocated table; caller owns it.
// Requires maxEdgesPerNode >= 1 (rand() % maxEdgesPerNode).
int* generateChildren(Node *nodes, int nNodes, int maxEdgesPerNode) {
    int* children = new int[nNodes * maxEdgesPerNode];
    // BUGFIX: slots past a node's actual child count were left uninitialized,
    // yet backwardsWave compares every table slot against a node index, so a
    // garbage slot could alias a valid id. Fill with a sentinel that can
    // never match a node.
    for (int s = 0; s < nNodes * maxEdgesPerNode; s++) {
        children[s] = -1;
    }
    for (int i = 0; i < nNodes; i++) {
        int numEdges = (rand() % maxEdgesPerNode) + 1;
        nodes[i].initializeChildren(numEdges);
        for (int j = 0; j < numEdges; j++) {
            int candidate = rand() % nNodes;
            // Reject duplicates and self-loops (a rejected draw simply
            // produces fewer than numEdges children).
            bool isChild = false;
            for (int k = 0; k < nodes[i].getNumChildren(); k++) {
                if (candidate == nodes[i].getChildren()[k]) {
                    isChild = true;
                    break;
                }
            }
            if (!isChild && candidate != nodes[i].getValue()) {
                children[i * maxEdgesPerNode + nodes[i].getNumChildren()] = candidate;
                nodes[i].addChild(&nodes[candidate]);
            }
        }
    }
    return children;
}
// Allocates an nNodes-element graph and assigns node i the id i.
// Seeds rand() so generateChildren produces a fresh topology per run.
// Returns a heap array; caller owns it.
Node* generateGraph(int nNodes) {
    srand((unsigned)time(0));
    Node* nodes = new Node[nNodes];
    for (int i = 0; i < nNodes; i++) {
        // BUGFIX: previously `Node* tmp = new Node(i); nodes[i] = *tmp;`
        // leaked one heap Node per element — assign a temporary instead.
        nodes[i] = Node(i);
    }
    return nodes;
}
// Recursive breadth-first expansion used by the (currently commented-out)
// path-building reference: every newly discovered child of `child` is
// appended to path[depth] and marked visited, then each is recursed one
// level deeper. Marks `child` fully expanded (2) on exit.
void exploreChild(Node* child, vector< vector<Node*> >* path, int depth, Node* nodes) {
    int numChildren = child->getNumChildren();
    if (numChildren > 0) {
        bool *toExplore = new bool[numChildren];
        vector<Node*> newPath;
        if ((int)path->size() <= depth) {
            path->push_back(newPath);
        }
        vector<Node*>* currentPath = &(path->at(depth));
        for (int i = 0; i < numChildren; i++) {
            Node* newChild = &nodes[child->getChildren()[i]];
            if (newChild->getExplored() == 0) {
                currentPath->push_back(newChild);
                newChild->setExplored(1);
                toExplore[i] = true;
            } else {
                toExplore[i] = false;
            }
        }
        // Recurse after the push loop so traversal order stays breadth-first.
        for (int i = 0; i < numChildren; i++) {
            Node* newChild = &nodes[child->getChildren()[i]];
            if (toExplore[i]) {
                exploreChild(newChild, path, depth + 1, nodes);
            }
        }
        delete[] toExplore;  // BUGFIX: was leaked on every call
    }
    child->setExplored(2);
    return;
}
// Sequential reference BFS from node 0. Returns a heap-allocated array of
// per-node depths (caller owns it); unreachable nodes keep -1. Marks nodes
// visited via Node::setExplored as a side effect.
int* bfs(Node* nodes, int size) {
    int* cost = new int[size];
    for (int i = 0; i < size; i++)
        cost[i] = -1;
    queue<Node*> frontier;
    frontier.push(&nodes[0]);
    cost[0] = 0;
    while (!frontier.empty()) {
        Node* cur = frontier.front();
        frontier.pop();
        cur->setExplored(1);
        int degree = cur->getNumChildren();
        if (degree > 0) {
            int *kids = cur->getChildren();
            for (int i = 0; i < degree; i++) {
                Node* kid = &nodes[kids[i]];
                if (kid->getExplored() == 0) {
                    kid->setExplored(1);
                    cost[kids[i]] = cost[cur->getValue()] + 1;
                    frontier.push(kid);
                }
            }
        }
    }
    return cost;
}
// Flattens a level-ordered path (path[d] = nodes discovered at depth d) into
// a per-node depth array; nodes absent from every level keep -1.
// Returns a heap array of `size` entries; caller owns it.
int* transformBfs(vector< vector<Node*> > path, int size) {
    int *result = new int[size];
    for (int i = 0; i < size; i++)
        result[i] = -1;
    for (size_t depth = 0; depth < path.size(); depth++) {
        for (size_t j = 0; j < path[depth].size(); j++) {
            result[path[depth][j]->getValue()] = (int)depth;
        }
    }
    return result;
}
// Copies each node's child count into a flat, caller-owned int array
// (the layout the kernels consume as d_numChildren).
int* transformNumChildren(Node* nodes, int size) {
    int *counts = new int[size];
    for (int i = 0; i < size; i++)
        counts[i] = nodes[i].getNumChildren();
    return counts;
}
// Builds the CSR offset array for the reverse (parent) graph: after the
// prefix sum, result[v]..result[v+1] delimit v's segment in the parent
// array and result[size] is the total edge count.
// Returns a heap array of size+1 entries; caller owns it.
int* transformParentPtr(Node* nodes, int size) {
    int *result = new int[size + 1];
    // BUGFIX: the old loop zeroed only indices 0..size-1, but the counting
    // pass below increments result[child + 1], which reaches index `size` —
    // an uninitialized += that corrupted the total edge count.
    for (int i = 0; i <= size; i++) {
        result[i] = 0;
    }
    // Count each node's in-degree, shifted one slot right.
    for (int i = 0; i < size; i++) {
        Node *node = &nodes[i];
        if (node->getNumChildren() > 0) {
            int *children = node->getChildren();
            for (int j = 0; j < node->getNumChildren(); j++) {
                int child = children[j];
                result[child + 1] += 1;
            }
        }
    }
    // Inclusive prefix sum turns shifted counts into segment offsets.
    for (int i = 1; i < size + 1; i++) {
        result[i] = result[i] + result[i - 1];
    }
    return result;
}
// Fills the CSR parent array matching parentPtr: for every edge i -> child,
// stores i in the next free slot of child's segment.
// Returns a heap array of parentPtr[size] entries; caller owns it.
int* transformParents(Node* nodes, int size, int* parentPtr) {
    int numEdges = parentPtr[size];
    int *result = new int[numEdges];
    // Per-child write cursor, starting at each segment's base offset.
    int *curIdx = new int[size];
    for (int i = 0; i < size; i++) {
        curIdx[i] = parentPtr[i];
    }
    for (int i = 0; i < size; i++) {
        Node *node = &nodes[i];
        if (node->getNumChildren() > 0) {
            int *children = node->getChildren();
            for (int j = 0; j < node->getNumChildren(); j++) {
                int child = children[j];
                result[curIdx[child]] = i;
                curIdx[child] = curIdx[child] + 1;
            }
        }
    }
    delete[] curIdx;  // BUGFIX: scratch cursor array was leaked
    return result;
}
// Hybrid BFS driver: runs the push (child-list) kernel while fewer than
// switchThreshold nodes are finished, then switches to the pull (parent-CSR)
// kernel. Times the run with CUDA events and validates d_cost against the
// CPU reference in synchResult. Assumes maxChildren >= 1.
void callFlipFlopParent(int *d_size, int *d_children, int *d_numChildren, int *d_maxChildren, int *d_parent, int *d_parentPtr, int size, int maxChildren, int *synchResult) {
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    cudaMalloc((void **)&d_cost, size * sizeof(int));
    cudaMalloc((void **)&d_waveMask, size * sizeof(int));
    cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // BUGFIX: initialize from index 0 — the old loop started at 1 and left
    // nextWaveMask[0] uninitialized before being copied to the device.
    for (int i = 0; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source node
    waveMask[0] = 1;  // source starts in the first wave
    cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    // BUGFIX: (m*m - 1) / (m*m) in integer arithmetic is always 0, which made
    // the threshold 0 and the push phase unreachable. Compute in floating
    // point so the intended (m^2-1)/m^2 fraction of the graph survives.
    const int switchThreshold =
        (int)((double)(maxChildren * maxChildren - 1) / (maxChildren * maxChildren) * size);
    // Record the start event
    cudaEventRecord(start, NULL);
    bool complete = false;
    int completed = 0;
    while (!complete) {
        // Launch kernel on GPU: push phase first, pull phase near the end.
        if (completed < switchThreshold) {
            childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        } else {
            parentListBackwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_parent, d_parentPtr, d_cost, d_size);
        }
        cudaDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        cudaDeviceSynchronize();
        cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
        complete = true;
        // BUGFIX: recount finished nodes each pass — 2s persist in the mask
        // across passes, so accumulating `completed` over-counted them.
        completed = 0;
        cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;
            } else if (waveMask[i] == 2) {
                completed += 1;
            }
        }
    }
    // Make sure result is finished
    cudaDeviceSynchronize();
    // Record end event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Parent Flip Flop Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare to the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // BUGFIX: release device buffers, host arrays and events (all leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_cost);
    cudaFree(d_waveMask);
    cudaFree(d_nextWaveMask);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Hybrid BFS driver: push (child-list) kernel while fewer than
// switchThreshold nodes are finished, then the dense-table pull kernel
// (backwardsWave). Times the run and validates against synchResult.
// Assumes maxChildren >= 1.
void callFlipFlopWaveExplore(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int maxChildren, int *synchResult) {
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    cudaMalloc((void **)&d_cost, size * sizeof(int));
    cudaMalloc((void **)&d_waveMask, size * sizeof(int));
    cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // BUGFIX: initialize from index 0 — the old loop started at 1 and left
    // nextWaveMask[0] uninitialized before being copied to the device.
    for (int i = 0; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source node
    waveMask[0] = 1;  // source starts in the first wave
    cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    // BUGFIX: (m*m - 1) / (m*m) in integer arithmetic is always 0, which made
    // the threshold 0 and the push phase unreachable.
    const int switchThreshold =
        (int)((double)(maxChildren * maxChildren - 1) / (maxChildren * maxChildren) * size);
    // Record the start event
    cudaEventRecord(start, NULL);
    bool complete = false;
    int completed = 0;
    while (!complete) {
        // Launch kernel on GPU: push phase first, pull phase near the end.
        if (completed < switchThreshold) {
            childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        } else {
            backwardsWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        }
        cudaDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        cudaDeviceSynchronize();
        cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
        complete = true;
        // BUGFIX: recount finished nodes each pass — 2s persist in the mask
        // across passes, so accumulating `completed` over-counted them.
        completed = 0;
        cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;
            } else if (waveMask[i] == 2) {
                completed += 1;
            }
        }
    }
    // Make sure result is finished
    cudaDeviceSynchronize();
    // Record end event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Flip Flop Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare to the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // BUGFIX: release device buffers, host arrays and events (all leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_cost);
    cudaFree(d_waveMask);
    cudaFree(d_nextWaveMask);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Runs push-style BFS (childListExploreWave) to convergence, times it with
// CUDA events, and validates d_cost against the CPU reference in synchResult.
void callChildListExploreWave(int *d_size, int *d_children, int *d_numChildren, int size, int *d_maxChildren, int *synchResult) {
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    cudaMalloc((void **)&d_cost, size * sizeof(int));
    cudaMalloc((void **)&d_waveMask, size * sizeof(int));
    cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // BUGFIX: initialize from index 0 — the old loop started at 1 and left
    // nextWaveMask[0] uninitialized before being copied to the device.
    for (int i = 0; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source node
    waveMask[0] = 1;  // source starts in the first wave
    cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    // Record the start event
    cudaEventRecord(start, NULL);
    bool complete = false;
    while (!complete) {
        // Launch kernel on GPU
        childListExploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_children, d_numChildren, d_cost, d_size, d_maxChildren);
        cudaDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        cudaDeviceSynchronize();
        cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
        // Converged when no node is still in the active wave.
        complete = true;
        cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;
                break;
            }
        }
    }
    // Make sure result is finished
    cudaDeviceSynchronize();
    // Record end event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Child List Explore Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare to the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // BUGFIX: release device buffers, host arrays and events (all leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_cost);
    cudaFree(d_waveMask);
    cudaFree(d_nextWaveMask);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Runs push-style BFS using the device-resident Node copies for degrees
// (exploreWave) to convergence, times it, and validates d_cost against the
// CPU reference in synchResult.
void callDeviceCachedVisitBFS(Node *d_graph, int *d_size, int *d_children, int size, int *d_maxChildren, int *synchResult) {
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    int *d_cost, *d_waveMask, *d_nextWaveMask;
    // Allocate space for device copies
    cudaMalloc((void **)&d_cost, size * sizeof(int));
    cudaMalloc((void **)&d_waveMask, size * sizeof(int));
    cudaMalloc((void **)&d_nextWaveMask, size * sizeof(int));
    int gridSz = ceil(((float) size) / TBS);
    int *waveMask = new int[size];
    int *nextWaveMask = new int[size];
    int *cost = new int[size];
    // BUGFIX: initialize from index 0 — the old loop started at 1 and left
    // nextWaveMask[0] uninitialized before being copied to the device.
    for (int i = 0; i < size; i++) {
        cost[i] = -1;
        waveMask[i] = 0;
        nextWaveMask[i] = 0;
    }
    cost[0] = 0;      // source node
    waveMask[0] = 1;  // source starts in the first wave
    cudaMemcpy(d_cost, cost, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_waveMask, waveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
    // Record the start event
    cudaEventRecord(start, NULL);
    bool complete = false;
    while (!complete) {
        // Launch kernel on GPU
        exploreWave<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_graph, d_children, d_cost, d_size, d_maxChildren);
        cudaDeviceSynchronize();
        setPreviousExplored<<<gridSz, TBS>>>(d_waveMask, d_nextWaveMask, d_size);
        cudaDeviceSynchronize();
        cudaMemcpy(d_waveMask, d_nextWaveMask, size * sizeof(int), cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_nextWaveMask, nextWaveMask, size * sizeof(int), cudaMemcpyHostToDevice);
        // Converged when no node is still in the active wave.
        complete = true;
        cudaMemcpy(waveMask, d_waveMask, size * sizeof(int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < size; i++) {
            if (waveMask[i] == 1) {
                complete = false;
                break;
            }
        }
    }
    // Make sure result is finished
    cudaDeviceSynchronize();
    // Record end event
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("GPU Wave Time= %.3f msec\n", msecTotal);
    // Copy result back to host and compare to the CPU reference.
    int *gpu_result = (int *) malloc(size * sizeof(int));
    cudaMemcpy(gpu_result, d_cost, size * sizeof(int), cudaMemcpyDeviceToHost);
    bool isCorrect = true;
    for (int i = 0; i < size; i++) {
        if (synchResult[i] != gpu_result[i]) {
            isCorrect = false;
            printf("%i CPU: %i GPU:%i\n", i, synchResult[i], gpu_result[i]);
        }
    }
    if (!isCorrect) {
        printf("The results do not match\n");
    } else {
        printf("The results match\n");
    }
    // BUGFIX: release device buffers, host arrays and events (all leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_cost);
    cudaFree(d_waveMask);
    cudaFree(d_nextWaveMask);
    delete[] waveMask;
    delete[] nextWaveMask;
    delete[] cost;
    free(gpu_result);
}
// Entry point: builds a random graph, runs the CPU reference BFS, then runs
// and validates each GPU BFS variant. argv: <numNodes> <maxEdgesPerNode>.
int main (int argc, char **argv) {
    if (argc != 3) {
        // BUGFIX: old message said "Too few arguments" even when too many
        // were given; print usage instead.
        printf("\nUsage: %s <numNodes> <maxEdgesPerNode>\n", argv[0]);
        abort();
    }
    // Get command line arguments
    int size = atoi(argv[1]);
    int maxEdgesPerNode = atoi(argv[2]);
    Node* nodes = generateGraph(size);
    int* children = generateChildren(nodes, size, maxEdgesPerNode);
    int* numChildren = transformNumChildren(nodes, size);
    int* parentPtr = transformParentPtr(nodes, size);
    int numEdges = parentPtr[size];
    int* parent = transformParents(nodes, size, parentPtr);
    Node* d_graph;
    int *d_children, *d_size, *d_maxChildren, *d_numChildren, *d_parent, *d_parentPtr;
    // Allocate space for device copies
    cudaMalloc((void **)&d_graph, size * sizeof(Node));
    cudaMalloc((void **)&d_size, sizeof(int));
    cudaMalloc((void **)&d_maxChildren, sizeof(int));
    cudaMalloc((void **)&d_children, size * maxEdgesPerNode * sizeof(int));
    cudaMalloc((void **)&d_numChildren, size * sizeof(int));
    cudaMalloc((void **)&d_parentPtr, (size + 1) * sizeof(int));
    cudaMalloc((void **)&d_parent, numEdges * sizeof(int));
    // Copy inputs to device.
    // NOTE(review): d_graph holds bitwise copies of host Node objects; their
    // `children` member is a host pointer and must never be dereferenced in
    // device code (kernels read only numChildren from these copies).
    cudaMemcpy(d_graph, nodes, size * sizeof(Node), cudaMemcpyHostToDevice);
    cudaMemcpy(d_size, &size, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_maxChildren, &maxEdgesPerNode, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_children, children, size * maxEdgesPerNode * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_numChildren, numChildren, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_parentPtr, parentPtr, (size + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_parent, parent, numEdges * sizeof(int), cudaMemcpyHostToDevice);
    // Sequential reference BFS, timed with clock().
    clock_t start = clock();
    int *synchResult = bfs(nodes, size);
    clock_t end = clock();
    printf("CPU Time= %.3f msec\n", (end - start) / (double) (CLOCKS_PER_SEC / 1000));
    callDeviceCachedVisitBFS(d_graph, d_size, d_children, size, d_maxChildren, synchResult);
    callChildListExploreWave(d_size, d_children, d_numChildren, size, d_maxChildren, synchResult);
    //callFlipFlopWaveExplore(d_size, d_children, d_numChildren, size, d_maxChildren, maxEdgesPerNode, synchResult);
    callFlipFlopParent(d_size, d_children, d_numChildren, d_maxChildren, d_parent, d_parentPtr, size, maxEdgesPerNode, synchResult);
    // Cleanup — BUGFIX: also release d_parent/d_parentPtr and every host
    // allocation (all were previously leaked).
    cudaFree(d_graph);
    cudaFree(d_size);
    cudaFree(d_children);
    cudaFree(d_numChildren);
    cudaFree(d_maxChildren);
    cudaFree(d_parent);
    cudaFree(d_parentPtr);
    delete[] nodes;
    delete[] children;
    delete[] numChildren;
    delete[] parentPtr;
    delete[] parent;
    delete[] synchResult;
    return 0;
}
// Value constructor: sets the node id and marks it unvisited.
// BUGFIX: numChildren (and children) were never initialized, so the first
// addChild() indexed the child array with an indeterminate count.
Node::Node(int newValue) {
    value = newValue;
    explored = 0;
    numChildren = 0;
    children = NULL;
}
// Default constructor used by `new Node[n]`; elements are overwritten right
// away, but zero the members anyway so no path sees indeterminate values.
Node::Node() {
    value = 0;
    explored = 0;
    numChildren = 0;
    children = NULL;
}
// Returns the node id.
__host__ __device__ int Node::getValue() {
    return value;
}
// Returns the raw child-id array (host-allocated by initializeChildren).
__host__ __device__ int* Node::getChildren() {
    return children;
}
// Number of valid entries in the child array.
__host__ __device__ int Node::getNumChildren() {
    return numChildren;
}
// Appends a child's id; assumes initializeChildren() reserved enough slots.
void Node::addChild(Node* child) {
    children[numChildren] = child->getValue();
    numChildren++;
    return;
}
// Debug print of the node id and its child list.
void Node::printNode() {
    printf("Value: %i Children: [", value);
    for (int i = 0; i < numChildren; i++) {
        printf("%i", children[i]);
        if (i != numChildren - 1) {
            printf(", ");
        }
    }
    printf("]\n");
    return;
}
// Allocates capacity for up to numEdges child ids (count stays as-is;
// addChild advances it).
void Node::initializeChildren(int numEdges) {
    children = new int[numEdges];
}
// Current visit state (0 unvisited, 1 visited, 2 fully expanded).
__host__ __device__ int Node::getExplored() {
    return explored;
}
// Device-side atomic state swap; returns the previous state. (Not called by
// the current kernels, which track state in separate mask arrays.)
__device__ int Node::parallelSetExplored(int newExplored) {
    return atomicExch(&explored, newExplored);
}
// Host-side state update.
void Node::setExplored(int newExplored) {
    explored = newExplored;
    return;
}
|
8,669 |
#include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cooperative_groups.h>
__global__ void
ac(float *A, const int *B, const int *C, const int *op_sel, int n_inputs, const int n_arith, int thresh, int iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
int idx_off= i*n_inputs;
float val_146, val_147, val_148, val_149, val_150, val_151, val_152, val_153, val_154, val_155, val_156, val_157, val_158, val_159, val_160, val_161, val_162, val_163, val_164, val_165, val_166, val_167, val_168, val_169, val_170, val_171, val_172, val_173, val_174, val_175, val_176, val_177, val_178, val_179, val_180, val_181, val_182, val_183, val_184, val_185, val_186, val_187, val_188, val_189, val_190, val_191, val_192, val_193, val_194, val_195, val_196, val_197, val_198, val_199, val_200, val_201, val_202, val_203, val_204, val_205, val_206, val_207, val_208, val_209, val_210, val_211, val_212, val_213, val_214, val_215, val_216, val_217, val_218, val_219, val_220, val_221, val_222, val_223, val_224, val_225, val_226, val_227, val_228, val_229, val_230, val_231, val_232, val_233, val_234, val_235, val_236, val_237, val_238, val_239, val_240, val_241, val_242, val_243, val_244, val_245, val_246, val_247, val_248, val_249, val_250, val_251, val_252, val_253, val_254, val_255, val_256, val_257, val_258, val_259, val_260, val_261, val_262, val_263, val_264, val_265, val_266, val_267, val_268, val_269, val_270, val_271, val_272, val_273, val_274, val_275, val_276, val_277, val_278, val_279, val_280, val_281, val_282, val_283, val_284, val_285, val_286, val_287, val_288, val_289, val_290, val_291, val_292, val_293, val_294, val_295, val_296, val_297, val_298, val_299, val_300, val_301, val_302, val_303, val_304, val_305, val_306, val_307, val_308, val_309, val_310, val_311, val_312, val_313, val_314, val_315, val_316, val_317, val_318, val_319, val_320, val_321, val_322, val_323, val_324, val_325, val_326, val_327, val_328, val_329, val_330, val_331, val_332, val_333, val_334, val_335, val_336, val_337, val_338, val_339, val_340, val_341, val_342, val_343, val_344, val_345, val_346, val_347, val_348, val_349, val_350, val_351, val_352, val_353, val_354, val_355, val_356, val_357, val_358, val_359, val_360, val_361, val_362, val_363, val_364, val_365, val_366, 
val_367, val_368, val_369, val_370, val_371, val_372, val_373, val_374, val_375, val_376, val_377, val_378, val_379, val_380, val_381, val_382, val_383, val_384, val_385, val_386, val_387, val_388, val_389, val_390, val_391, val_392, val_393, val_394, val_395, val_396, val_397, val_398, val_399, val_400, val_401, val_402, val_403, val_404, val_405, val_406, val_407, val_408, val_409, val_410, val_411, val_412, val_413, val_414, val_415, val_416, val_417, val_418, val_419, val_420, val_421, val_422, val_423, val_424, val_425, val_426, val_427, val_428, val_429, val_430, val_431, val_432, val_433, val_434, val_435, val_436, val_437, val_438, val_439, val_440, val_441, val_442, val_443, val_444, val_445, val_446, val_447, val_448, val_449, val_450, val_451, val_452, val_453, val_454, val_455, val_456, val_457, val_458, val_459, val_460, val_461, val_462, val_463, val_464, val_465, val_466, val_467, val_468, val_469, val_470, val_471, val_472, val_473, val_474, val_475, val_476, val_477, val_478, val_479, val_480, val_481, val_482, val_483, val_484, val_485, val_486, val_487, val_488, val_489, val_490, val_491, val_492, val_493, val_494, val_495, val_496, val_497, val_498, val_499, val_500, val_501, val_502, val_503, val_504, val_505, val_506, val_507, val_508, val_509, val_510, val_511, val_512, val_513, val_514, val_515, val_516, val_517, val_518, val_519, val_520, val_521, val_522, val_523, val_524, val_525, val_526, val_527, val_528, val_529, val_530, val_531, val_532, val_533, val_534, val_535, val_536, val_537, val_538, val_539, val_540, val_541, val_542, val_543, val_544, val_545, val_546, val_547, val_548, val_549, val_550, val_551, val_552, val_553, val_554, val_555, val_556, val_557, val_558, val_559, val_560, val_561, val_562, val_563, val_564, val_565, val_566, val_567, val_568, val_569, val_570, val_571, val_572, val_573, val_574, val_575, val_576, val_577, val_578, val_579, val_580, val_581, val_582, val_583, val_584, val_585, val_586, val_587, val_588, 
val_589, val_590, val_591, val_592, val_593, val_594, val_595, val_596, val_597, val_598, val_599, val_600, val_601, val_602, val_603, val_604, val_605, val_606, val_607, val_608, val_609, val_610, val_611, val_612, val_613, val_614, val_615, val_616, val_617, val_618, val_619, val_620, val_621, val_622, val_623, val_624, val_625, val_626, val_627, val_628, val_629, val_630, val_631, val_632, val_633, val_634, val_635, val_636, val_637, val_638, val_639, val_640, val_641, val_642, val_643, val_644, val_645, val_646, val_647, val_648, val_649, val_650, val_651, val_652, val_653, val_654, val_655, val_656, val_657, val_658, val_659, val_660, val_661, val_662, val_663, val_664, val_665, val_666, val_667, val_668, val_669, val_670, val_671, val_672, val_673, val_674, val_675, val_676, val_677, val_678, val_679, val_680, val_681, val_682, val_683, val_684, val_685, val_686, val_687, val_688, val_689, val_690, val_691, val_692, val_693, val_694, val_695, val_696, val_697, val_698, val_699, val_700, val_701, val_702, val_703, val_704, val_705, val_706, val_707, val_708, val_709, val_710, val_711, val_712, val_713, val_714, val_715, val_716, val_717, val_718, val_719, val_720, val_721, val_722, val_723, val_724, val_725, val_726, val_727, val_728, val_729, val_730, val_731, val_732, val_733, val_734, val_735, val_736, val_737, val_738, val_739, val_740, val_741, val_742, val_743, val_744, val_745, val_746, val_747, val_748, val_749, val_750, val_751, val_752, val_753, val_754, val_755, val_756, val_757, val_758, val_759, val_760, val_761, val_762, val_763, val_764, val_765, val_766, val_767, val_768, val_769, val_770, val_771, val_772, val_773, val_774, val_775, val_776, val_777, val_778, val_779, val_780, val_781, val_782, val_783, val_784, val_785, val_786, val_787, val_788, val_789, val_790, val_791, val_792, val_793, val_794, val_795, val_796, val_797, val_798, val_799, val_800, val_801, val_802, val_803, val_804, val_805, val_806, val_807, val_808, val_809, val_810, 
val_811, val_812, val_813, val_814, val_815, val_816, val_817, val_818, val_819, val_820, val_821, val_822, val_823, val_824, val_825, val_826, val_827, val_828, val_829, val_830, val_831, val_832, val_833, val_834, val_835, val_836, val_837, val_838, val_839, val_840, val_841, val_842, val_843, val_844, val_845, val_846, val_847, val_848, val_849, val_850, val_851, val_852, val_853, val_854, val_855, val_856, val_857, val_858, val_859, val_860, val_861, val_862, val_863, val_864, val_865, val_866, val_867, val_868, val_869, val_870, val_871, val_872, val_873, val_874, val_875, val_876, val_877, val_878, val_879, val_880, val_881, val_882, val_883, val_884, val_885, val_886, val_887, val_888, val_889, val_890, val_891, val_892, val_893, val_894, val_895, val_896, val_897, val_898, val_899, val_900, val_901, val_902, val_903, val_904, val_905, val_906, val_907, val_908, val_909, val_910, val_911, val_912, val_913, val_914, val_915, val_916, val_917, val_918, val_919, val_920, val_921, val_922, val_923, val_924, val_925, val_926, val_927, val_928, val_929, val_930, val_931, val_932, val_933, val_934, val_935, val_936, val_937, val_938, val_939, val_940, val_941, val_942, val_943, val_944, val_945, val_946, val_947, val_948, val_949, val_950, val_951, val_952, val_953, val_954, val_955, val_956, val_957, val_958, val_959, val_960, val_961, val_962, val_963, val_964, val_965, val_966, val_967, val_968, val_969, val_970, val_971, val_972, val_973, val_974, val_975, val_976, val_977, val_978, val_979, val_980, val_981, val_982, val_983, val_984, val_985, val_986, val_987, val_988, val_989, val_990, val_991, val_992, val_993, val_994, val_995, val_996, val_997, val_998, val_999, val_1000, val_1001, val_1002, val_1003, val_1004, val_1005, val_1006, val_1007, val_1008, val_1009, val_1010, val_1011, val_1012, val_1013, val_1014, val_1015, val_1016, val_1017, val_1018, val_1019, val_1020, val_1021, val_1022, val_1023, val_1024, val_1025, val_1026, val_1027, val_1028, 
val_1029, val_1030, val_1031, val_1032, val_1033, val_1034, val_1035, val_1036, val_1037, val_1038, val_1039, val_1040, val_1041, val_1042, val_1043, val_1044, val_1045, val_1046, val_1047, val_1048, val_1049, val_1050, val_1051, val_1052, val_1053, val_1054, val_1055, val_1056, val_1057, val_1058, val_1059, val_1060, val_1061, val_1062, val_1063, val_1064, val_1065, val_1066, val_1067, val_1068, val_1069, val_1070, val_1071, val_1072, val_1073, val_1074, val_1075, val_1076, val_1077, val_1078, val_1079, val_1080, val_1081, val_1082, val_1083, val_1084, val_1085, val_1086, val_1087, val_1088, val_1089, val_1090, val_1091, val_1092, val_1093, val_1094, val_1095, val_1096, val_1097, val_1098, val_1099, val_1100, val_1101, val_1102, val_1103, val_1104, val_1105, val_1106, val_1107, val_1108, val_1109, val_1110, val_1111, val_1112, val_1113, val_1114, val_1115, val_1116, val_1117, val_1118, val_1119, val_1120, val_1121, val_1122, val_1123, val_1124, val_1125, val_1126, val_1127, val_1128, val_1129, val_1130, val_1131, val_1132, val_1133, val_1134, val_1135, val_1136, val_1137, val_1138, val_1139, val_1140, val_1141, val_1142, val_1143, val_1144, val_1145, val_1146, val_1147, val_1148, val_1149, val_1150, val_1151, val_1152, val_1153, val_1154, val_1155, val_1156, val_1157, val_1158, val_1159, val_1160, val_1161, val_1162, val_1163, val_1164, val_1165, val_1166, val_1167, val_1168, val_1169, val_1170, val_1171, val_1172, val_1173, val_1174, val_1175, val_1176, val_1177, val_1178, val_1179, val_1180, val_1181, val_1182, val_1183, val_1184, val_1185, val_1186, val_1187, val_1188, val_1189, val_1190, val_1191, val_1192, val_1193, val_1194, val_1195, val_1196, val_1197, val_1198, val_1199, val_1200, val_1201, val_1202, val_1203, val_1204, val_1205, val_1206, val_1207, val_1208, val_1209, val_1210, val_1211, val_1212, val_1213, val_1214, val_1215, val_1216, val_1217, val_1218, val_1219, val_1220, val_1221, val_1222, val_1223, val_1224, val_1225, val_1226, val_1227, val_1228, 
val_1229, val_1230, val_1231, val_1232, val_1233, val_1234, val_1235, val_1236, val_1237, val_1238, val_1239, val_1240, val_1241, val_1242, val_1243, val_1244, val_1245, val_1246, val_1247, val_1248, val_1249, val_1250, val_1251, val_1252, val_1253, val_1254, val_1255, val_1256, val_1257, val_1258, val_1259, val_1260, val_1261, val_1262, val_1263, val_1264, val_1265, val_1266, val_1267, val_1268, val_1269, val_1270, val_1271, val_1272, val_1273, val_1274, val_1275, val_1276, val_1277, val_1278, val_1279, val_1280, val_1281, val_1282, val_1283, val_1284, val_1285, val_1286, val_1287, val_1288, val_1289, val_1290, val_1291, val_1292, val_1293, val_1294, val_1295, val_1296, val_1297, val_1298, val_1299, val_1300, val_1301, val_1302, val_1303, val_1304, val_1305, val_1306, val_1307, val_1308, val_1309, val_1310, val_1311, val_1312, val_1313, val_1314, val_1315, val_1316, val_1317, val_1318, val_1319, val_1320, val_1321, val_1322, val_1323, val_1324, val_1325, val_1326, val_1327, val_1328, val_1329, val_1330, val_1331, val_1332, val_1333, val_1334, val_1335, val_1336, val_1337, val_1338, val_1339, val_1340, val_1341, val_1342, val_1343, val_1344, val_1345, val_1346, val_1347, val_1348, val_1349, val_1350, val_1351, val_1352, val_1353, val_1354, val_1355, val_1356, val_1357, val_1358, val_1359, val_1360, val_1361, val_1362, val_1363, val_1364, val_1365, val_1366, val_1367, val_1368, val_1369, val_1370, val_1371, val_1372, val_1373, val_1374, val_1375, val_1376, val_1377, val_1378, val_1379, val_1380, val_1381, val_1382, val_1383, val_1384, val_1385, val_1386, val_1387, val_1388, val_1389, val_1390, val_1391, val_1392, val_1393, val_1394, val_1395, val_1396, val_1397, val_1398, val_1399, val_1400, val_1401, val_1402, val_1403, val_1404, val_1405, val_1406, val_1407, val_1408, val_1409, val_1410, val_1411, val_1412, val_1413, val_1414, val_1415, val_1416, val_1417, val_1418, val_1419, val_1420, val_1421, val_1422, val_1423, val_1424, val_1425, val_1426, val_1427, val_1428, 
val_1429, val_1430, val_1431, val_1432, val_1433, val_1434, val_1435, val_1436, val_1437, val_1438, val_1439, val_1440, val_1441, val_1442, val_1443, val_1444, val_1445, val_1446, val_1447, val_1448, val_1449, val_1450, val_1451, val_1452, val_1453, val_1454, val_1455, val_1456, val_1457, val_1458, val_1459, val_1460, val_1461, val_1462, val_1463, val_1464, val_1465, val_1466, val_1467, val_1468, val_1469, val_1470, val_1471, val_1472, val_1473, val_1474, val_1475, val_1476, val_1477, val_1478, val_1479, val_1480, val_1481, val_1482, val_1483, val_1484, val_1485, val_1486, val_1487, val_1488, val_1489, val_1490, val_1491, val_1492, val_1493, val_1494, val_1495, val_1496, val_1497, val_1498, val_1499, val_1500, val_1501, val_1502, val_1503, val_1504, val_1505, val_1506, val_1507, val_1508, val_1509, val_1510, val_1511, val_1512, val_1513, val_1514, val_1515, val_1516, val_1517, val_1518, val_1519, val_1520, val_1521, val_1522, val_1523, val_1524, val_1525, val_1526, val_1527, val_1528, val_1529, val_1530, val_1531, val_1532, val_1533, val_1534, val_1535, val_1536, val_1537, val_1538, val_1539, val_1540, val_1541, val_1542, val_1543, val_1544, val_1545, val_1546, val_1547, val_1548, val_1549, val_1550, val_1551, val_1552, val_1553, val_1554, val_1555, val_1556, val_1557, val_1558, val_1559, val_1560, val_1561, val_1562, val_1563, val_1564, val_1565, val_1566, val_1567, val_1568, val_1569;
for (int k=0; k<iter; k++) {
idx_off = i*(n_inputs * iter) + n_inputs*k;
val_146 = A[idx_off + 3] * A[idx_off + 69];
val_147 = A[idx_off + 0] * A[idx_off + 69];
val_148 = A[idx_off + 2] * A[idx_off + 70];
val_149 = A[idx_off + 1] * A[idx_off + 70];
val_150 = val_146 + val_148;
val_151 = val_147 + val_149;
val_152 = A[idx_off + 0] * A[idx_off + 104];
val_153 = A[idx_off + 4] * A[idx_off + 104];
val_154 = A[idx_off + 16] * A[idx_off + 105];
val_155 = A[idx_off + 3] * A[idx_off + 105];
val_156 = A[idx_off + 7] * A[idx_off + 106];
val_157 = A[idx_off + 4] * A[idx_off + 106];
val_158 = val_152 + val_154;
val_159 = val_153 + val_155;
val_160 = val_156 + val_158;
val_161 = val_157 + val_159;
val_162 = A[idx_off + 0] * A[idx_off + 41];
val_163 = A[idx_off + 1] * A[idx_off + 42];
val_164 = A[idx_off + 12] * val_162;
val_165 = A[idx_off + 0] * val_162;
val_166 = A[idx_off + 13] * val_163;
val_167 = A[idx_off + 14] * val_163;
val_168 = val_164 + val_166;
val_169 = val_165 + val_167;
val_170 = val_165 + val_166;
val_171 = A[idx_off + 5] * A[idx_off + 54];
val_172 = A[idx_off + 8] * A[idx_off + 54];
val_173 = A[idx_off + 0] * A[idx_off + 54];
val_174 = A[idx_off + 8] * A[idx_off + 55];
val_175 = A[idx_off + 5] * A[idx_off + 55];
val_176 = A[idx_off + 17] * A[idx_off + 55];
val_177 = A[idx_off + 0] * A[idx_off + 56];
val_178 = A[idx_off + 18] * A[idx_off + 56];
val_179 = val_171 + val_174;
val_180 = val_172 + val_175;
val_181 = val_173 + val_176;
val_182 = val_177 + val_179;
val_183 = val_177 + val_180;
val_184 = val_178 + val_181;
val_185 = A[idx_off + 5] * A[idx_off + 107];
val_186 = A[idx_off + 8] * A[idx_off + 108];
val_187 = A[idx_off + 0] * A[idx_off + 109];
val_188 = A[idx_off + 8] * A[idx_off + 107];
val_189 = A[idx_off + 5] * A[idx_off + 108];
val_190 = A[idx_off + 0] * A[idx_off + 107];
val_191 = A[idx_off + 5] * A[idx_off + 109];
val_192 = val_185 + val_186;
val_193 = val_187 + val_192;
val_194 = val_188 + val_189;
val_195 = val_187 + val_194;
val_196 = val_186 + val_190;
val_197 = val_191 + val_196;
val_198 = A[idx_off + 92] * val_182;
val_199 = A[idx_off + 93] * val_183;
val_200 = A[idx_off + 94] * val_184;
val_201 = val_193 * val_198;
val_202 = val_195 * val_199;
val_203 = val_197 * val_200;
val_204 = A[idx_off + 5] * val_201;
val_205 = A[idx_off + 0] * val_201;
val_206 = A[idx_off + 8] * val_202;
val_207 = A[idx_off + 15] * val_202;
val_208 = A[idx_off + 0] * val_203;
val_209 = A[idx_off + 3] * val_203;
val_210 = A[idx_off + 12] * val_201;
val_211 = A[idx_off + 4] * val_201;
val_212 = A[idx_off + 0] * val_202;
val_213 = A[idx_off + 3] * val_202;
val_214 = A[idx_off + 4] * val_203;
val_215 = val_204 + val_206;
val_216 = val_205 + val_207;
val_217 = val_208 + val_215;
val_218 = val_209 + val_216;
val_219 = val_210 + val_212;
val_220 = val_211 + val_213;
val_221 = val_208 + val_219;
val_222 = val_214 + val_220;
val_223 = A[idx_off + 4] * A[idx_off + 95];
val_224 = A[idx_off + 5] * A[idx_off + 96];
val_225 = val_150 * val_223;
val_226 = val_151 * val_224;
val_227 = val_217 * val_225;
val_228 = val_218 * val_226;
val_229 = val_221 * val_225;
val_230 = val_222 * val_226;
val_231 = A[idx_off + 12] * val_227;
val_232 = A[idx_off + 0] * val_227;
val_233 = A[idx_off + 20] * val_228;
val_234 = A[idx_off + 21] * val_228;
val_235 = A[idx_off + 0] * val_228;
val_236 = A[idx_off + 5] * val_229;
val_237 = A[idx_off + 8] * val_229;
val_238 = A[idx_off + 0] * val_229;
val_239 = A[idx_off + 4] * val_230;
val_240 = A[idx_off + 3] * val_230;
val_241 = val_231 + val_233;
val_242 = val_232 + val_234;
val_243 = val_232 + val_235;
val_244 = val_236 + val_239;
val_245 = val_237 + val_240;
val_246 = val_238 + val_239;
val_247 = A[idx_off + 6] * A[idx_off + 83];
val_248 = A[idx_off + 7] * A[idx_off + 84];
val_249 = val_241 * val_247;
val_250 = val_242 * val_247;
val_251 = val_243 * val_247;
val_252 = val_244 * val_248;
val_253 = val_245 * val_248;
val_254 = val_246 * val_248;
val_255 = val_249 + val_252;
val_256 = val_250 + val_253;
val_257 = val_251 + val_254;
val_258 = A[idx_off + 0] * A[idx_off + 114];
val_259 = A[idx_off + 1] * A[idx_off + 115];
val_260 = val_160 * val_258;
val_261 = val_161 * val_259;
val_262 = A[idx_off + 2] * val_260;
val_263 = A[idx_off + 3] * val_260;
val_264 = A[idx_off + 5] * val_261;
val_265 = A[idx_off + 4] * val_261;
val_266 = A[idx_off + 0] * val_260;
val_267 = A[idx_off + 1] * val_260;
val_268 = val_262 + val_264;
val_269 = val_263 + val_265;
val_270 = val_265 + val_266;
val_271 = val_264 + val_267;
val_272 = A[idx_off + 4] * A[idx_off + 101];
val_273 = A[idx_off + 3] * A[idx_off + 102];
val_274 = A[idx_off + 4] * A[idx_off + 103];
val_275 = A[idx_off + 4] * val_272;
val_276 = A[idx_off + 19] * val_272;
val_277 = A[idx_off + 0] * val_272;
val_278 = A[idx_off + 4] * val_273;
val_279 = A[idx_off + 0] * val_273;
val_280 = A[idx_off + 19] * val_273;
val_281 = A[idx_off + 4] * val_274;
val_282 = A[idx_off + 0] * val_274;
val_283 = A[idx_off + 19] * val_274;
val_284 = val_275 + val_278;
val_285 = val_276 + val_279;
val_286 = val_277 + val_280;
val_287 = val_277 + val_279;
val_288 = val_281 + val_284;
val_289 = val_282 + val_285;
val_290 = val_282 + val_286;
val_291 = val_283 + val_287;
val_292 = A[idx_off + 22] * A[idx_off + 80];
val_293 = A[idx_off + 22] * A[idx_off + 81];
val_294 = A[idx_off + 22] * A[idx_off + 82];
val_295 = A[idx_off + 0] * A[idx_off + 80];
val_296 = A[idx_off + 12] * A[idx_off + 81];
val_297 = A[idx_off + 0] * A[idx_off + 82];
val_298 = A[idx_off + 12] * A[idx_off + 80];
val_299 = A[idx_off + 0] * A[idx_off + 81];
val_300 = A[idx_off + 12] * A[idx_off + 82];
val_301 = val_292 + val_293;
val_302 = val_294 + val_301;
val_303 = val_295 + val_296;
val_304 = val_297 + val_303;
val_305 = val_298 + val_299;
val_306 = val_297 + val_305;
val_307 = val_295 + val_299;
val_308 = val_300 + val_307;
val_309 = A[idx_off + 4] * A[idx_off + 61];
val_310 = A[idx_off + 5] * A[idx_off + 62];
val_311 = A[idx_off + 12] * val_309;
val_312 = A[idx_off + 0] * val_309;
val_313 = A[idx_off + 13] * val_309;
val_314 = A[idx_off + 14] * val_309;
val_315 = A[idx_off + 14] * val_310;
val_316 = A[idx_off + 23] * val_310;
val_317 = A[idx_off + 0] * val_310;
val_318 = A[idx_off + 12] * val_310;
val_319 = val_311 + val_315;
val_320 = val_312 + val_316;
val_321 = val_312 + val_317;
val_322 = val_313 + val_318;
val_323 = val_314 + val_317;
val_324 = val_313 + val_317;
val_325 = val_311 + val_317;
val_326 = val_312 + val_318;
val_327 = A[idx_off + 74] * val_319;
val_328 = A[idx_off + 75] * val_320;
val_329 = A[idx_off + 76] * val_321;
val_330 = A[idx_off + 74] * val_322;
val_331 = A[idx_off + 75] * val_323;
val_332 = A[idx_off + 76] * val_324;
val_333 = A[idx_off + 74] * val_321;
val_334 = A[idx_off + 75] * val_325;
val_335 = A[idx_off + 76] * val_326;
val_336 = val_327 + val_328;
val_337 = val_329 + val_336;
val_338 = val_330 + val_331;
val_339 = val_332 + val_338;
val_340 = val_333 + val_334;
val_341 = val_335 + val_340;
val_342 = A[idx_off + 22] * A[idx_off + 77];
val_343 = A[idx_off + 22] * A[idx_off + 78];
val_344 = A[idx_off + 22] * A[idx_off + 79];
val_345 = A[idx_off + 0] * A[idx_off + 77];
val_346 = A[idx_off + 12] * A[idx_off + 78];
val_347 = A[idx_off + 0] * A[idx_off + 79];
val_348 = A[idx_off + 12] * A[idx_off + 77];
val_349 = A[idx_off + 0] * A[idx_off + 78];
val_350 = A[idx_off + 12] * A[idx_off + 79];
val_351 = val_342 + val_343;
val_352 = val_344 + val_351;
val_353 = val_345 + val_346;
val_354 = val_347 + val_353;
val_355 = val_348 + val_349;
val_356 = val_347 + val_355;
val_357 = val_345 + val_349;
val_358 = val_350 + val_357;
val_359 = A[idx_off + 2] * A[idx_off + 59];
val_360 = A[idx_off + 3] * A[idx_off + 60];
val_361 = val_302 * val_359;
val_362 = val_304 * val_359;
val_363 = val_302 * val_360;
val_364 = val_306 * val_360;
val_365 = val_308 * val_360;
val_366 = val_352 * val_361;
val_367 = val_354 * val_362;
val_368 = val_352 * val_363;
val_369 = val_356 * val_364;
val_370 = val_358 * val_365;
val_371 = val_366 + val_368;
val_372 = val_366 + val_369;
val_373 = val_367 + val_370;
val_374 = A[idx_off + 4] * A[idx_off + 67];
val_375 = A[idx_off + 5] * A[idx_off + 68];
val_376 = A[idx_off + 5] * val_374;
val_377 = A[idx_off + 0] * val_374;
val_378 = A[idx_off + 8] * val_374;
val_379 = A[idx_off + 1] * val_375;
val_380 = A[idx_off + 5] * val_375;
val_381 = A[idx_off + 0] * val_375;
val_382 = A[idx_off + 8] * val_375;
val_383 = A[idx_off + 12] * val_375;
val_384 = val_374 + val_379;
val_385 = val_376 + val_380;
val_386 = val_374 + val_380;
val_387 = val_377 + val_381;
val_388 = val_378 + val_382;
val_389 = val_376 + val_381;
val_390 = val_378 + val_383;
val_391 = A[idx_off + 124] * val_255;
val_392 = A[idx_off + 125] * val_256;
val_393 = A[idx_off + 126] * val_257;
val_394 = A[idx_off + 12] * val_391;
val_395 = A[idx_off + 5] * val_392;
val_396 = A[idx_off + 13] * val_393;
val_397 = A[idx_off + 5] * val_391;
val_398 = A[idx_off + 8] * val_392;
val_399 = A[idx_off + 0] * val_393;
val_400 = A[idx_off + 7] * val_391;
val_401 = A[idx_off + 0] * val_392;
val_402 = A[idx_off + 0] * val_391;
val_403 = A[idx_off + 24] * val_393;
val_404 = A[idx_off + 8] * val_391;
val_405 = A[idx_off + 16] * val_391;
val_406 = A[idx_off + 12] * val_393;
val_407 = val_394 + val_395;
val_408 = val_396 + val_407;
val_409 = val_397 + val_398;
val_410 = val_399 + val_409;
val_411 = val_400 + val_401;
val_412 = val_399 + val_411;
val_413 = val_398 + val_402;
val_414 = val_403 + val_413;
val_415 = val_395 + val_404;
val_416 = val_396 + val_415;
val_417 = val_398 + val_405;
val_418 = val_399 + val_417;
val_419 = val_401 + val_402;
val_420 = val_399 + val_419;
val_421 = val_403 + val_419;
val_422 = val_395 + val_402;
val_423 = val_406 + val_422;
val_424 = A[idx_off + 12] * A[idx_off + 46];
val_425 = A[idx_off + 13] * A[idx_off + 46];
val_426 = A[idx_off + 2] * A[idx_off + 46];
val_427 = A[idx_off + 4] * A[idx_off + 46];
val_428 = A[idx_off + 3] * A[idx_off + 46];
val_429 = A[idx_off + 0] * A[idx_off + 46];
val_430 = A[idx_off + 0] * A[idx_off + 47];
val_431 = A[idx_off + 25] * A[idx_off + 47];
val_432 = A[idx_off + 26] * A[idx_off + 47];
val_433 = A[idx_off + 14] * A[idx_off + 47];
val_434 = A[idx_off + 15] * A[idx_off + 47];
val_435 = A[idx_off + 6] * A[idx_off + 47];
val_436 = A[idx_off + 0] * A[idx_off + 48];
val_437 = A[idx_off + 2] * A[idx_off + 48];
val_438 = A[idx_off + 4] * A[idx_off + 48];
val_439 = A[idx_off + 27] * A[idx_off + 48];
val_440 = A[idx_off + 28] * A[idx_off + 48];
val_441 = A[idx_off + 3] * A[idx_off + 48];
val_442 = val_424 + val_430;
val_443 = val_425 + val_431;
val_444 = val_426 + val_432;
val_445 = val_427 + val_433;
val_446 = val_428 + val_434;
val_447 = val_427 + val_435;
val_448 = val_429 + val_434;
val_449 = val_436 + val_442;
val_450 = val_437 + val_443;
val_451 = val_438 + val_444;
val_452 = val_439 + val_445;
val_453 = val_436 + val_446;
val_454 = val_440 + val_447;
val_455 = val_441 + val_448;
val_456 = A[idx_off + 138] * val_288;
val_457 = A[idx_off + 139] * val_289;
val_458 = A[idx_off + 140] * val_290;
val_459 = A[idx_off + 141] * val_291;
val_460 = A[idx_off + 29] * val_456;
val_461 = A[idx_off + 0] * val_456;
val_462 = A[idx_off + 29] * val_457;
val_463 = A[idx_off + 0] * val_457;
val_464 = A[idx_off + 29] * val_458;
val_465 = A[idx_off + 0] * val_458;
val_466 = A[idx_off + 0] * val_459;
val_467 = A[idx_off + 29] * val_459;
val_468 = val_460 + val_462;
val_469 = val_461 + val_463;
val_470 = val_464 + val_468;
val_471 = val_465 + val_469;
val_472 = val_466 + val_470;
val_473 = val_466 + val_471;
val_474 = val_467 + val_471;
val_475 = val_465 + val_468;
val_476 = val_464 + val_469;
val_477 = val_466 + val_475;
val_478 = val_466 + val_476;
val_479 = A[idx_off + 2] * A[idx_off + 57];
val_480 = A[idx_off + 3] * A[idx_off + 58];
val_481 = val_472 * val_479;
val_482 = val_473 * val_479;
val_483 = val_474 * val_479;
val_484 = val_477 * val_480;
val_485 = val_478 * val_480;
val_486 = val_473 * val_480;
val_487 = val_474 * val_480;
val_488 = val_481 + val_484;
val_489 = val_482 + val_485;
val_490 = val_483 + val_486;
val_491 = val_482 + val_487;
val_492 = A[idx_off + 29] * A[idx_off + 63];
val_493 = A[idx_off + 0] * A[idx_off + 63];
val_494 = A[idx_off + 0] * A[idx_off + 64];
val_495 = A[idx_off + 29] * A[idx_off + 64];
val_496 = A[idx_off + 0] * A[idx_off + 65];
val_497 = A[idx_off + 29] * A[idx_off + 65];
val_498 = A[idx_off + 0] * A[idx_off + 66];
val_499 = A[idx_off + 29] * A[idx_off + 66];
val_500 = val_492 + val_494;
val_501 = val_493 + val_495;
val_502 = val_493 + val_494;
val_503 = val_496 + val_500;
val_504 = val_496 + val_501;
val_505 = val_497 + val_502;
val_506 = val_496 + val_502;
val_507 = val_498 + val_503;
val_508 = val_498 + val_504;
val_509 = val_498 + val_505;
val_510 = val_499 + val_506;
val_511 = A[idx_off + 29] * A[idx_off + 97];
val_512 = A[idx_off + 0] * A[idx_off + 97];
val_513 = A[idx_off + 20] * A[idx_off + 97];
val_514 = A[idx_off + 0] * A[idx_off + 98];
val_515 = A[idx_off + 30] * A[idx_off + 98];
val_516 = A[idx_off + 29] * A[idx_off + 98];
val_517 = A[idx_off + 0] * A[idx_off + 99];
val_518 = A[idx_off + 0] * A[idx_off + 100];
val_519 = A[idx_off + 29] * A[idx_off + 100];
val_520 = A[idx_off + 29] * A[idx_off + 99];
val_521 = A[idx_off + 25] * A[idx_off + 97];
val_522 = A[idx_off + 31] * A[idx_off + 98];
val_523 = val_511 + val_514;
val_524 = val_512 + val_514;
val_525 = val_513 + val_515;
val_526 = val_512 + val_516;
val_527 = val_517 + val_523;
val_528 = val_517 + val_524;
val_529 = val_517 + val_525;
val_530 = val_517 + val_526;
val_531 = val_518 + val_527;
val_532 = val_519 + val_528;
val_533 = val_518 + val_529;
val_534 = val_518 + val_530;
val_535 = val_520 + val_524;
val_536 = val_518 + val_535;
val_537 = val_521 + val_522;
val_538 = val_517 + val_537;
val_539 = val_518 + val_538;
val_540 = A[idx_off + 29] * A[idx_off + 110];
val_541 = A[idx_off + 4] * A[idx_off + 110];
val_542 = A[idx_off + 6] * A[idx_off + 110];
val_543 = A[idx_off + 0] * A[idx_off + 111];
val_544 = A[idx_off + 34] * A[idx_off + 111];
val_545 = A[idx_off + 28] * A[idx_off + 111];
val_546 = A[idx_off + 0] * A[idx_off + 112];
val_547 = A[idx_off + 34] * A[idx_off + 112];
val_548 = A[idx_off + 8] * A[idx_off + 112];
val_549 = A[idx_off + 0] * A[idx_off + 113];
val_550 = A[idx_off + 35] * A[idx_off + 113];
val_551 = A[idx_off + 0] * A[idx_off + 110];
val_552 = A[idx_off + 17] * A[idx_off + 111];
val_553 = A[idx_off + 3] * A[idx_off + 111];
val_554 = A[idx_off + 13] * A[idx_off + 112];
val_555 = A[idx_off + 36] * A[idx_off + 112];
val_556 = A[idx_off + 29] * A[idx_off + 113];
val_557 = A[idx_off + 14] * A[idx_off + 113];
val_558 = A[idx_off + 13] * A[idx_off + 111];
val_559 = A[idx_off + 37] * A[idx_off + 111];
val_560 = A[idx_off + 29] * A[idx_off + 111];
val_561 = A[idx_off + 18] * A[idx_off + 111];
val_562 = A[idx_off + 21] * A[idx_off + 112];
val_563 = A[idx_off + 15] * A[idx_off + 112];
val_564 = A[idx_off + 6] * A[idx_off + 113];
val_565 = A[idx_off + 23] * A[idx_off + 113];
val_566 = A[idx_off + 31] * A[idx_off + 112];
val_567 = A[idx_off + 3] * A[idx_off + 113];
val_568 = A[idx_off + 25] * A[idx_off + 113];
val_569 = A[idx_off + 29] * A[idx_off + 112];
val_570 = A[idx_off + 2] * A[idx_off + 110];
val_571 = A[idx_off + 14] * A[idx_off + 110];
val_572 = A[idx_off + 38] * A[idx_off + 111];
val_573 = A[idx_off + 39] * A[idx_off + 111];
val_574 = A[idx_off + 4] * A[idx_off + 112];
val_575 = val_540 + val_543;
val_576 = val_541 + val_544;
val_577 = val_542 + val_545;
val_578 = val_546 + val_575;
val_579 = val_547 + val_576;
val_580 = val_548 + val_577;
val_581 = val_549 + val_578;
val_582 = val_550 + val_579;
val_583 = val_549 + val_580;
val_584 = val_543 + val_551;
val_585 = val_551 + val_552;
val_586 = val_551 + val_553;
val_587 = val_546 + val_584;
val_588 = val_554 + val_585;
val_589 = val_555 + val_586;
val_590 = val_556 + val_587;
val_591 = val_557 + val_588;
val_592 = val_549 + val_589;
val_593 = val_551 + val_558;
val_594 = val_551 + val_559;
val_595 = val_551 + val_560;
val_596 = val_542 + val_561;
val_597 = val_562 + val_593;
val_598 = val_547 + val_594;
val_599 = val_546 + val_595;
val_600 = val_563 + val_596;
val_601 = val_564 + val_597;
val_602 = val_565 + val_598;
val_603 = val_549 + val_599;
val_604 = val_549 + val_600;
val_605 = val_555 + val_584;
val_606 = val_566 + val_584;
val_607 = val_567 + val_605;
val_608 = val_568 + val_606;
val_609 = val_569 + val_584;
val_610 = val_549 + val_609;
val_611 = val_570 + val_572;
val_612 = val_571 + val_573;
val_613 = val_574 + val_611;
val_614 = val_546 + val_612;
val_615 = val_549 + val_613;
val_616 = val_549 + val_614;
val_617 = A[idx_off + 142] * val_488;
val_618 = A[idx_off + 143] * val_489;
val_619 = A[idx_off + 144] * val_490;
val_620 = A[idx_off + 145] * val_491;
val_621 = val_581 * val_617;
val_622 = val_582 * val_618;
val_623 = val_581 * val_619;
val_624 = val_583 * val_620;
val_625 = val_590 * val_617;
val_626 = val_591 * val_618;
val_627 = val_590 * val_619;
val_628 = val_592 * val_620;
val_629 = val_601 * val_617;
val_630 = val_602 * val_618;
val_631 = val_603 * val_619;
val_632 = val_604 * val_620;
val_633 = val_607 * val_618;
val_634 = val_608 * val_620;
val_635 = val_607 * val_617;
val_636 = val_581 * val_618;
val_637 = val_610 * val_619;
val_638 = val_581 * val_620;
val_639 = val_615 * val_617;
val_640 = val_590 * val_618;
val_641 = val_616 * val_619;
val_642 = val_590 * val_620;
val_643 = A[idx_off + 29] * val_621;
val_644 = A[idx_off + 29] * val_622;
val_645 = A[idx_off + 29] * val_623;
val_646 = A[idx_off + 29] * val_624;
val_647 = A[idx_off + 0] * val_621;
val_648 = A[idx_off + 0] * val_622;
val_649 = A[idx_off + 0] * val_623;
val_650 = A[idx_off + 0] * val_624;
val_651 = A[idx_off + 13] * val_625;
val_652 = A[idx_off + 5] * val_626;
val_653 = A[idx_off + 0] * val_627;
val_654 = A[idx_off + 0] * val_628;
val_655 = A[idx_off + 40] * val_625;
val_656 = A[idx_off + 11] * val_626;
val_657 = A[idx_off + 29] * val_628;
val_658 = A[idx_off + 0] * val_625;
val_659 = A[idx_off + 0] * val_626;
val_660 = A[idx_off + 29] * val_627;
val_661 = A[idx_off + 5] * val_629;
val_662 = A[idx_off + 29] * val_630;
val_663 = A[idx_off + 0] * val_631;
val_664 = A[idx_off + 29] * val_632;
val_665 = A[idx_off + 11] * val_629;
val_666 = A[idx_off + 0] * val_630;
val_667 = A[idx_off + 29] * val_631;
val_668 = A[idx_off + 0] * val_632;
val_669 = A[idx_off + 0] * val_629;
val_670 = A[idx_off + 20] * val_633;
val_671 = A[idx_off + 0] * val_634;
val_672 = A[idx_off + 30] * val_633;
val_673 = A[idx_off + 0] * val_633;
val_674 = A[idx_off + 29] * val_634;
val_675 = A[idx_off + 14] * val_635;
val_676 = A[idx_off + 29] * val_636;
val_677 = A[idx_off + 0] * val_637;
val_678 = A[idx_off + 29] * val_638;
val_679 = A[idx_off + 39] * val_635;
val_680 = A[idx_off + 0] * val_636;
val_681 = A[idx_off + 0] * val_638;
val_682 = A[idx_off + 0] * val_635;
val_683 = A[idx_off + 29] * val_637;
val_684 = A[idx_off + 29] * val_639;
val_685 = A[idx_off + 13] * val_640;
val_686 = A[idx_off + 29] * val_641;
val_687 = A[idx_off + 0] * val_642;
val_688 = A[idx_off + 0] * val_639;
val_689 = A[idx_off + 40] * val_640;
val_690 = A[idx_off + 0] * val_641;
val_691 = A[idx_off + 0] * val_640;
val_692 = A[idx_off + 29] * val_642;
val_693 = val_643 + val_644;
val_694 = val_645 + val_693;
val_695 = val_646 + val_694;
val_696 = val_647 + val_648;
val_697 = val_649 + val_696;
val_698 = val_650 + val_697;
val_699 = val_651 + val_652;
val_700 = val_653 + val_699;
val_701 = val_654 + val_700;
val_702 = val_655 + val_656;
val_703 = val_653 + val_702;
val_704 = val_657 + val_703;
val_705 = val_658 + val_659;
val_706 = val_653 + val_705;
val_707 = val_654 + val_706;
val_708 = val_660 + val_705;
val_709 = val_654 + val_708;
val_710 = val_661 + val_662;
val_711 = val_663 + val_710;
val_712 = val_664 + val_711;
val_713 = val_665 + val_666;
val_714 = val_667 + val_713;
val_715 = val_668 + val_714;
val_716 = val_666 + val_669;
val_717 = val_663 + val_716;
val_718 = val_668 + val_717;
val_719 = val_643 + val_670;
val_720 = val_645 + val_719;
val_721 = val_671 + val_720;
val_722 = val_647 + val_672;
val_723 = val_649 + val_722;
val_724 = val_671 + val_723;
val_725 = val_647 + val_673;
val_726 = val_649 + val_725;
val_727 = val_674 + val_726;
val_728 = val_671 + val_726;
val_729 = val_675 + val_676;
val_730 = val_677 + val_729;
val_731 = val_678 + val_730;
val_732 = val_679 + val_680;
val_733 = val_677 + val_732;
val_734 = val_681 + val_733;
val_735 = val_680 + val_682;
val_736 = val_683 + val_735;
val_737 = val_681 + val_736;
val_738 = val_677 + val_735;
val_739 = val_681 + val_738;
val_740 = val_684 + val_685;
val_741 = val_686 + val_740;
val_742 = val_687 + val_741;
val_743 = val_688 + val_689;
val_744 = val_690 + val_743;
val_745 = val_687 + val_744;
val_746 = val_688 + val_691;
val_747 = val_690 + val_746;
val_748 = val_687 + val_747;
val_749 = val_692 + val_747;
val_750 = A[idx_off + 8] * A[idx_off + 90];
val_751 = A[idx_off + 9] * A[idx_off + 91];
val_752 = val_695 * val_750;
val_753 = val_698 * val_750;
val_754 = val_701 * val_751;
val_755 = val_704 * val_751;
val_756 = val_707 * val_751;
val_757 = val_709 * val_751;
val_758 = val_712 * val_750;
val_759 = val_715 * val_750;
val_760 = val_718 * val_750;
val_761 = val_721 * val_751;
val_762 = val_724 * val_751;
val_763 = val_727 * val_751;
val_764 = val_728 * val_751;
val_765 = val_731 * val_750;
val_766 = val_734 * val_750;
val_767 = val_737 * val_750;
val_768 = val_739 * val_750;
val_769 = val_742 * val_751;
val_770 = val_745 * val_751;
val_771 = val_748 * val_751;
val_772 = val_749 * val_751;
val_773 = val_752 + val_754;
val_774 = val_753 + val_755;
val_775 = val_753 + val_756;
val_776 = val_753 + val_757;
val_777 = val_758 + val_761;
val_778 = val_759 + val_762;
val_779 = val_760 + val_763;
val_780 = val_760 + val_764;
val_781 = val_765 + val_769;
val_782 = val_766 + val_770;
val_783 = val_767 + val_771;
val_784 = val_768 + val_772;
val_785 = A[idx_off + 2] * A[idx_off + 85];
val_786 = A[idx_off + 3] * A[idx_off + 86];
val_787 = A[idx_off + 0] * val_785;
val_788 = A[idx_off + 18] * val_785;
val_789 = A[idx_off + 4] * val_785;
val_790 = A[idx_off + 5] * val_785;
val_791 = A[idx_off + 0] * val_786;
val_792 = A[idx_off + 4] * val_786;
val_793 = A[idx_off + 18] * val_786;
val_794 = A[idx_off + 5] * val_786;
val_795 = A[idx_off + 1] * val_785;
val_796 = A[idx_off + 13] * val_785;
val_797 = A[idx_off + 1] * val_786;
val_798 = A[idx_off + 13] * val_786;
val_799 = A[idx_off + 2] * val_785;
val_800 = A[idx_off + 2] * val_786;
val_801 = A[idx_off + 3] * val_785;
val_802 = A[idx_off + 3] * val_786;
val_803 = val_787 + val_791;
val_804 = val_787 + val_792;
val_805 = val_788 + val_793;
val_806 = val_789 + val_792;
val_807 = val_788 + val_794;
val_808 = val_790 + val_794;
val_809 = val_795 + val_797;
val_810 = val_794 + val_795;
val_811 = val_796 + val_798;
val_812 = val_792 + val_796;
val_813 = val_788 + val_797;
val_814 = val_791 + val_796;
val_815 = val_799 + val_800;
val_816 = val_798 + val_799;
val_817 = val_801 + val_802;
val_818 = val_793 + val_801;
val_819 = A[idx_off + 71] * val_337;
val_820 = A[idx_off + 72] * val_339;
val_821 = A[idx_off + 73] * val_341;
val_822 = val_371 * val_819;
val_823 = val_372 * val_820;
val_824 = val_373 * val_821;
val_825 = A[idx_off + 4] * val_822;
val_826 = A[idx_off + 3] * val_823;
val_827 = A[idx_off + 4] * val_824;
val_828 = A[idx_off + 0] * val_822;
val_829 = A[idx_off + 15] * val_823;
val_830 = A[idx_off + 3] * val_824;
val_831 = val_408 * val_825;
val_832 = val_410 * val_826;
val_833 = val_412 * val_827;
val_834 = val_414 * val_825;
val_835 = val_416 * val_826;
val_836 = val_418 * val_827;
val_837 = val_420 * val_825;
val_838 = val_421 * val_826;
val_839 = val_423 * val_827;
val_840 = val_408 * val_828;
val_841 = val_410 * val_829;
val_842 = val_412 * val_830;
val_843 = val_414 * val_828;
val_844 = val_416 * val_829;
val_845 = val_418 * val_830;
val_846 = val_420 * val_828;
val_847 = val_421 * val_829;
val_848 = val_423 * val_830;
val_849 = val_831 + val_832;
val_850 = val_833 + val_849;
val_851 = val_834 + val_835;
val_852 = val_836 + val_851;
val_853 = val_837 + val_838;
val_854 = val_839 + val_853;
val_855 = val_840 + val_841;
val_856 = val_842 + val_855;
val_857 = val_843 + val_844;
val_858 = val_845 + val_857;
val_859 = val_846 + val_847;
val_860 = val_848 + val_859;
val_861 = A[idx_off + 51] * val_850;
val_862 = A[idx_off + 52] * val_852;
val_863 = A[idx_off + 53] * val_854;
val_864 = A[idx_off + 51] * val_856;
val_865 = A[idx_off + 52] * val_858;
val_866 = A[idx_off + 53] * val_860;
val_867 = val_449 * val_861;
val_868 = val_450 * val_861;
val_869 = val_449 * val_862;
val_870 = val_451 * val_862;
val_871 = val_452 * val_862;
val_872 = val_453 * val_863;
val_873 = val_454 * val_863;
val_874 = val_455 * val_863;
val_875 = val_449 * val_864;
val_876 = val_450 * val_864;
val_877 = val_449 * val_865;
val_878 = val_451 * val_865;
val_879 = val_452 * val_865;
val_880 = val_453 * val_866;
val_881 = val_454 * val_866;
val_882 = val_455 * val_866;
val_883 = val_867 + val_869;
val_884 = val_867 + val_870;
val_885 = val_868 + val_871;
val_886 = val_872 + val_883;
val_887 = val_873 + val_884;
val_888 = val_874 + val_885;
val_889 = val_875 + val_877;
val_890 = val_875 + val_878;
val_891 = val_876 + val_879;
val_892 = val_880 + val_889;
val_893 = val_881 + val_890;
val_894 = val_882 + val_891;
val_895 = A[idx_off + 127] * val_168;
val_896 = A[idx_off + 128] * val_169;
val_897 = A[idx_off + 129] * val_170;
val_898 = val_886 * val_895;
val_899 = val_887 * val_896;
val_900 = val_888 * val_897;
val_901 = val_892 * val_895;
val_902 = val_893 * val_896;
val_903 = val_894 * val_897;
val_904 = val_803 * val_898;
val_905 = val_804 * val_899;
val_906 = val_805 * val_900;
val_907 = val_806 * val_899;
val_908 = val_807 * val_900;
val_909 = val_804 * val_898;
val_910 = val_808 * val_900;
val_911 = val_809 * val_901;
val_912 = val_810 * val_902;
val_913 = val_811 * val_903;
val_914 = val_808 * val_902;
val_915 = val_812 * val_903;
val_916 = val_810 * val_901;
val_917 = val_806 * val_903;
val_918 = val_813 * val_900;
val_919 = val_809 * val_900;
val_920 = val_814 * val_903;
val_921 = val_803 * val_903;
val_922 = val_803 * val_899;
val_923 = val_815 * val_900;
val_924 = val_816 * val_900;
val_925 = val_811 * val_900;
val_926 = val_809 * val_902;
val_927 = val_817 * val_903;
val_928 = val_818 * val_903;
val_929 = val_805 * val_903;
val_930 = val_904 + val_905;
val_931 = val_906 + val_930;
val_932 = val_904 + val_907;
val_933 = val_908 + val_932;
val_934 = val_907 + val_909;
val_935 = val_910 + val_934;
val_936 = val_911 + val_912;
val_937 = val_913 + val_936;
val_938 = val_911 + val_914;
val_939 = val_915 + val_938;
val_940 = val_914 + val_916;
val_941 = val_917 + val_940;
val_942 = val_918 + val_932;
val_943 = val_919 + val_934;
val_944 = val_920 + val_938;
val_945 = val_921 + val_940;
val_946 = val_904 + val_922;
val_947 = val_923 + val_946;
val_948 = val_924 + val_946;
val_949 = val_925 + val_946;
val_950 = val_911 + val_926;
val_951 = val_927 + val_950;
val_952 = val_928 + val_950;
val_953 = val_929 + val_950;
val_954 = A[idx_off + 49] * val_931;
val_955 = A[idx_off + 49] * val_933;
val_956 = A[idx_off + 49] * val_935;
val_957 = A[idx_off + 50] * val_937;
val_958 = A[idx_off + 50] * val_939;
val_959 = A[idx_off + 50] * val_941;
val_960 = A[idx_off + 49] * val_942;
val_961 = A[idx_off + 49] * val_943;
val_962 = A[idx_off + 50] * val_944;
val_963 = A[idx_off + 50] * val_945;
val_964 = A[idx_off + 49] * val_947;
val_965 = A[idx_off + 49] * val_948;
val_966 = A[idx_off + 49] * val_949;
val_967 = A[idx_off + 50] * val_951;
val_968 = A[idx_off + 50] * val_952;
val_969 = A[idx_off + 50] * val_953;
val_970 = val_954 + val_957;
val_971 = val_955 + val_958;
val_972 = val_956 + val_959;
val_973 = val_960 + val_962;
val_974 = val_961 + val_963;
val_975 = val_964 + val_967;
val_976 = val_965 + val_968;
val_977 = val_966 + val_969;
val_978 = A[idx_off + 134] * val_507;
val_979 = A[idx_off + 135] * val_508;
val_980 = A[idx_off + 136] * val_509;
val_981 = A[idx_off + 137] * val_510;
val_982 = A[idx_off + 134] * val_508;
val_983 = A[idx_off + 135] * val_507;
val_984 = A[idx_off + 135] * val_509;
val_985 = A[idx_off + 136] * val_507;
val_986 = val_531 * val_978;
val_987 = val_532 * val_979;
val_988 = val_533 * val_980;
val_989 = val_534 * val_981;
val_990 = val_534 * val_978;
val_991 = val_531 * val_979;
val_992 = val_536 * val_981;
val_993 = val_536 * val_978;
val_994 = val_539 * val_979;
val_995 = val_531 * val_980;
val_996 = val_532 * val_981;
val_997 = val_531 * val_982;
val_998 = val_532 * val_983;
val_999 = val_534 * val_982;
val_1000 = val_531 * val_983;
val_1001 = val_536 * val_982;
val_1002 = val_539 * val_983;
val_1003 = val_532 * val_984;
val_1004 = val_533 * val_985;
val_1005 = val_531 * val_984;
val_1006 = val_539 * val_984;
val_1007 = val_531 * val_985;
val_1008 = val_773 * val_986;
val_1009 = val_774 * val_987;
val_1010 = val_775 * val_988;
val_1011 = val_776 * val_989;
val_1012 = val_777 * val_990;
val_1013 = val_778 * val_991;
val_1014 = val_779 * val_988;
val_1015 = val_780 * val_992;
val_1016 = val_781 * val_993;
val_1017 = val_782 * val_994;
val_1018 = val_783 * val_995;
val_1019 = val_784 * val_996;
val_1020 = val_773 * val_997;
val_1021 = val_774 * val_998;
val_1022 = val_777 * val_999;
val_1023 = val_778 * val_1000;
val_1024 = val_781 * val_1001;
val_1025 = val_782 * val_1002;
val_1026 = val_774 * val_1003;
val_1027 = val_775 * val_1004;
val_1028 = val_778 * val_1005;
val_1029 = val_779 * val_1004;
val_1030 = val_782 * val_1006;
val_1031 = val_783 * val_1007;
val_1032 = A[idx_off + 29] * val_1008;
val_1033 = A[idx_off + 0] * val_1009;
val_1034 = A[idx_off + 0] * val_1010;
val_1035 = A[idx_off + 11] * val_1011;
val_1036 = A[idx_off + 0] * val_1008;
val_1037 = A[idx_off + 5] * val_1011;
val_1038 = A[idx_off + 29] * val_1010;
val_1039 = A[idx_off + 0] * val_1011;
val_1040 = A[idx_off + 29] * val_1009;
val_1041 = A[idx_off + 0] * val_1012;
val_1042 = A[idx_off + 29] * val_1013;
val_1043 = A[idx_off + 0] * val_1014;
val_1044 = A[idx_off + 0] * val_1015;
val_1045 = A[idx_off + 29] * val_1012;
val_1046 = A[idx_off + 0] * val_1013;
val_1047 = A[idx_off + 32] * val_1015;
val_1048 = A[idx_off + 8] * val_1015;
val_1049 = A[idx_off + 29] * val_1014;
val_1050 = A[idx_off + 0] * val_1016;
val_1051 = A[idx_off + 0] * val_1017;
val_1052 = A[idx_off + 29] * val_1018;
val_1053 = A[idx_off + 0] * val_1019;
val_1054 = A[idx_off + 29] * val_1017;
val_1055 = A[idx_off + 0] * val_1018;
val_1056 = A[idx_off + 33] * val_1019;
val_1057 = A[idx_off + 29] * val_1016;
val_1058 = A[idx_off + 2] * val_1019;
val_1059 = A[idx_off + 29] * val_1020;
val_1060 = A[idx_off + 0] * val_1021;
val_1061 = A[idx_off + 0] * val_1020;
val_1062 = A[idx_off + 29] * val_1021;
val_1063 = A[idx_off + 0] * val_1022;
val_1064 = A[idx_off + 29] * val_1023;
val_1065 = A[idx_off + 29] * val_1022;
val_1066 = A[idx_off + 0] * val_1023;
val_1067 = A[idx_off + 0] * val_1024;
val_1068 = A[idx_off + 0] * val_1025;
val_1069 = A[idx_off + 29] * val_1025;
val_1070 = A[idx_off + 29] * val_1024;
val_1071 = A[idx_off + 0] * val_1026;
val_1072 = A[idx_off + 0] * val_1027;
val_1073 = A[idx_off + 29] * val_1027;
val_1074 = A[idx_off + 29] * val_1026;
val_1075 = A[idx_off + 29] * val_1028;
val_1076 = A[idx_off + 0] * val_1029;
val_1077 = A[idx_off + 0] * val_1028;
val_1078 = A[idx_off + 29] * val_1029;
val_1079 = A[idx_off + 0] * val_1030;
val_1080 = A[idx_off + 29] * val_1031;
val_1081 = A[idx_off + 29] * val_1030;
val_1082 = A[idx_off + 0] * val_1031;
val_1083 = val_1032 + val_1033;
val_1084 = val_1034 + val_1083;
val_1085 = val_1035 + val_1084;
val_1086 = val_1033 + val_1036;
val_1087 = val_1034 + val_1086;
val_1088 = val_1037 + val_1087;
val_1089 = val_1038 + val_1086;
val_1090 = val_1039 + val_1089;
val_1091 = val_1036 + val_1040;
val_1092 = val_1034 + val_1091;
val_1093 = val_1039 + val_1092;
val_1094 = val_1041 + val_1042;
val_1095 = val_1043 + val_1094;
val_1096 = val_1044 + val_1095;
val_1097 = val_1045 + val_1046;
val_1098 = val_1043 + val_1097;
val_1099 = val_1047 + val_1098;
val_1100 = val_1041 + val_1046;
val_1101 = val_1043 + val_1100;
val_1102 = val_1048 + val_1101;
val_1103 = val_1049 + val_1100;
val_1104 = val_1044 + val_1103;
val_1105 = val_1050 + val_1051;
val_1106 = val_1052 + val_1105;
val_1107 = val_1053 + val_1106;
val_1108 = val_1050 + val_1054;
val_1109 = val_1055 + val_1108;
val_1110 = val_1056 + val_1109;
val_1111 = val_1051 + val_1057;
val_1112 = val_1055 + val_1111;
val_1113 = val_1058 + val_1112;
val_1114 = val_1055 + val_1105;
val_1115 = val_1053 + val_1114;
val_1116 = val_1059 + val_1060;
val_1117 = val_1034 + val_1116;
val_1118 = val_1035 + val_1117;
val_1119 = val_1060 + val_1061;
val_1120 = val_1034 + val_1119;
val_1121 = val_1037 + val_1120;
val_1122 = val_1038 + val_1119;
val_1123 = val_1039 + val_1122;
val_1124 = val_1061 + val_1062;
val_1125 = val_1034 + val_1124;
val_1126 = val_1039 + val_1125;
val_1127 = val_1063 + val_1064;
val_1128 = val_1043 + val_1127;
val_1129 = val_1044 + val_1128;
val_1130 = val_1065 + val_1066;
val_1131 = val_1043 + val_1130;
val_1132 = val_1047 + val_1131;
val_1133 = val_1063 + val_1066;
val_1134 = val_1043 + val_1133;
val_1135 = val_1048 + val_1134;
val_1136 = val_1049 + val_1133;
val_1137 = val_1044 + val_1136;
val_1138 = val_1067 + val_1068;
val_1139 = val_1052 + val_1138;
val_1140 = val_1053 + val_1139;
val_1141 = val_1067 + val_1069;
val_1142 = val_1055 + val_1141;
val_1143 = val_1056 + val_1142;
val_1144 = val_1068 + val_1070;
val_1145 = val_1055 + val_1144;
val_1146 = val_1058 + val_1145;
val_1147 = val_1055 + val_1138;
val_1148 = val_1053 + val_1147;
val_1149 = val_1059 + val_1071;
val_1150 = val_1072 + val_1149;
val_1151 = val_1035 + val_1150;
val_1152 = val_1061 + val_1071;
val_1153 = val_1072 + val_1152;
val_1154 = val_1037 + val_1153;
val_1155 = val_1073 + val_1152;
val_1156 = val_1039 + val_1155;
val_1157 = val_1061 + val_1074;
val_1158 = val_1072 + val_1157;
val_1159 = val_1039 + val_1158;
val_1160 = val_1063 + val_1075;
val_1161 = val_1076 + val_1160;
val_1162 = val_1044 + val_1161;
val_1163 = val_1065 + val_1077;
val_1164 = val_1076 + val_1163;
val_1165 = val_1047 + val_1164;
val_1166 = val_1063 + val_1077;
val_1167 = val_1076 + val_1166;
val_1168 = val_1048 + val_1167;
val_1169 = val_1078 + val_1166;
val_1170 = val_1044 + val_1169;
val_1171 = val_1067 + val_1079;
val_1172 = val_1080 + val_1171;
val_1173 = val_1053 + val_1172;
val_1174 = val_1067 + val_1081;
val_1175 = val_1082 + val_1174;
val_1176 = val_1056 + val_1175;
val_1177 = val_1070 + val_1079;
val_1178 = val_1082 + val_1177;
val_1179 = val_1058 + val_1178;
val_1180 = val_1082 + val_1171;
val_1181 = val_1053 + val_1180;
val_1182 = A[idx_off + 10] * A[idx_off + 87];
val_1183 = A[idx_off + 11] * A[idx_off + 88];
val_1184 = A[idx_off + 4] * A[idx_off + 89];
val_1185 = val_268 * val_1182;
val_1186 = val_269 * val_1182;
val_1187 = val_268 * val_1183;
val_1188 = val_269 * val_1183;
val_1189 = val_270 * val_1184;
val_1190 = val_271 * val_1184;
val_1191 = val_1085 * val_1185;
val_1192 = val_1088 * val_1185;
val_1193 = val_1090 * val_1185;
val_1194 = val_1093 * val_1185;
val_1195 = val_1085 * val_1186;
val_1196 = val_1088 * val_1186;
val_1197 = val_1090 * val_1186;
val_1198 = val_1093 * val_1186;
val_1199 = val_1096 * val_1187;
val_1200 = val_1099 * val_1187;
val_1201 = val_1102 * val_1187;
val_1202 = val_1104 * val_1187;
val_1203 = val_1096 * val_1188;
val_1204 = val_1099 * val_1188;
val_1205 = val_1102 * val_1188;
val_1206 = val_1104 * val_1188;
val_1207 = val_1107 * val_1189;
val_1208 = val_1110 * val_1189;
val_1209 = val_1113 * val_1189;
val_1210 = val_1115 * val_1189;
val_1211 = val_1107 * val_1190;
val_1212 = val_1110 * val_1190;
val_1213 = val_1113 * val_1190;
val_1214 = val_1115 * val_1190;
val_1215 = val_1118 * val_1185;
val_1216 = val_1121 * val_1185;
val_1217 = val_1123 * val_1185;
val_1218 = val_1126 * val_1185;
val_1219 = val_1118 * val_1186;
val_1220 = val_1121 * val_1186;
val_1221 = val_1123 * val_1186;
val_1222 = val_1126 * val_1186;
val_1223 = val_1129 * val_1187;
val_1224 = val_1132 * val_1187;
val_1225 = val_1135 * val_1187;
val_1226 = val_1137 * val_1187;
val_1227 = val_1129 * val_1188;
val_1228 = val_1132 * val_1188;
val_1229 = val_1135 * val_1188;
val_1230 = val_1137 * val_1188;
val_1231 = val_1140 * val_1189;
val_1232 = val_1143 * val_1189;
val_1233 = val_1146 * val_1189;
val_1234 = val_1148 * val_1189;
val_1235 = val_1140 * val_1190;
val_1236 = val_1143 * val_1190;
val_1237 = val_1146 * val_1190;
val_1238 = val_1148 * val_1190;
val_1239 = val_1151 * val_1185;
val_1240 = val_1154 * val_1185;
val_1241 = val_1156 * val_1185;
val_1242 = val_1159 * val_1185;
val_1243 = val_1151 * val_1186;
val_1244 = val_1154 * val_1186;
val_1245 = val_1156 * val_1186;
val_1246 = val_1159 * val_1186;
val_1247 = val_1162 * val_1187;
val_1248 = val_1165 * val_1187;
val_1249 = val_1168 * val_1187;
val_1250 = val_1170 * val_1187;
val_1251 = val_1162 * val_1188;
val_1252 = val_1165 * val_1188;
val_1253 = val_1168 * val_1188;
val_1254 = val_1170 * val_1188;
val_1255 = val_1173 * val_1189;
val_1256 = val_1176 * val_1189;
val_1257 = val_1179 * val_1189;
val_1258 = val_1181 * val_1189;
val_1259 = val_1173 * val_1190;
val_1260 = val_1176 * val_1190;
val_1261 = val_1179 * val_1190;
val_1262 = val_1181 * val_1190;
val_1263 = val_1191 + val_1199;
val_1264 = val_1192 + val_1200;
val_1265 = val_1193 + val_1201;
val_1266 = val_1194 + val_1202;
val_1267 = val_1195 + val_1203;
val_1268 = val_1196 + val_1204;
val_1269 = val_1197 + val_1205;
val_1270 = val_1198 + val_1206;
val_1271 = val_1207 + val_1263;
val_1272 = val_1208 + val_1264;
val_1273 = val_1209 + val_1265;
val_1274 = val_1210 + val_1266;
val_1275 = val_1211 + val_1267;
val_1276 = val_1212 + val_1268;
val_1277 = val_1213 + val_1269;
val_1278 = val_1214 + val_1270;
val_1279 = val_1215 + val_1223;
val_1280 = val_1216 + val_1224;
val_1281 = val_1217 + val_1225;
val_1282 = val_1218 + val_1226;
val_1283 = val_1219 + val_1227;
val_1284 = val_1220 + val_1228;
val_1285 = val_1221 + val_1229;
val_1286 = val_1222 + val_1230;
val_1287 = val_1231 + val_1279;
val_1288 = val_1232 + val_1280;
val_1289 = val_1233 + val_1281;
val_1290 = val_1234 + val_1282;
val_1291 = val_1235 + val_1283;
val_1292 = val_1236 + val_1284;
val_1293 = val_1237 + val_1285;
val_1294 = val_1238 + val_1286;
val_1295 = val_1239 + val_1247;
val_1296 = val_1240 + val_1248;
val_1297 = val_1241 + val_1249;
val_1298 = val_1242 + val_1250;
val_1299 = val_1243 + val_1251;
val_1300 = val_1244 + val_1252;
val_1301 = val_1245 + val_1253;
val_1302 = val_1246 + val_1254;
val_1303 = val_1255 + val_1295;
val_1304 = val_1256 + val_1296;
val_1305 = val_1257 + val_1297;
val_1306 = val_1258 + val_1298;
val_1307 = val_1259 + val_1299;
val_1308 = val_1260 + val_1300;
val_1309 = val_1261 + val_1301;
val_1310 = val_1262 + val_1302;
val_1311 = A[idx_off + 119] * val_970;
val_1312 = A[idx_off + 120] * val_971;
val_1313 = A[idx_off + 121] * val_972;
val_1314 = A[idx_off + 120] * val_973;
val_1315 = A[idx_off + 121] * val_974;
val_1316 = A[idx_off + 119] * val_975;
val_1317 = A[idx_off + 120] * val_976;
val_1318 = A[idx_off + 121] * val_977;
val_1319 = A[idx_off + 12] * val_1311;
val_1320 = A[idx_off + 0] * val_1312;
val_1321 = A[idx_off + 0] * val_1313;
val_1322 = A[idx_off + 0] * val_1311;
val_1323 = A[idx_off + 12] * val_1312;
val_1324 = A[idx_off + 24] * val_1311;
val_1325 = A[idx_off + 13] * val_1312;
val_1326 = A[idx_off + 12] * val_1313;
val_1327 = A[idx_off + 0] * val_1314;
val_1328 = A[idx_off + 0] * val_1315;
val_1329 = A[idx_off + 12] * val_1314;
val_1330 = A[idx_off + 13] * val_1314;
val_1331 = A[idx_off + 12] * val_1315;
val_1332 = A[idx_off + 12] * val_1316;
val_1333 = A[idx_off + 0] * val_1317;
val_1334 = A[idx_off + 0] * val_1318;
val_1335 = A[idx_off + 0] * val_1316;
val_1336 = A[idx_off + 12] * val_1317;
val_1337 = A[idx_off + 24] * val_1316;
val_1338 = A[idx_off + 13] * val_1317;
val_1339 = A[idx_off + 12] * val_1318;
val_1340 = val_1319 + val_1320;
val_1341 = val_1321 + val_1340;
val_1342 = val_1322 + val_1323;
val_1343 = val_1321 + val_1342;
val_1344 = val_1320 + val_1322;
val_1345 = val_1324 + val_1325;
val_1346 = val_1326 + val_1344;
val_1347 = val_1321 + val_1345;
val_1348 = val_1319 + val_1327;
val_1349 = val_1328 + val_1348;
val_1350 = val_1322 + val_1329;
val_1351 = val_1328 + val_1350;
val_1352 = val_1322 + val_1327;
val_1353 = val_1324 + val_1330;
val_1354 = val_1331 + val_1352;
val_1355 = val_1328 + val_1353;
val_1356 = val_1332 + val_1333;
val_1357 = val_1334 + val_1356;
val_1358 = val_1335 + val_1336;
val_1359 = val_1334 + val_1358;
val_1360 = val_1333 + val_1335;
val_1361 = val_1337 + val_1338;
val_1362 = val_1339 + val_1360;
val_1363 = val_1334 + val_1361;
val_1364 = A[idx_off + 130] * val_384;
val_1365 = A[idx_off + 131] * val_385;
val_1366 = A[idx_off + 132] * val_386;
val_1367 = A[idx_off + 133] * val_387;
val_1368 = A[idx_off + 130] * val_381;
val_1369 = A[idx_off + 131] * val_388;
val_1370 = A[idx_off + 132] * val_382;
val_1371 = A[idx_off + 133] * val_389;
val_1372 = A[idx_off + 131] * val_387;
val_1373 = A[idx_off + 132] * val_381;
val_1374 = A[idx_off + 133] * val_390;
val_1375 = A[idx_off + 0] * val_1364;
val_1376 = A[idx_off + 0] * val_1365;
val_1377 = A[idx_off + 8] * val_1366;
val_1378 = A[idx_off + 3] * val_1367;
val_1379 = A[idx_off + 0] * val_1368;
val_1380 = A[idx_off + 0] * val_1369;
val_1381 = A[idx_off + 8] * val_1370;
val_1382 = A[idx_off + 3] * val_1371;
val_1383 = A[idx_off + 0] * val_1372;
val_1384 = A[idx_off + 8] * val_1373;
val_1385 = A[idx_off + 3] * val_1374;
val_1386 = A[idx_off + 10] * val_1366;
val_1387 = A[idx_off + 15] * val_1367;
val_1388 = A[idx_off + 10] * val_1370;
val_1389 = A[idx_off + 15] * val_1371;
val_1390 = A[idx_off + 10] * val_1373;
val_1391 = A[idx_off + 15] * val_1374;
val_1392 = A[idx_off + 12] * val_1364;
val_1393 = A[idx_off + 12] * val_1365;
val_1394 = A[idx_off + 0] * val_1367;
val_1395 = A[idx_off + 12] * val_1368;
val_1396 = A[idx_off + 12] * val_1369;
val_1397 = A[idx_off + 0] * val_1371;
val_1398 = A[idx_off + 12] * val_1372;
val_1399 = A[idx_off + 0] * val_1374;
val_1400 = val_1271 * val_1375;
val_1401 = val_1272 * val_1376;
val_1402 = val_1273 * val_1377;
val_1403 = val_1274 * val_1378;
val_1404 = val_1275 * val_1375;
val_1405 = val_1276 * val_1376;
val_1406 = val_1277 * val_1377;
val_1407 = val_1278 * val_1378;
val_1408 = val_1271 * val_1379;
val_1409 = val_1272 * val_1380;
val_1410 = val_1273 * val_1381;
val_1411 = val_1274 * val_1382;
val_1412 = val_1275 * val_1379;
val_1413 = val_1276 * val_1380;
val_1414 = val_1277 * val_1381;
val_1415 = val_1278 * val_1382;
val_1416 = val_1272 * val_1383;
val_1417 = val_1273 * val_1384;
val_1418 = val_1274 * val_1385;
val_1419 = val_1276 * val_1383;
val_1420 = val_1277 * val_1384;
val_1421 = val_1278 * val_1385;
val_1422 = val_1287 * val_1375;
val_1423 = val_1288 * val_1376;
val_1424 = val_1289 * val_1386;
val_1425 = val_1290 * val_1387;
val_1426 = val_1291 * val_1375;
val_1427 = val_1292 * val_1376;
val_1428 = val_1293 * val_1386;
val_1429 = val_1294 * val_1387;
val_1430 = val_1287 * val_1379;
val_1431 = val_1288 * val_1380;
val_1432 = val_1289 * val_1388;
val_1433 = val_1290 * val_1389;
val_1434 = val_1291 * val_1379;
val_1435 = val_1292 * val_1380;
val_1436 = val_1293 * val_1388;
val_1437 = val_1294 * val_1389;
val_1438 = val_1288 * val_1383;
val_1439 = val_1289 * val_1390;
val_1440 = val_1290 * val_1391;
val_1441 = val_1292 * val_1383;
val_1442 = val_1293 * val_1390;
val_1443 = val_1294 * val_1391;
val_1444 = val_1303 * val_1392;
val_1445 = val_1304 * val_1393;
val_1446 = val_1305 * val_1377;
val_1447 = val_1306 * val_1394;
val_1448 = val_1307 * val_1392;
val_1449 = val_1308 * val_1393;
val_1450 = val_1309 * val_1377;
val_1451 = val_1310 * val_1394;
val_1452 = val_1303 * val_1395;
val_1453 = val_1304 * val_1396;
val_1454 = val_1305 * val_1381;
val_1455 = val_1306 * val_1397;
val_1456 = val_1307 * val_1395;
val_1457 = val_1308 * val_1396;
val_1458 = val_1309 * val_1381;
val_1459 = val_1310 * val_1397;
val_1460 = val_1304 * val_1398;
val_1461 = val_1305 * val_1384;
val_1462 = val_1306 * val_1399;
val_1463 = val_1308 * val_1398;
val_1464 = val_1309 * val_1384;
val_1465 = val_1310 * val_1399;
val_1466 = val_1400 + val_1401;
val_1467 = val_1402 + val_1466;
val_1468 = val_1403 + val_1467;
val_1469 = val_1404 + val_1405;
val_1470 = val_1406 + val_1469;
val_1471 = val_1407 + val_1470;
val_1472 = val_1408 + val_1409;
val_1473 = val_1410 + val_1472;
val_1474 = val_1411 + val_1473;
val_1475 = val_1412 + val_1413;
val_1476 = val_1414 + val_1475;
val_1477 = val_1415 + val_1476;
val_1478 = val_1416 + val_1417;
val_1479 = val_1418 + val_1478;
val_1480 = val_1419 + val_1420;
val_1481 = val_1421 + val_1480;
val_1482 = val_1422 + val_1423;
val_1483 = val_1424 + val_1482;
val_1484 = val_1425 + val_1483;
val_1485 = val_1426 + val_1427;
val_1486 = val_1428 + val_1485;
val_1487 = val_1429 + val_1486;
val_1488 = val_1430 + val_1431;
val_1489 = val_1432 + val_1488;
val_1490 = val_1433 + val_1489;
val_1491 = val_1434 + val_1435;
val_1492 = val_1436 + val_1491;
val_1493 = val_1437 + val_1492;
val_1494 = val_1438 + val_1439;
val_1495 = val_1440 + val_1494;
val_1496 = val_1441 + val_1442;
val_1497 = val_1443 + val_1496;
val_1498 = val_1444 + val_1445;
val_1499 = val_1446 + val_1498;
val_1500 = val_1447 + val_1499;
val_1501 = val_1448 + val_1449;
val_1502 = val_1450 + val_1501;
val_1503 = val_1451 + val_1502;
val_1504 = val_1452 + val_1453;
val_1505 = val_1454 + val_1504;
val_1506 = val_1455 + val_1505;
val_1507 = val_1456 + val_1457;
val_1508 = val_1458 + val_1507;
val_1509 = val_1459 + val_1508;
val_1510 = val_1460 + val_1461;
val_1511 = val_1462 + val_1510;
val_1512 = val_1463 + val_1464;
val_1513 = val_1465 + val_1512;
val_1514 = A[idx_off + 43] * val_1341;
val_1515 = A[idx_off + 43] * val_1343;
val_1516 = A[idx_off + 43] * val_1346;
val_1517 = A[idx_off + 43] * val_1347;
val_1518 = A[idx_off + 44] * val_1349;
val_1519 = A[idx_off + 44] * val_1351;
val_1520 = A[idx_off + 44] * val_1354;
val_1521 = A[idx_off + 44] * val_1355;
val_1522 = A[idx_off + 45] * val_1357;
val_1523 = A[idx_off + 45] * val_1359;
val_1524 = A[idx_off + 45] * val_1362;
val_1525 = A[idx_off + 45] * val_1363;
val_1526 = val_1468 * val_1514;
val_1527 = val_1471 * val_1514;
val_1528 = val_1474 * val_1515;
val_1529 = val_1477 * val_1514;
val_1530 = val_1479 * val_1516;
val_1531 = val_1481 * val_1517;
val_1532 = val_1484 * val_1518;
val_1533 = val_1487 * val_1518;
val_1534 = val_1490 * val_1519;
val_1535 = val_1493 * val_1518;
val_1536 = val_1495 * val_1520;
val_1537 = val_1497 * val_1521;
val_1538 = val_1500 * val_1522;
val_1539 = val_1503 * val_1522;
val_1540 = val_1506 * val_1523;
val_1541 = val_1509 * val_1522;
val_1542 = val_1511 * val_1524;
val_1543 = val_1513 * val_1525;
val_1544 = val_1526 + val_1532;
val_1545 = val_1527 + val_1533;
val_1546 = val_1528 + val_1534;
val_1547 = val_1529 + val_1535;
val_1548 = val_1530 + val_1536;
val_1549 = val_1531 + val_1537;
val_1550 = val_1538 + val_1544;
val_1551 = val_1539 + val_1545;
val_1552 = val_1540 + val_1546;
val_1553 = val_1541 + val_1547;
val_1554 = val_1542 + val_1548;
val_1555 = val_1543 + val_1549;
val_1556 = A[idx_off + 122] * val_1550;
val_1557 = A[idx_off + 123] * val_1551;
val_1558 = A[idx_off + 122] * val_1552;
val_1559 = A[idx_off + 123] * val_1553;
val_1560 = A[idx_off + 122] * val_1554;
val_1561 = A[idx_off + 123] * val_1555;
val_1562 = val_1556 + val_1557;
val_1563 = val_1558 + val_1559;
val_1564 = val_1560 + val_1561;
val_1565 = A[idx_off + 116] * val_1562;
val_1566 = A[idx_off + 117] * val_1563;
val_1567 = A[idx_off + 118] * val_1564;
val_1568 = val_1565 + val_1566;
val_1569 = val_1567 + val_1568;
A[i*n_inputs] += val_1569;
}
A[i*n_inputs] += val_1569;
}
// Host driver: builds a random arithmetic-DAG workload, uploads it, launches
// the `ac` kernel twice (one warmup, one timed), and reports throughput plus a
// sample of the per-batch results.
//
// NOTE(review): h_A holds batch_size * n_inputs * iter floats, but only the
// first batch_size * n_inputs slab is initialized below; the remaining
// (iter - 1) slabs are copied to the device uninitialized — confirm the
// kernel writes those slabs before reading them.
int
main(void)
{
    // Error code to check return values for CUDA calls.
    cudaError_t err = cudaSuccess;

#define N_INPUTS 192
#define N_ARITH 1420
    const int n_inputs = N_INPUTS;
    const int n_arith = N_ARITH;
    const int batch_size = 1024;
    const int iter = 1024;
    // Nodes below `thresh` draw operands from the primary inputs; nodes above
    // it draw from earlier intermediate results (index < own index).
    const int thresh = n_arith / 3;

    size_t size = batch_size * (n_inputs) * (iter) * sizeof(float);
    size_t size_idx = n_arith * sizeof(int);

    float *h_A = (float *)malloc(size);
    int *h_B = (int *)malloc(size_idx);
    int *h_C = (int *)malloc(size_idx);
    int *h_op_sel = (int *)malloc(size_idx);
    // Fail fast on host allocation failure instead of segfaulting later.
    if (h_A == NULL || h_B == NULL || h_C == NULL || h_op_sel == NULL)
    {
        fprintf(stderr, "Failed to allocate host buffers!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors: operand indices and per-node op select.
    for (int i = 0; i < n_arith; ++i)
    {
        if (i < thresh) {
            h_B[i] = rand() % (n_inputs);
            h_C[i] = rand() % (n_inputs);
        }
        else {
            h_B[i] = rand() % (i);
            h_C[i] = rand() % (i);
        }
        h_op_sel[i] = rand() % 2;  // 0/1 chooses the arithmetic op for node i
    }
    for (int i = 0; i < n_inputs; ++i) {
        for (int b = 0; b < batch_size; ++b) {
            //h_A[b* n_inputs + i]= float(rand());
            h_A[b * n_inputs + i] = 0.5;
        }
    }

    // Allocate the device vectors. (Error messages name the actual buffer —
    // the originals all said "vector A".)
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size_idx);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size_idx);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int *d_op_sel = NULL;
    err = cudaMalloc((void **)&d_op_sel, size_idx);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector op_sel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors to device memory.
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size_idx, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_C, h_C, size_idx, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_op_sel, h_op_sel, size_idx, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector op_sel from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch configuration: one thread per batch element.
    int threadsPerBlock = 32;
    int blocksPerGrid = (batch_size + threadsPerBlock - 1) / threadsPerBlock;
    struct timeval t1, t2;

    // Warmup launch (absorbs one-time driver/JIT overhead before timing).
    ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
    // Finish execution of kernel before starting the timer.
    cudaDeviceSynchronize();

    gettimeofday(&t1, 0);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
    // Finish execution of kernel so the elapsed time covers the full run.
    cudaDeviceSynchronize();
    gettimeofday(&t2, 0);

    double time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0;  // ms
    printf("Time of kernel: %3.4f ms \n", time);

    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // ops / time_ms / 1e6 == Gops/sec (the original divided by 10E6 == 1e7,
    // understating throughput by a factor of 10).
    printf("Throughput: %.3f Gops/sec, Batch: %d, nIter: %d, n_arith: %d\n",
           (((1.0 * batch_size * iter * n_arith)) / time) / 1.0E6, batch_size, iter, n_arith);

    // Copy the device result vector back to host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Print the first result of each of the first 32 batch elements.
    //for (int i=0; i<numElements; i++) {
    for (int i = 0; i < 32; i++) {
        printf("%d : %f,", i, h_A[i * n_inputs]);
    }
    printf("\n");

    // Release all device and host buffers (the original leaked d_op_sel and
    // h_op_sel).
    err = cudaFree(d_A);
    err = cudaFree(d_B);
    err = cudaFree(d_C);
    err = cudaFree(d_op_sel);
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_op_sel);
    printf("Done!\n");
    return 0;
}
|
8,670 | #include <iostream>
#include <string>
#include <fstream>
#include <vector>
#include <sstream>
using namespace std;
// Split `s` on every occurrence of `delim` and return the pieces in order.
// Empty fields between consecutive delimiters are kept; a trailing delimiter
// does not yield a trailing empty token (std::getline semantics).
vector<string> split(const string &s, char delim) {
    vector<string> parts;
    stringstream stream(s);
    for (string piece; getline(stream, piece, delim); ) {
        parts.push_back(piece);
    }
    return parts;
}
// Read a graph edge list from `filename` into a compressed sparse structure.
// First line: "<vertices> <edges> [ignore [one_starting]]".
//   - ignore: if nonzero, skip `v` "<id> <label>" mapping lines before the edges.
//   - one_starting: if nonzero (default), vertex ids in the file are 1-based.
// Outputs (all allocated here with new[]; caller owns them):
//   - *row_ind : length e, the second vertex of each edge (made 0-based).
//   - *col_ind : length v+1, cumulative offsets into row_ind/P_sparse grouped
//                by the first vertex of each edge (edges must be pre-sorted by it).
//   - *P_sparse: length e, 1/outdegree for each edge (row-stochastic weights).
//   - *nnz = e, *n = v.
void read(string filename, double** P_sparse, int** row_ind, int** col_ind, int* nnz, int * n)
{
fstream f(filename.c_str());
int v, e;
int ignore = 0; // Generally no links mapping
int one_starting = 1;
std::string::size_type sz; // alias of size_t
/*
* Parsing the metadata of data
*/
string metadata;
getline (f, metadata);
vector<string> tokens = split(metadata, ' ');
v = stoi ( tokens[0], &sz );
e = stoi ( tokens[1], &sz );
cout<<"Number of Vertices = "<<v<<endl;
cout<<"Number of Edges = "<<e<<endl;
if (tokens.size() == 2) { // Ignore the link to number mapping
ignore = 1;
}
else if (tokens.size() == 3) {
ignore = stoi ( tokens[2], &sz);
}
else if (tokens.size() == 4) {
ignore = stoi ( tokens[2], &sz);
one_starting = stoi ( tokens[3], &sz );
}
*nnz = e;
*n = v;
// Skip the optional vertex-id -> label mapping section (one line per vertex).
if (ignore) {
string dummy;
int du;
for(int i = 0; i < v; i++)
{
f >> du >> dummy;
}
}
*row_ind = new int[e];
*col_ind = new int[v + 1];
// Zero the offset array; entries for empty groups stay at the running total below.
for(int i = 0; i < v + 1; i++)
{
(*col_ind)[i] = 0;
}
*P_sparse = new double[e];
int curLengthCumulative = 0;
int curRow, prevRow = 0;
// Stream the edges, recording cumulative counts whenever the group id changes.
for(int i = 0; i < *nnz; i++)
{
f >> curRow;
f >> (*row_ind)[i];
if (one_starting)
(*row_ind)[i]--;
// For 0-based files, shift ids up by one so the offset bookkeeping below
// (which treats index 0 as "before the first group") still works.
if (!one_starting)
curRow ++;
// curRow--;
if (curRow != prevRow)
{
// Groups with no edges between prevRow and curRow get the same offset.
for (int j = prevRow + 1; j < curRow; j++)
(*col_ind)[j] = curLengthCumulative;
(*col_ind)[prevRow] = curLengthCumulative;
prevRow = curRow;
}
curLengthCumulative++;
}
// Close the final group with the total edge count.
(*col_ind)[curRow] = curLengthCumulative;
int outdeg = 0;
// Weight each edge by the reciprocal of its source's out-degree.
// NOTE(review): outdeg can be 0 for isolated vertices, but such groups also
// have no edges, so the inner loop body never runs for them.
for (int i = 0; i < *n; i++) {
outdeg = (*col_ind)[i + 1] - (*col_ind)[i];
for (int j = (*col_ind)[i]; j < (*col_ind)[i + 1]; j++) {
(*P_sparse)[j] = 1.0 / outdeg;
}
}
}
/*
int main()
{
string filename = "hollins.dat";
int *row_ind, *col_ind, *nnzPerVectorA;
double* P_sparse;
int nnz, n;
read(filename, &P_sparse, &row_ind, &col_ind, &nnz, &n, &nnzPerVectorA);
cout << n << endl << nnz << endl;
cout << col_ind[0] << endl << col_ind[n-1] << endl;
}
*/
|
8,671 | /* Author: Joel Deen
* This library takes in arrays of intergers and preforms various vector operations using the GPU.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "LAVectorGPUOperations.cuh"
#include <exception>
// GPU kernel Calls ----------------------------------------------------------------------------------------------------------
// Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, d).
// One thread per element; surplus threads in the last block exit early.
__global__ void intAdd(int* c, const int* a, const int* b, const unsigned int d)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= d) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
// Element-wise vector subtraction: c[i] = a[i] - b[i] for i in [0, d).
// One thread per element, launched by gpuIntSubtract.
__global__ void intSubtract(int* c, const int* a, const int* b, const unsigned int d) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < d) {
        c[i] = a[i] - b[i];  // fix: was `a[i] + b[i]`, a copy-paste from intAdd
    }
}
// Element-wise scalar multiply: result[i] = val1[i] * val2 for i in [0, size).
__global__ void intMultiply(int* result, const int* val1, const int val2, const unsigned int size) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size) {
        // fix: body previously indexed with blockIdx.x, so with multi-thread
        // blocks only the first gridDim.x elements were ever written (all
        // threads of a block redundantly hitting the same slot).
        result[i] = val1[i] * val2;
    }
}
// Element-wise scalar divide: result[i] = val1[i] / val2 for i in [0, size).
// Caller is expected to pre-check val2 != 0 (gpuIntDivide does).
__global__ void intDivide(int* result, const int* val1, const int val2, const unsigned int size) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size) {
        // fix: body previously indexed with blockIdx.x, so with multi-thread
        // blocks only the first gridDim.x elements were ever written.
        result[i] = val1[i] / val2;
    }
}
// CPU Calls -----------------------------------------------------------------------------------------------------------------
// Adds two host int arrays of length `size` element-wise on the GPU and
// returns a freshly new[]-allocated host array with the sums (caller owns it).
// Throws int error codes: 1 device select, 2 device alloc, 3 H2D copy,
// 4 kernel launch, 5 D2H copy, 6 device free.
// Fix: every error path now releases the host result buffer and any device
// allocations made so far (previously all of them leaked on throw).
int* gpuIntAdd(int* values1, int* values2, unsigned int size) {
    cudaError_t cuda_status = cudaSetDevice(0);
    if (cuda_status != cudaSuccess) {
        throw 1;
    }
    int* result = new int[size];
    int* cuda_vec1 = NULL;
    int* cuda_vec2 = NULL;
    int* cuda_result = NULL;
    cuda_status = cudaMalloc((void**)&cuda_vec1, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMalloc((void**)&cuda_vec2, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1);
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMalloc((void**)&cuda_result, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2);
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMemcpy(cuda_vec1, values1, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 3;
    }
    cuda_status = cudaMemcpy(cuda_vec2, values2, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 3;
    }
    // 1 + size/MAX_ADDING_THREADS rounds the grid up (may add one idle block,
    // which the in-kernel bounds check absorbs).
    intAdd << <1 + (size / MAX_ADDING_THREADS), MAX_ADDING_THREADS >> > (cuda_result, cuda_vec1, cuda_vec2, size);
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 4;
    }
    // Blocking D2H copy also synchronizes with the kernel.
    cuda_status = cudaMemcpy(result, cuda_result, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 5;
    }
    cuda_status = cudaFree(cuda_vec1);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 6;
    }
    cuda_status = cudaFree(cuda_vec2);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_result);
        delete[] result;
        throw 6;
    }
    cuda_status = cudaFree(cuda_result);
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 6;
    }
    return result;
}
// Subtracts two host int arrays of length `size` element-wise on the GPU
// (values1 - values2) and returns a new[]-allocated host array (caller owns it).
// Throws int error codes: 1 device select, 2 device alloc, 3 H2D copy,
// 4 kernel launch, 5 D2H copy, 6 device free.
// Fix: every error path now releases the host result buffer and any device
// allocations made so far (previously all of them leaked on throw).
int* gpuIntSubtract(int* values1, int* values2, unsigned int size) {
    cudaError_t cuda_status = cudaSetDevice(0);
    if (cuda_status != cudaSuccess) {
        throw 1;
    }
    int* result = new int[size];
    int* cuda_vec1 = NULL;
    int* cuda_vec2 = NULL;
    int* cuda_result = NULL;
    cuda_status = cudaMalloc((void**)&cuda_vec1, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMalloc((void**)&cuda_vec2, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1);
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMalloc((void**)&cuda_result, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2);
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMemcpy(cuda_vec1, values1, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 3;
    }
    cuda_status = cudaMemcpy(cuda_vec2, values2, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 3;
    }
    intSubtract << <1 + (size / MAX_ADDING_THREADS), MAX_ADDING_THREADS >> > (cuda_result, cuda_vec1, cuda_vec2, size);
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 4;
    }
    cuda_status = cudaMemcpy(result, cuda_result, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 5;
    }
    cuda_status = cudaFree(cuda_vec1);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec2); cudaFree(cuda_result);
        delete[] result;
        throw 6;
    }
    cuda_status = cudaFree(cuda_vec2);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_result);
        delete[] result;
        throw 6;
    }
    cuda_status = cudaFree(cuda_result);
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 6;
    }
    return result;
}
// Multiplies each element of a host int array of length `size` by `value`
// on the GPU. Returns a new[]-allocated host array (caller owns it).
// Throws int error codes: 1 device select, 2 device alloc, 3 H2D copy,
// 4 kernel launch, 5 D2H copy, 6 device free.
// Fix: every error path now releases the host result buffer and any device
// allocations made so far (previously all of them leaked on throw).
int* gpuIntMultiply(int* values1, int value, unsigned int size) {
    cudaError_t cuda_status = cudaSetDevice(0);
    if (cuda_status != cudaSuccess) {
        throw 1;
    }
    int* result = new int[size];
    int* cuda_vec1 = NULL;
    int* cuda_result = NULL;
    cuda_status = cudaMalloc((void**)&cuda_vec1, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMalloc((void**)&cuda_result, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1);
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMemcpy(cuda_vec1, values1, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_result);
        delete[] result;
        throw 3;
    }
    intMultiply << <1 + (size / MAX_ADDING_THREADS), MAX_ADDING_THREADS >> > (cuda_result, cuda_vec1, value, size);
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_result);
        delete[] result;
        throw 4;
    }
    cuda_status = cudaMemcpy(result, cuda_result, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_result);
        delete[] result;
        throw 5;
    }
    cuda_status = cudaFree(cuda_vec1);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_result);
        delete[] result;
        throw 6;
    }
    cuda_status = cudaFree(cuda_result);
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 6;
    }
    return result;
}
// Divides each element of a host int array of length `size` by `value`
// on the GPU. Returns a new[]-allocated host array (caller owns it).
// Throws int error codes: 7 division by zero, 1 device select, 2 device
// alloc, 3 H2D copy, 4 kernel launch, 5 D2H copy, 6 device free.
// Fixes: (1) previously launched intMultiply instead of intDivide, so the
// function multiplied rather than divided; (2) error paths now release the
// host result buffer and any device allocations made so far.
int* gpuIntDivide(int* values1, int value, unsigned int size) {
    if (value == 0) {
        throw 7;
    }
    cudaError_t cuda_status = cudaSetDevice(0);
    if (cuda_status != cudaSuccess) {
        throw 1;
    }
    int* result = new int[size];
    int* cuda_vec1 = NULL;
    int* cuda_result = NULL;
    cuda_status = cudaMalloc((void**)&cuda_vec1, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMalloc((void**)&cuda_result, size * sizeof(int));
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1);
        delete[] result;
        throw 2;
    }
    cuda_status = cudaMemcpy(cuda_vec1, values1, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_result);
        delete[] result;
        throw 3;
    }
    intDivide << <1 + (size / MAX_ADDING_THREADS), MAX_ADDING_THREADS >> > (cuda_result, cuda_vec1, value, size);
    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_result);
        delete[] result;
        throw 4;
    }
    cuda_status = cudaMemcpy(result, cuda_result, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_vec1); cudaFree(cuda_result);
        delete[] result;
        throw 5;
    }
    cuda_status = cudaFree(cuda_vec1);
    if (cuda_status != cudaSuccess) {
        cudaFree(cuda_result);
        delete[] result;
        throw 6;
    }
    cuda_status = cudaFree(cuda_result);
    if (cuda_status != cudaSuccess) {
        delete[] result;
        throw 6;
    }
    return result;
}
8,672 | #include<stdio.h>
#include<string.h>
/* Mark comma positions: result[i] = 1 where poly[i] == ',', else 0.
   Launched as <<<len, 1>>> (one single-thread block per character), so
   blockIdx.x is the character index. Vertex count = number of 1s + 1. */
__global__ void kernel(char *poly, int *result, int len){
    int pos = blockIdx.x;
    if (pos < len) {
        result[pos] = (poly[pos] == ',') ? 1 : 0;
    }
}
/* Count the vertices of a comma-separated polygon string (commas + 1).
   Copies the string to the GPU, marks comma positions in parallel with
   `kernel`, then sums the marks on the host.
   Fix: frees dev_poly, dev_result and host_result, which previously
   leaked on every call. */
int countv(char *polygon)
{
    char * dev_poly;
    int * dev_result;
    int * host_result;
    int i,result;
    int len = strlen(polygon);
    cudaMalloc((void **)&dev_poly, len);
    cudaMalloc((void **)&dev_result, len*sizeof(int));
    cudaMemcpy(dev_poly, polygon, len, cudaMemcpyHostToDevice);
    kernel<<<len,1>>> (dev_poly, dev_result, len);
    host_result = (int *)malloc(len*sizeof(int));
    /* blocking copy also synchronizes with the kernel */
    cudaMemcpy(host_result, dev_result, len*sizeof(int), cudaMemcpyDeviceToHost);
    /* count the 1's in host_result */
    result = 0;
    for(i=0;i<len;i++){
        if(host_result[i]==1)result++;
    }
    result ++;
    /* fix: release GPU and host buffers (previously leaked) */
    cudaFree(dev_poly);
    cudaFree(dev_result);
    free(host_result);
    return result;
}
|
8,673 | #include "includes.h"
// Downsample `background` (wb x hb pixels, 3 interleaved float channels) into
// `subBG` (ws x hs pixels, 3 channels) by averaging each sRate x sRate block.
// One thread per output pixel; 2D launch covering (ws, hs).
__global__ void CalculateSampleB( const float *background, float *subBG, const int wb, const int hb, const int ws, const int hs, const int sRate ){
    const int ys = blockIdx.y * blockDim.y + threadIdx.y;
    const int xs = blockIdx.x * blockDim.x + threadIdx.x;
    const int curst = ws * ys + xs;
    if (ys < hs && xs < ws){
        const int yb = ys * sRate;   // top-left of the source block
        const int xb = xs * sRate;
        int num = 0;                 // pixels actually inside the source image
        float sum[3] = {0};
        for (int i=0; i<sRate; i++){
            for (int j=0; j<sRate; j++){
                if (yb + i < hb && xb + j < wb){
                    int curb = wb * (yb+i) + (xb+j);
                    sum[0] += background[curb*3+0];
                    sum[1] += background[curb*3+1];
                    sum[2] += background[curb*3+2];
                    num++;
                }
            }
        }
        // Fix: guard against num == 0 (output pixel maps entirely outside the
        // background), which previously wrote NaN from 0.0f / 0.
        if (num > 0){
            subBG[curst*3+0] = sum[0] / num;
            subBG[curst*3+1] = sum[1] / num;
            subBG[curst*3+2] = sum[2] / num;
        } else {
            subBG[curst*3+0] = 0.0f;
            subBG[curst*3+1] = 0.0f;
            subBG[curst*3+2] = 0.0f;
        }
    }
}
8,674 | #include "includes.h"
float *A,*L,*U,*input;
void arrayInit(int n);
void verifyLU(int n);
void updateLU(int n);
void freemem(int n);
/*
 * Divide the elements to the right of the diagonal in row `c` of the
 * b-by-b row-major matrix `a` by the diagonal element a[c][c]
 * (the pivot-row scaling step of LU decomposition).
 * Note: no thread indexing is used — every launched thread runs the
 * same serial loop over the row.
 */
__global__ void scale( float *a, int b, int c) {
    const int n = b;
    const int row = c;
    const float pivot = a[n * row + row];  // unchanged by this loop
    for (int col = row + 1; col < n; col++) {
        a[n * row + col] = a[n * row + col] / pivot;
    }
}
8,675 | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s): Ben Hapip, Damon Hage, Thomas Ames
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
// Reference implementation: for each of the N inputs, approximates sin(x)
// with the Taylor series x - x^3/3! + x^5/5! - ... using TERMS additional
// terms beyond x. Results are written to output[i].
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];                              // running partial sum, starts at x
float numer = input[i] * input[i] * input[i];        // x^3, first odd power beyond x
int denom = 6; // 3!
int sign = -1;                                       // series alternates -, +, -, ...
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];                        // advance to next odd power of x
denom *= (2*j+2) * (2*j+3);                          // (2j+1)! -> (2j+3)!
sign *= -1;
}
output[i] = value;
}
}
const int block_size = 1024;
// kernel function (CUDA device)
// GPU port of sine_serial: each thread computes the Taylor-series sine of
// one input element. Launched with ceil(N/block_size) blocks of block_size
// threads, so the last block may contain surplus threads.
__global__ void sine_parallel(float *input, float *output)
{
  // Global element index for this thread (block_size == blockDim.x here).
  int thread_id = blockIdx.x * block_size + threadIdx.x;
  // Bounds check for the rounded-up grid.
  // Fix: the original condition was `thread_id <= N`, which let the thread
  // with id N read and write one element past the end of both arrays.
  if(thread_id < N)
  {
    float value = input[thread_id];
    float numer = input[thread_id] * input[thread_id] * input[thread_id];
    int denom = 6; // 3!
    int sign = -1;
    for (int j=1; j<=TERMS;j++)
    {
      value += sign * numer / denom;
      numer *= input[thread_id] * input[thread_id];
      denom *= (2*j+2) * (2*j+3);
      sign *= -1;
    }
    output[thread_id] = value;
  }
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
// (wall-clock, since the Unix epoch; pair with stop_timer to measure spans).
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
// (in seconds, labelled with `name`) and returns the elapsed microseconds.
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
// convert microseconds to seconds for display
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
// Synchronize with the device, then report any pending CUDA errors to
// stderr tagged with `label`. Checks twice: once after the sync (surfaces
// failures from earlier asynchronous work) and once via cudaGetLastError
// (clears and reports any sticky launch error).
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchroneous operations that would otherwise
// potentially go unnoticed
// NOTE(review): cudaThreadSynchronize() is deprecated; its direct
// replacement is cudaDeviceSynchronize() — consider switching.
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
// Driver: computes sine of N values on the CPU and the GPU, times every
// stage of the GPU pipeline (alloc, H2D, kernel, D2H), and verifies the
// two result arrays agree element-wise.
int main (int argc, char **argv)
{
  /*
  Declaration of variables
  */
  float *h_cpu_result = (float*)malloc(N*sizeof(float));
  float *h_input = (float*)malloc(N*sizeof(float));
  float *h_gpu_result = (float*)malloc(N*sizeof(float));
  float *d_input;
  float *d_output;
  //BEGIN: CPU implementation (do not modify)
  //Initialize data on CPU
  int i;
  for (i=0; i<N; i++)
  {
    h_input[i] = 0.1f * i;
  }
  //Execute and time the CPU version
  long long CPU_start_time = start_timer();
  sine_serial(h_input, h_cpu_result);
  long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
  //END: CPU implementation (do not modify)
  //BEGIN: GPU implementation
  //timer to calculate total GPU run time
  long long Total_GPU_run_Time= start_timer();
  //timer for memory allocation
  long long GPU_mem_allocation = start_timer();
  cudaMalloc((void**) &d_input,N*sizeof(float));
  cudaMalloc((void**) &d_output,N*sizeof(float));
  long long GPU_mem_allocation_result = stop_timer(GPU_mem_allocation, "\nGPU Memory Allocation");
  //timer for memory copy to device
  long long GPU_copy_to_device = start_timer();
  cudaMemcpy(d_input, h_input, N*sizeof(float), cudaMemcpyHostToDevice);
  long long GPU_copy_to_device_result = stop_timer(GPU_copy_to_device, "GPU Memory Copy to Device");
  //timer and execution of our GPU Kernel
  long long GPU_kernel_start_time = start_timer();
  sine_parallel <<<(N/block_size + 1),block_size>>>(d_input, d_output);
  cudaThreadSynchronize();  // block until the kernel finishes so the timing is meaningful
  long long GPU_kernel_end_time = stop_timer(GPU_kernel_start_time, "GPU Kernel Run Time");
  //timer for memory copy back to host
  long long GPU_copy_to_host = start_timer();
  cudaMemcpy(h_gpu_result, d_output, N*sizeof(float), cudaMemcpyDeviceToHost);
  long long GPU_copy_to_host_result = stop_timer(GPU_copy_to_host, "GPU Memory Copy to Host");
  //printing total GPU run time
  long long Result_of_GPU_run_time = stop_timer(Total_GPU_run_Time, "\nTotal GPU Run Time");
  //END: GPU implementation
  // Checking to make sure the CPU and GPU results match - Do not modify
  int errorCount = 0;
  for (i=0; i<N; i++)
  {
    if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
      errorCount = errorCount + 1;
  }
  if (errorCount > 0)
    printf("\nResult comparison failed.\n Total Errors: %d\n", errorCount);
  else
    printf("\nResult comparison passed.\n");
  // Cleaning up memory
  // Fix: release the device buffers too (d_input/d_output previously leaked).
  cudaFree(d_input);
  cudaFree(d_output);
  free(h_input);
  free(h_cpu_result);
  free(h_gpu_result);
  return 0;
}
|
8,676 | /*******************************************************************************
* serveral useful gpu functions will be defined in this file to facilitate
* the extension scheme
******************************************************************************/
// Pair of one-sided ENO derivatives at a grid node:
// sR — derivative from the right, sL — derivative from the left.
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
// Larger of two doubles.
__device__ inline
double max2(double x, double y)
{
	if (x < y) return y;
	return x;
}
// Smaller of two doubles.
__device__ inline
double min2(double x, double y)
{
	if (x < y) return x;
	return y;
}
// Min-mod limiter: 0 when x and y differ in sign, otherwise the argument
// of smaller magnitude.
__device__ inline
double min_mod(double x, double y)
{
	if (x * y < 0) {
		return 0.0;
	}
	return (fabs(x) < fabs(y)) ? x : y;
}
// Argument with the smaller absolute value (y on ties).
__device__ inline
double min_abs(double x, double y)
{
	if (fabs(x) < fabs(y)) {
		return x;
	}
	return y;
}
// Sign of x as +/-1.0. Note: sign(0) returns -1.0 (x > 0 test).
__device__ inline
double sign(double x)
{
	if (x > 0) {
		return 1.0;
	}
	return -1.0;
}
// Convert (row, col, page) subscripts to a linear index into a
// rows x cols x pges array stored page-major, then column-major within a
// page. Out-of-range subscripts are clamped to the nearest valid index
// (note: clamping, not a periodic wrap).
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
	int r = min2(rows-1, max2(0, row_idx));
	int c = min2(cols-1, max2(0, col_idx));
	int p = min2(pges-1, max2(0, pge_idx));
	return p * rows * cols + c * rows + r;
}
// Fifth-order WENO one-sided derivative built from five consecutive first
// differences v1..v5: blends three third-order candidate stencils with
// weights driven by local smoothness, so the least oscillatory stencils
// dominate near discontinuities.
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
// different choices of ENO derivatives
double phi1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
double phi2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
double phi3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
// smoothness parameter
double S1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
double S2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
double S3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
// epsilon keeps the divisions below well-defined on smooth/flat data
double epsilon = 1e-6;
// 0.1/0.6/0.3 are the optimal linear weights; dividing by (S + eps)^2
// suppresses stencils that cross a discontinuity
double alpha1 = 0.1 / pow( (S1 + epsilon), 2);
double alpha2 = 0.6 / pow( (S2 + epsilon), 2);
double alpha3 = 0.3 / pow( (S3 + epsilon), 2);
// weights for each stencil
double sum = alpha1 + alpha2 + alpha3;
double omega1 = alpha1 / sum;
double omega2 = alpha2 / sum;
double omega3 = alpha3 / sum;
return (omega1*phi1 + omega2*phi2 + omega3*phi3);
}
// given a stencil across the boundary: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// create a new stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
// Centered on p4 (x0 = 0): if the boundary lies within one grid spacing on a
// side (r1 < ds or l1 < ds), the nearest node on that side is replaced by the
// boundary point with the prescribed value h_fore/h_back; otherwise the plain
// uniform-grid neighbors are used. Outputs are the seven (x, h) pairs.
__device__ inline
void select_stencil(double & h3m, double & h2m, double & h1m, double & h0, double & h1, double & h2, double & h3, double & x3m, double & x2m, double & x1m, double & x0, double & x1, double & x2, double & x3, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds, double h_fore, double h_back)
{
h0 = p4; x0 = 0.0;
// forward side: boundary closer than one grid spacing replaces the p5 slot
if(r1<ds){
x1 = r1;
x2 = ds;
x3 = 2*ds;
h1 = h_fore;
h2 = p5;
h3 = p6;
}else{
x1 = ds;
x2 = 2*ds;
x3 = 3*ds;
h1 = p5;
h2 = p6;
h3 = p7;
}
// backward side: boundary closer than one grid spacing replaces the p3 slot
if(l1<ds){
x1m = -l1;
x2m = - ds;
x3m = - 2*ds;
h1m = h_back;
h2m = p3;
h3m = p2;
}else{
x1m = -ds;
x2m = - 2*ds;
x3m = - 3*ds;
h1m = p3;
h2m = p2;
h3m = p1;
}
}
// for stencil (x3m,h3m),(x2m,h2m),(x1m,h1m),(x0,h0),(x1,h1),(x2,h2),(x3,h3) including boundary nodes
// calculate cubic eno derivatives at (x0,h0)
// note that this is a nonuniform stencil
// Builds Newton divided differences up to third order, then at each order
// picks the smoother (smaller-magnitude) difference — the ENO choice — to
// form one-sided derivatives d_fore (forward) and d_back (backward).
__device__ inline
void ENO_cubic_derivative(double & d_fore, double & d_back, double h3m, double h2m, double h1m, double h0, double h1, double h2, double h3, double x3m, double x2m, double x1m, double x0, double x1, double x2, double x3)
{
// divided differences
// first order: d1_<k> lives between the two nodes its subscript straddles
double d1_2_5 = (h3 - h2) / (x3 - x2) ;
double d1_1_5 = (h2 - h1) / (x2 - x1) ;
double d1_0_5 = (h1 - h0) / (x1 - x0) ;
double d1_m0_5 = (h0 - h1m) / (x0 - x1m);
double d1_m1_5 = (h1m - h2m) / (x1m - x2m);
double d1_m2_5 = (h2m - h3m) / (x2m - x3m);
// second order
double d2_2 = (d1_2_5 - d1_1_5) / (x3 - x1) ;
double d2_1 = (d1_1_5 - d1_0_5) / (x2 - x0) ;
double d2_0 = (d1_0_5 - d1_m0_5) / (x1 - x1m);
double d2_m1 = (d1_m0_5 - d1_m1_5) / (x0 - x2m);
double d2_m2 = (d1_m1_5 - d1_m2_5) / (x1m - x3m);
// third order
double d3_1_5 = (d2_2 - d2_1) / (x3 - x0) ;
double d3_0_5 = (d2_1 - d2_0) / (x2 - x1m);
double d3_m0_5 = (d2_0 - d2_m1) / (x1 - x2m);
double d3_m1_5 = (d2_m1 - d2_m2) / (x0 - x3m);
// third-order correction terms; stencil extends toward the smoother side
double a1 = (x0 - x1m) * (x0 - x2m) * min_abs(d3_m0_5, d3_m1_5);
double a2 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double a = (fabs(d2_m1) < fabs(d2_0)) ? a1 : a2;
double b1 = (x0 - x1m) * (x0 - x1) * min_abs(d3_m0_5, d3_0_5);
double b2 = (x0 - x1) * (x0 - x2) * min_abs(d3_0_5, d3_1_5);
double b = (fabs(d2_0) < fabs(d2_1)) ? b1 : b2;
// one-sided derivatives at x0: first difference + limited 2nd + ENO 3rd term
d_back = d1_m0_5 + min_mod(d2_m1,d2_0) * (x0 - x1m) + a;
d_fore = d1_0_5 + min_mod(d2_0, d2_1) * (x0 - x1) + b;
}
// calculate weno derivative at p4: p1<-l3-p2<-l2-p3<-l1-p4-r1->p5-r2->p6-r3->p7
// where px are level set function values at node x
// lx, rx are distance to the left/right node
// Away from the boundary (r1 >= ds and l1 >= ds) this is the standard
// fifth-order WENO one-sided pair; at nodes immediately adjacent to the
// boundary it falls back to a cubic ENO derivative on a stencil that
// includes the boundary point with prescribed values v_fore/v_back.
__device__ inline
void weno_derivative_boundary(double & d_fore, double & d_back, double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds, double v_fore, double v_back)
{
// the condistion below is better than p3*p4<0 || p4*p5<0
bool cross_interface = r1<ds || l1<ds;
if(!cross_interface){
// uniform first differences feeding the WENO stencils
double v1 = (p2 - p1) / ds;
double v2 = (p3 - p2) / ds;
double v3 = (p4 - p3) / ds;
double v4 = (p5 - p4) / ds;
double v5 = (p6 - p5) / ds;
double v6 = (p7 - p6) / ds;
// forward derivative uses the mirrored (reversed) difference sequence
d_back = weno_onesided_derivative(v1,v2,v3,v4,v5);
d_fore = weno_onesided_derivative(v6,v5,v4,v3,v2);
}// if not a node IMMEDIATELY adjacent to the boundary, calculate weno derivatives as usual
else{
double h3m,h2m,h1m,h0,h1,h2,h3;
double x3m,x2m,x1m,x0,x1,x2,x3;
select_stencil(h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,ds,v_fore,v_back);
ENO_cubic_derivative(d_fore,d_back,h3m,h2m,h1m,h0,h1,h2,h3,x3m,x2m,x1m,x0,x1,x2,x3);
}// for nodes IMMEDIATELY adjacent to the boundary, use cubic ENO interpolant
}
// Upwind one-sided derivative at p4: computes both WENO one-sided
// derivatives (with zero boundary values) and keeps the one from the side
// where |lsf| is smaller, i.e. the side facing the interface.
__device__ inline
double upwind_normal_point(double p1, double p2, double p3, double p4, double p5, double p6, double p7, double r1, double r2, double r3, double l1, double l2, double l3, double ds)
{
	double d_back, d_fore;
	weno_derivative_boundary(d_fore, d_back, p1, p2, p3, p4, p5, p6, p7, r1, r2, r3, l1, l2, l3, ds, 0.0, 0.0);
	if (fabs(p5) < fabs(p3)) {
		return d_fore;
	}
	return d_back;
}
// calculate the upwind normal
// One thread per node of the rows x cols x pges grid (3D launch).
// For each node, gathers the 7-point stencil in x, y and z from the level
// set `lsf`, together with the boundary distances (xpr/xpl = right/left,
// ypf/ypb = front/back, zpu/zpd = up/down), and stores the upwind
// derivative components in nx, ny, nz.
// Neighbor indices rely on sub2ind's clamping at the grid edges.
__global__
void upwind_normal(double * nx, double * ny, double * nz, double const * lsf, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
// bounds guard for the rounded-up 3D grid
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
p4 = lsf[ind];
// --- x direction (columns): stencil and boundary distances ---
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
nx[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx);
// --- y direction (rows): stencil and boundary distances ---
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
ny[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy);
// --- z direction (pages): stencil and boundary distances ---
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
nz[ind] = upwind_normal_point(p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz);
}
// Coefficients c0..c3 of the cubic C(x) = c0 + c1*x + c2*x^2 + c3*x^3
// interpolating the four equally spaced samples (0,v0), (s,v1), (2s,v2),
// (3s,v3).
__device__ inline
void cubic_interp_coefficient(double & c0, double & c1, double & c2, double & c3, double v0, double v1, double v2, double v3, double s)
{
c0 = v0;
c1 = ( 3.0 * (v1-v0) - 3.0/2.0 * (v2-v0) + 1.0/3.0 * (v3-v0) ) / s;
c2 = (-5.0/2.0 * (v1-v0) + 2.0 * (v2-v0) - 1.0/2.0 * (v3-v0) ) / pow(s,2);
c3 = ( 1.0/2.0 * (v1-v0) - 1.0/2.0 * (v2-v0) + 1.0/6.0 * (v3-v0) ) / pow(s,3);
}
// modify forward/backward(c_f/b) values at v0:[v4,v1,v0,v2,v3]
// if dis_b/f!=ds, then there is boundary nearby, a cubic interpolant is then constructed
// C(x) = c0 + c1*x + c2*x^2 + c3*c^3 through (0,v1),(ds,v0),(2*ds,v2),(3*ds,v2) assuming boundary is between v0,v2
// and used to calculate c_b/f at boundary crossing nodes
// Outputs default to 0 when no boundary is detected on that side.
__device__ inline
void cubic_interp(double & c_forward, double & c_backward, double dis_f, double dis_b, double ds, double v4, double v1, double v0, double v2, double v3)
{
c_forward = 0;
c_backward = 0;
double c0,c1,c2,c3; // coefficient for cubic interpolant
// if there is a boundary in the forward direction
if(dis_f!=ds){
// cubic through (0,v1),(ds,v0),(2ds,v2),(3ds,v3); v0 sits at x = ds
cubic_interp_coefficient(c0,c1,c2,c3,v1,v0,v2,v3,ds);
double xc = ds + dis_f; // coordinate of the boundary point
c_forward = c0 + c1 * xc + c2 * pow(xc,2) + c3 * pow(xc,3);
}
// if there is a boundary in the backward direction
if(dis_b!=ds){
// cubic through (0,v4),(ds,v1),(2ds,v0),(3ds,v2); v0 sits at x = 2*ds
cubic_interp_coefficient(c0,c1,c2,c3,v4,v1,v0,v2,ds);
double xc = 2*ds - dis_b; // boundary point measured back from v0
c_backward = c0 + c1 * xc + c2 * pow(xc,2) + c3 * pow(xc,3);
}
}
// interpolate values at boundary points
// One thread per grid node (3D launch). For each axis, if a boundary
// crossing lies within one grid spacing (distance arrays xpr/xpl, ypf/ypb,
// zpu/zpd), cubically interpolates `lsf` at the crossing and stores the
// value in the matching cpr/cpl, cpf/cpb, cpu/cpd array (0 otherwise).
__global__
void boundary_interpolate(double * cpr, double * cpl, double * cpf, double * cpb, double * cpu, double * cpd, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
// bounds guard for the rounded-up 3D grid
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
// x direction: neighbors two steps either side (clamped at edges)
int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
cubic_interp(cpr[ind],cpl[ind],xpr[ind],xpl[ind],dx,lsf[left2],lsf[left],lsf[ind],lsf[right],lsf[right2]);
// y direction
int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
cubic_interp(cpf[ind],cpb[ind],ypf[ind],ypb[ind],dy,lsf[back2],lsf[back],lsf[ind],lsf[front],lsf[front2]);
// z direction
int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
cubic_interp(cpu[ind],cpd[ind],zpu[ind],zpd[ind],dz,lsf[down2],lsf[down],lsf[ind],lsf[up],lsf[up2]);
}
// calculate extend step
// now lsf represents a scalar field (not the level set function)
// One thread per grid node (3D launch). Computes one upwind advection step
// of the field `lsf` with velocity (vx, vy, vz): boundary-aware one-sided
// derivatives in each direction, combined upwind (backward derivative for
// positive velocity, forward for negative), scaled by the local time step
// deltat[ind], and written to step[ind].
__global__
void extend_step(double * step, double const * deltat, double const * lsf, double const * vx, double const * vy, double const * vz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, double const * cpr, double const * cpl, double const * cpf, double const * cpb, double const * cpu, double const * cpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
// bounds guard for the rounded-up 3D grid
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double p1,p2,p3,p4,p5,p6,p7;
double r1,r2,r3,l1,l2,l3;
double v_fore, v_back;
p4 = lsf[ind];
// --- x direction: stencil, boundary distances, interpolated boundary values ---
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
p1 = lsf[left3];
p2 = lsf[left2];
p3 = lsf[left1];
p5 = lsf[rght1];
p6 = lsf[rght2];
p7 = lsf[rght3];
r1 = xpr[ind];
r2 = xpr[rght1];
r3 = xpr[rght2];
l1 = xpl[ind];
l2 = xpl[left1];
l3 = xpl[left2];
v_fore = cpr[ind];
v_back = cpl[ind];
double xR, xL;
weno_derivative_boundary(xR,xL,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dx,v_fore,v_back);
// --- y direction ---
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
p1 = lsf[back3];
p2 = lsf[back2];
p3 = lsf[back1];
p5 = lsf[frnt1];
p6 = lsf[frnt2];
p7 = lsf[frnt3];
r1 = ypf[ind];
r2 = ypf[frnt1];
r3 = ypf[frnt2];
l1 = ypb[ind];
l2 = ypb[back1];
l3 = ypb[back2];
v_fore = cpf[ind];
v_back = cpb[ind];
double yF, yB;
weno_derivative_boundary(yF,yB,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dy,v_fore,v_back);
// --- z direction ---
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
p1 = lsf[down3];
p2 = lsf[down2];
p3 = lsf[down1];
p5 = lsf[upup1];
p6 = lsf[upup2];
p7 = lsf[upup3];
r1 = zpu[ind];
r2 = zpu[upup1];
r3 = zpu[upup2];
l1 = zpd[ind];
l2 = zpd[down1];
l3 = zpd[down2];
v_fore = cpu[ind];
v_back = cpd[ind];
double zU, zD;
weno_derivative_boundary(zU,zD,p1,p2,p3,p4,p5,p6,p7,r1,r2,r3,l1,l2,l3,dz,v_fore,v_back);
// upwind combination: negative velocity takes the forward derivative,
// positive velocity the backward one; scaled by the local time step
step[ind] = (min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD ) * deltat[ind];
}
|
8,677 | #define MODE_MANDEL 1
#define MODE_MANDEL_DISTANCE 2
#define MODE_JULIA 3
#define WIDTH gridDim.x*blockDim.x
#define HEIGHT gridDim.y*blockDim.y
#define X ((blockIdx.x * blockDim.x) + threadIdx.x)
#define Y ((blockIdx.y * blockDim.y) + threadIdx.y)
// Compensated ("double-double") product of two hi/lo pairs.
// pFF1 = (hi, lo), pFF2 = (yhi, ylo); the result's .x is the rounded
// product and .y the residual error term, so .x + .y approximates
// pFF1 * pFF2 at roughly twice double precision.  The exact sequence of
// operations is significant; do not reassociate.
__device__ inline double2 mul(const double2 pFF1, const double2 pFF2) {
const double hi = pFF1.x;
const double lo = pFF1.y;
const double yhi = pFF2.x;
const double ylo = pFF2.y;
double t, tau, u, v, w;
t = hi * yhi; /* Highest order double term. */
// Early out when the leading product is exactly zero (under the
// double-double invariant |lo| << |hi| the whole product is then zero).
if (t == 0) {
return make_double2(0,0);
}
// fma recovers the rounding error of hi*yhi exactly: tau = hi*yhi - t.
tau = fma(hi, yhi, -t);
v = hi * ylo;
w = lo * yhi;
tau += v + w; /* Add in other second-order terms. */
u = t + tau;
// Renormalize to canonical (hi, lo) form: lo holds what was rounded away.
return make_double2(u, (t - u) + tau);
}
// Compensated product of a double-double (hi, lo) by a plain double.
// Same scheme as mul() but with ylo == 0, so one cross term drops out.
// The exact operation order is significant; do not reassociate.
__device__ inline double2 mulDouble(const double2 pFF1, const double pDouble) {
const double hi = pFF1.x;
const double lo = pFF1.y;
const double yhi = pDouble;
double t, tau, u, w;
t = hi * yhi; /* Highest order double term. */
// Early out when the leading product is exactly zero.
if (t == 0) {
return make_double2(0,0);
}
// fma recovers the rounding error of hi*yhi exactly.
tau = fma(hi, yhi, -t);
w = lo * yhi;
tau += w; /* Add in other second-order terms. */
u = t + tau;
// Renormalize to canonical (hi, lo) form.
return make_double2(u, (t - u) + tau);
}
// Compensated addition of two double-double numbers: a two-sum of the
// high parts with the rounding error and both low parts folded in, then a
// renormalization.  The exact operation order is significant.
__device__ inline double2 add(const double2 pFF1, const double2 pFF2) {
const double hi = pFF1.x;
const double lo = pFF1.y;
const double yhi = pFF2.x;
const double ylo = pFF2.y;
double z, q, zz, xh;
z = hi + yhi;
q = hi - z;
// zz = exact rounding error of (hi + yhi) plus both low-order parts.
zz = q + yhi + (hi - (q + z)) + lo + ylo;
/* Keep -0 result. */
if (zz == 0.0) {
return make_double2(z,0);
}
xh = z + zz;
// Renormalize into canonical (hi, lo) form.
return make_double2(xh,z - xh + zz);
}
// Compensated addition of a double-double (hi, lo) and a plain double y.
// Same two-sum scheme as add() with ylo == 0.  Operation order matters.
__device__ inline double2 addDouble(const double2 pFF1, const double y) {
double hi = pFF1.x;
double lo = pFF1.y;
double z, q, zz, xh;
z = hi + y;
q = hi - z;
// zz = exact rounding error of (hi + y) plus the low-order part.
zz = q + y + (hi - (q + z)) + lo;
/* Keep -0 result. */
if (zz == 0.0) {
return make_double2(z,0);
}
xh = z + zz;
// Renormalize into canonical (hi, lo) form.
return make_double2(xh,z - xh + zz);
}
// Double-double subtraction: a - b == a + (-b).  Negate both components
// of the subtrahend and delegate to the compensated addition.
__device__ inline double2 sub(const double2 pFF1, const double2 pFF2) {
    const double2 negated = make_double2(-pFF2.x, -pFF2.y);
    return add(pFF1, negated);
}
// Mandelbrot / Julia / distance-estimation kernel using double-double
// ("FF") arithmetic (mul/add/sub helpers above) for deep-zoom precision.
//
// One thread per pixel; tile.z and tile.w are the tile's width and height.
// X/Y macros give the pixel coordinate within the launch grid.
// Outputs (all indexed by X + Y*tileWidth):
//   iters        - iteration count at escape (or maxIterations)
//   lastValuesR/I - final z, collapsed from double-double to double
//   distancesR/I  - running derivative dz/dc (MODE_MANDEL_DISTANCE only)
extern "C"
__global__ void compute(
int *iters,
double *lastValuesR,
double *lastValuesI,
double *distancesR,
double *distancesI,
const int mode,
const int4 tile,
const double2 xStart,
const double2 yStart,
const double2 juliaCr,
const double2 juliaCi,
const double2 xInc,
const double2 yInc,
const int maxIterations,
const double sqrEscapeRadius
) {
if (X >= tile.z || Y >= tile.w) { // tile.z is width of tile, tile.w is height of tile
return;
}
// Pixel coordinate in the complex plane: start + pixelIndex * increment.
const double2 x = add(make_double2(xStart.x, xStart.y), mulDouble(make_double2(xInc.x, xInc.y), X));
const double2 y = add(make_double2(yStart.x, yStart.y), mulDouble(make_double2(yInc.x, yInc.y), Y));
// Julia mode iterates z^2 + c with fixed c; Mandelbrot uses the pixel.
const double2 cr = mode == MODE_JULIA ? juliaCr : x;
const double2 ci = mode == MODE_JULIA ? juliaCi : y;
double2 zr = x;
double2 zi = y;
double2 tmp;
// distance
double2 dr = make_double2(1, 0);
double2 di = make_double2(0, 0);
double2 new_dr;
int count = 0;
for (; count < maxIterations; count++) {
const double2 zrsqr = mul(zr, zr);
const double2 zisqr = mul(zi, zi);
// Escape test on |z|^2; the high word alone is sufficient here.
if (add(zrsqr, zisqr).x >= sqrEscapeRadius) {
break;
}
if (mode == MODE_MANDEL_DISTANCE) {
// new_dr = 2.0f * (zr * dr - zi * di) + 1.0f;
new_dr = addDouble(mulDouble(sub(mul(zr, dr), mul(zi, di)), 2.0), 1.0);
// di = 2.0f * (zr * di + zi * dr);
di = mulDouble(add(mul(zr, di), mul(zi, dr)), 2.0);
dr = new_dr;
}
// z = z^2 + c (complex square via zr^2 - zi^2 and 2*zr*zi).
tmp = add(sub(zrsqr, zisqr), cr);
zi = add(mulDouble(mul(zr, zi), 2.0), ci);
zr = tmp;
}
const int tIndex = X + Y * tile.z; // tile.z is width of tile
iters[tIndex] = count;
// Collapse double-double back to double: hi + lo.
lastValuesR[tIndex] = (double) zr.x + (double) zr.y;
lastValuesI[tIndex] = (double) zi.x + (double) zi.y;
if (mode == MODE_MANDEL_DISTANCE) {
distancesR[tIndex] = (double) dr.x + (double) dr.y;
distancesI[tIndex] = (double) di.x + (double) di.y;
}
}
8,678 | #include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    const double seconds = (double)now.tv_sec;
    const double micros = (double)now.tv_usec;
    return seconds + micros * 1e-6;
}
// Warm-up kernel with the same shape as mathKernel2: the branch condition
// depends only on the warp index, so all lanes of a warp agree (no
// intra-warp divergence).  Each element ends up 100 or 200.
__global__ void warmingup(float *c) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float a = 0.0f;
    float b = 0.0f;
    bool evenWarp = ((tid / warpSize) % 2 == 0);
    if (evenWarp) {
        a = 100.0f;
    } else {
        b = 200.0f;
    }
    c[tid] = a + b;
}
// Worst-case divergence demo: even and odd threads take different
// branches, so every warp diverges and both paths execute under a mask.
__global__ void mathKernel1(float *c) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    float lhs = 0.0f;
    float rhs = 0.0f;
    if (gid % 2 == 0) {
        lhs = 100.0f;
    } else {
        rhs = 200.0f;
    }
    c[gid] = lhs + rhs;
}
// Warp-aligned branching: the condition depends only on the warp index,
// so all 32 lanes of a warp take the same path and no divergence occurs.
__global__ void mathKernel2(float *c) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    float lhs = 0.0f;
    float rhs = 0.0f;
    bool evenWarp = ((gid / warpSize) % 2 == 0);
    if (evenWarp) {
        lhs = 100.0f;
    } else {
        rhs = 200.0f;
    }
    c[gid] = lhs + rhs;
}
// Predicated variant of mathKernel1: the condition is hoisted into a bool
// so the compiler can emit predicated instructions instead of divergent
// branches.
// Fix: the odd-thread value was 100.0f, inconsistent with the equivalent
// odd-thread branch in mathKernel1/mathKernel2 (200.0f); corrected so all
// variants compute identical output and remain comparable benchmarks.
// Also use float literals to avoid accidental double arithmetic.
__global__ void mathKernel3(float *c) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float ia, ib;
    ia = ib = 0.0f;
    bool ipred = (tid % 2 == 0);
    if (ipred) {
        ia = 100.0f;
    }
    if (!ipred) {
        ib = 200.0f;
    }
    c[tid] = ia + ib;
}
// Driver for the branch-divergence benchmark: launches a warm-up kernel,
// then times three variants that compute identical output with different
// branch structure (thread-level divergence, warp-aligned branches,
// predication).  argv[1] = block size, argv[2] = data size (optional).
// Fixes: CUDA API and kernel-launch results are now checked (they were
// silently ignored), and the "elaped" typo in the report lines is
// corrected to "elapsed".
int main(int argc, char **argv) {
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaError_t err = cudaGetDeviceProperties(&deviceProp, dev);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("%s Using Device %d: %s\n", argv[0], dev, deviceProp.name);

    // Launch configuration, overridable from the command line.
    int size = 64;
    int blockSize = 64;
    if (argc > 1) blockSize = atoi(argv[1]);
    if (argc > 2) size = atoi(argv[2]);
    printf("Data size %d\n", size);

    dim3 block(blockSize, 1);
    dim3 grid((size + block.x - 1) / block.x, 1); // ceil-div so all elements are covered
    printf("Execution Configure (block %d grid %d)\n", block.x, grid.x);

    float *d_C;
    size_t nBytes = size * sizeof(float);
    err = cudaMalloc((float**)&d_C, nBytes);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
        return 1;
    }

    double iStart, iElaps;
    cudaDeviceSynchronize();

    iStart = cpuSecond();
    warmingup<<<grid, block>>>(d_C);
    err = cudaGetLastError(); // catch bad launch configuration
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    if (err != cudaSuccess) {
        fprintf(stderr, "warmingup launch: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("warmup <<<%4d %4d>>> elapsed %f sec\n", grid.x, block.x, iElaps);

    iStart = cpuSecond();
    mathKernel1<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("mathKernel1 <<<%4d %4d>>> elapsed %f sec\n", grid.x, block.x, iElaps);

    iStart = cpuSecond();
    mathKernel2<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("mathKernel2 <<<%4d %4d>>> elapsed %f sec\n", grid.x, block.x, iElaps);

    iStart = cpuSecond();
    mathKernel3<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("mathKernel3 <<<%4d %4d>>> elapsed %f sec\n", grid.x, block.x, iElaps);

    /*iStart = cpuSecond();
    mathKernel4<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("mathKernel4 <<<%4d %4d>>> elapsed %f sec\n", grid.x, block.x, iElaps);*/

    cudaFree(d_C);
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
8,679 | #include "includes.h"
// In-place element-wise square root of the first N entries of wt.
__global__ void kernel_sqrtweights(int N, double *wt){
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    /* guard: the launch grid may be padded beyond N */
    if (idx >= N) {
        return;
    }
    wt[idx] = sqrt(wt[idx]);
}
8,680 | // Simple CUDA example
#include <stdio.h>
#include <stdlib.h>
const int N = 16;
// Adds b[i] into a[i], one element per thread (single-block demo).
__global__ void hello(char *a, int *b)
{
    const int i = threadIdx.x;
    a[i] = a[i] + b[i];
}
// Kernel definition Vector Add
// Element-wise vector addition C = A + B; one thread per element,
// single-block launch (no bounds guard by design of the demo).
__global__ void VecAdd(float* A, float* B, float* C)
{
    const int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
// Kernel definition Matrix Add
// Element-wise matrix addition C = A + B for row-major N x N matrices.
// One thread per element: threadIdx.x is the row, threadIdx.y the column.
__global__ void MatAdd(float *A, float *B, float *C, int N)
{
    const int row = threadIdx.x;
    const int col = threadIdx.y;
    const bool inBounds = (row < N) && (col < N);
    if (inBounds)
    {
        const int cell = row * N + col;
        C[cell] = A[cell] + B[cell];
    }
}
// Prints an N x N matrix stored row-major in 'a', preceded by its name.
void printMat(float *a, int N, const char* name)
{
    printf("%s\n", name);
    for (int row = 0; row < N; ++row)
    {
        for (int col = 0; col < N; ++col)
            printf("%.2f\t", a[row * N + col]);
        printf("\n");
    }
}
// Host-side demo: element-wise N x N matrix addition on the GPU.
// Fixes: c is now zero-initialized before the "c before" printout (it was
// printed with indeterminate malloc contents), host allocations are
// checked, and a/b/c are freed before exit (they were leaked).
int main()
{
    // float a[N][N], b[N][N], c[N][N];
    float *a, *b, *c;
    a = (float *)malloc(N*N*sizeof(float));
    b = (float *)malloc(N*N*sizeof(float));
    c = (float *)malloc(N*N*sizeof(float));
    if (a == NULL || b == NULL || c == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    // Initial values; c is zeroed so the "before" printout is deterministic.
    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < N; ++j)
        {
            a[i*N+j] = 0.1f*i*j;
            b[i*N+j] = 0.1f*(i+j*N);
            c[i*N+j] = 0.0f;
        }
    }
    // NxN now
    float *ad, *bd, *cd;
    const int size = N*N*sizeof(float);
    // print a, b, c before
    printMat(a,N,"a");
    printMat(b,N, "b");
    printMat(c,N, "c before");
    cudaMalloc( (void**)&ad, size );
    cudaMalloc( (void**)&bd, size );
    cudaMalloc( (void**)&cd, size );
    cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
    // One NxN thread block; each thread adds one element.
    dim3 dimBlock( N, N );
    dim3 dimGrid( 1, 1 );
    MatAdd<<<dimGrid, dimBlock>>>(ad, bd, cd, N);
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    cudaFree( ad );
    cudaFree( bd );
    cudaFree( cd );
    // print results
    printMat(a,N, "a");
    printMat(b,N, "b");
    printMat(c,N, "c after");
    // Release host buffers (previously leaked).
    free(a);
    free(b);
    free(c);
    return EXIT_SUCCESS;
}
|
8,681 | #include "includes.h"
// For each dancing-links matrix row i handled by this block, accumulates
// into row_group[i] the sum of (column index + 1) over the true cells in
// the first `vertex_num` columns of row i (dl_matrix is row-major with
// total_dl_matrix_col_num columns).
// NOTE(review): row_group[i] is accumulated with += and never cleared
// here -- this assumes the caller zero-initialized row_group; confirm.
// Rows are strided across the block's threads (i advances by blockDim.x).
__device__ void get_vertex_row_group(int *row_group, bool *dl_matrix, const int vertex_num, const int total_dl_matrix_row_num, const int total_dl_matrix_col_num) {
// printf("%d %d\n", vertex_num, total_dl_matrix_row_num);
for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) {
for (int j = 0, cur_index = i * total_dl_matrix_col_num; j < vertex_num;
j++, cur_index++) {
row_group[i] += (int)(dl_matrix[cur_index]) * (j + 1);
}
}
}
// One thread block per graph: block k computes the row groups of graph k,
// using the per-graph offsets to locate its slice of the packed arrays.
__global__ void init_vertex_group(int *row_group, bool *dl_matrix, int *vertex_num, int *t_cn, int *t_rn, int *offset_row, int *offset_matrix, int graph_count) {
    const int graph = blockIdx.x;
    if (graph >= graph_count) {
        return;
    }
    get_vertex_row_group(row_group + offset_row[graph],
                         dl_matrix + offset_matrix[graph],
                         vertex_num[graph], t_rn[graph], t_cn[graph]);
}
8,682 | #include "includes.h"
// Formats SSD detection outputs for one class: for each proposal of class
// `cls` in each batch item, writes a fixed-stride record of
//   nbIdx (=6) header values [5 bbox/score values from roi_bbox, class id]
//   + maxParts   (y,x)  keypoint pairs decoded from inputs_parts
//   + maxTemplates (y,x,z) template triples decoded from inputs_templates
// into `outputs`.  Proposals beyond the per-batch valid-ROI count, and
// part/template slots beyond this class's nbParts/nbTemplates, are zeroed.
//
// Thread mapping: blockIdx.z = batch item, blockIdx.y = part/template
// slot (ptIdx), (threadIdx.x, blockIdx.x) = proposal.
// NOTE(review): `proposal` masks threadIdx.x with 0x1f (warp lane); if
// blockDim.x > 32, distinct threads would map to the same proposal --
// presumably the launch uses blockDim.x <= 32; confirm.
__global__ void cudaS_ssdToOutput_kernels( unsigned int batchSize, unsigned int nbClass, unsigned int nbAnchors, unsigned int channelWidth, unsigned int channelHeight, unsigned int nbProposals, unsigned int* nbValidROIs, unsigned int cls, unsigned int totalParts, unsigned int totalTemplates, unsigned int maxParts, unsigned int maxTemplates, unsigned int cumulParts, unsigned int cumulTemplates, unsigned int nbParts, unsigned int nbTemplates, float xRatio, float yRatio, float xOutputRatio, float yOutputRatio, const float* roi_bbox, const float* roi_anchors, const float* anchors, const float* inputs_parts, const float* inputs_templates, float* outputs)
{
const int batchPos = blockIdx.z;
const int proposal = (threadIdx.x & 0x1f) + blockIdx.x*blockDim.x;
const int ptIdx = blockIdx.y;
const int nbDetectedObject = (int) nbValidROIs[batchPos];
// Record header size: 5 bbox/score values + 1 class id.
const int nbIdx = 6;
if(proposal < nbProposals)
{
// Flat record index: proposals are grouped by class, then by batch item.
const unsigned int n = proposal + cls*nbProposals + batchPos*nbProposals*nbClass;
if(proposal < nbDetectedObject)
{
// Only one ptIdx slot writes the shared header to avoid redundant stores.
if(ptIdx == 0)
{
outputs[0 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[0 + 5*proposal + batchPos*nbProposals*5];
outputs[1 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[1 + 5*proposal + batchPos*nbProposals*5];
outputs[2 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[2 + 5*proposal + batchPos*nbProposals*5];
outputs[3 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[3 + 5*proposal + batchPos*nbProposals*5];
outputs[4 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = roi_bbox[4 + 5*proposal + batchPos*nbProposals*5];
outputs[5 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = (float) cls;
}
// ---- keypoint (part) decoding ----
if(ptIdx < nbParts && totalParts > 0)
{
// (xa, ya, k): anchor cell coordinates and anchor index for this ROI.
const unsigned int xa = roi_anchors[0 + 5*proposal + batchPos*nbProposals*5];
const unsigned int ya = roi_anchors[1 + 5*proposal + batchPos*nbProposals*5];
const unsigned int k = roi_anchors[2 + 5*proposal + batchPos*nbProposals*5];
// Channel layout: per-anchor blocks of nbParts*2 planes (y then x),
// offset by cumulParts for this class.
const int yIdx = xa
+ ya*channelWidth
+ (k*nbParts*2 + cumulParts + ptIdx*2)*channelHeight*channelWidth
+ batchPos*channelHeight*channelWidth*nbAnchors*2*totalParts;
const int xIdx = xa
+ ya*channelWidth
+ (k*nbParts*2 + cumulParts + ptIdx*2 + 1)*channelHeight*channelWidth
+ batchPos*channelHeight*channelWidth*nbAnchors*2*totalParts;
const float partY = inputs_parts[yIdx];
const float partX = inputs_parts[xIdx];
// Reconstruct the anchor box in input coordinates.
const int xa0 = (int)(anchors[k*4] + xa * xRatio);
const int ya0 = (int)(anchors[k*4 + 1] + ya * yRatio);
const int xa1 = (int)(anchors[k*4 + 2] + xa * xRatio);
const int ya1 = (int)(anchors[k*4 + 3] + ya * yRatio);
// Anchors width and height
const int wa = xa1 - xa0;
const int ha = ya1 - ya0;
// Anchor center coordinates (xac, yac)
const float xac = xa0 + wa / 2.0;
const float yac = ya0 + ha / 2.0;
// Decode the anchor-relative offsets to output-image coordinates.
const float predPartY = ((partY) * ha + yac)*yOutputRatio ;
const float predPartX = ((partX) * wa + xac)*xOutputRatio ;
outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = predPartY;
outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = predPartX;
}
// Zero-pad part slots this class does not use.
else if(ptIdx < maxParts && totalParts > 0)
{
outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
}
///for(unsigned int t = 0; t < nbTemplates; ++t)
// ---- template decoding (3 planes per template: y, x, z) ----
if(ptIdx < nbTemplates && totalTemplates > 0)
{
const unsigned int xa = roi_anchors[0 + 5*proposal + batchPos*nbProposals*5];
const unsigned int ya = roi_anchors[1 + 5*proposal + batchPos*nbProposals*5];
const unsigned int k = roi_anchors[2 + 5*proposal + batchPos*nbProposals*5];
const int yIdx = xa
+ ya*channelWidth
+ (k*nbTemplates*3 + cumulTemplates + ptIdx*3)*channelHeight*channelWidth
+ batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates;
const int xIdx = xa
+ ya*channelWidth
+ (k*nbTemplates*3 + cumulTemplates + ptIdx*3 + 1)*channelHeight*channelWidth
+ batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates;
const int zIdx = xa
+ ya*channelWidth
+ (k*nbTemplates*3 + cumulTemplates + ptIdx*3 + 2)*channelHeight*channelWidth
+ batchPos*channelHeight*channelWidth*nbAnchors*3*totalTemplates;
// Templates are predicted in log space; expf maps them back.
const float templateY = expf(inputs_templates[yIdx]);
const float templateX = expf(inputs_templates[xIdx]);
const float templateZ = expf(inputs_templates[zIdx]);
outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateY;
outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateX;
outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = templateZ;
}
// Zero-pad template slots this class does not use.
else if(ptIdx < maxTemplates && totalTemplates > 0)
{
outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
}
}
else
{
// Proposal slot beyond the number of detected objects: zero the record.
outputs[0 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0;
outputs[1 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0;
outputs[2 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0;
outputs[3 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0;
outputs[4 + n*(nbIdx + maxParts*2 + maxTemplates*3)] = 0.0;
//for(unsigned int p = 0; p < nbParts; ++p)
if(ptIdx < maxParts && totalParts > 0)
{
outputs[ptIdx*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
outputs[ptIdx*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
}
//for(unsigned int t = 0;t < nbTemplates; ++t)
if(ptIdx < maxTemplates && totalTemplates > 0)
{
outputs[ptIdx*3 + maxParts*2 + 0 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
outputs[ptIdx*3 + maxParts*2 + 1 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
outputs[ptIdx*3 + maxParts*2 + 2 + nbIdx + n*(nbIdx + maxParts*2 + maxTemplates*3) ] = 0.0;
}
}
}
}
8,683 | #include "includes.h"
using namespace std;
// Computes the full pairwise Euclidean distance matrix between instances.
// One thread per ordered (instance1, instance2) pair.
//
// dataset:   numInstances x numAttributes, row-major; attribute 0 is an
//            id and is excluded from the distance.
// distances: numInstances x numInstances output, symmetric.
//
// Improvements: the column index is computed with the modulo operator
// instead of the equivalent `tid - (tid/n)*n`, and sqrtf replaces sqrt to
// avoid a float->double->float round trip on a float accumulator.
__global__ void computeDistances(int numInstances, int numAttributes, float* dataset, float* distances)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= numInstances * numInstances)
        return;

    const int row = tid / numInstances;    // instance1 index
    const int column = tid % numInstances; // instance2 index

    float sum = 0.0f;
    const int instance1 = row * numAttributes;
    const int instance2 = column * numAttributes;
    // start at 1 so we don't compare the id of each city
    for (int atIdx = 1; atIdx < numAttributes; atIdx++)
    {
        const float diff = dataset[instance1 + atIdx] - dataset[instance2 + atIdx];
        sum += diff * diff;
    }
    const float dist = sqrtf(sum);
    distances[row * numInstances + column] = dist;
    // Mirror write: the (column,row) thread computes the identical value
    // (squared differences are symmetric), so this duplicate store is
    // benign and fills the other half of the matrix.
    distances[column * numInstances + row] = dist;
}
8,684 | //#pragma once
//// CUDA Runtime
//#include <cuda_runtime.h>
//#include <device_functions.h>
//#include <device_launch_parameters.h>
//// Utilities and system includes
//#include <helper_cuda.h>
//#include <helper_functions.h>
//#include <thrust/device_vector.h>
//#include <thrust/scan.h>
//#include <iostream>
//#include <math.h>
//#include "XModel.h"
//#include "math_functions.hpp"
//#include "cuda_runtime_api.h"
//
//using namespace cudacp;
//
//#ifndef MIN
//#define MIN(x,y) ((x < y) ? x : y)
//#endif
//// num_threads
//static const int num_threads = 128;
//static const int U32_SIZE = sizeof(u32); ///<4
//static const int U32_BIT = U32_SIZE * 8; ///<32
//static const int U32_POS = 5;
//static const int U32_MOD_MASK = 31;
//
//struct int_predicate
//{
// __host__ __device__ bool operator()(const int x)
// {
// return x > 0;
// }
//};
//
//// һbitDom[x]ij
//__constant__ int D_BITDOM_INTSIZE;
//// bitDomij
//
//__constant__ int D_BITDOMS_INTSIZE;
//
//// bit֧֣ uint2 bitSup[c][a][idx].x = bitSup[c,x,a,idx]
//// bit֧֣ uint2 bitSup[c][a][idx].y = bitSup[c,y,a,idx]
////__device__ uint2*** d_bitSup;
////__host__ uint2*** h_bitSup;
////__device__ u32** d_bitDom;
////__host__ u32** h_bitDom;
////__device__ u32**
//__constant__ int D_NUM_BD_BLOCK;
//__constant__ int D_NUM_CS_SIZE_BLOCKS;
////////////////////////////////////////////////////////////////////////////
//// һЩGPU
////////////////////////////////////////////////////////////////////////////
//__device__ __managed__ int NUM_BD_BLOCK;
//__device__ __managed__ int NUM_CS_SIZE_BLOCKS;
//// һint
//__device__ __managed__ int BITDOM_INTSIZE;
//// ϵint
//__device__ __managed__ int BITDOMS_INTSIZE;
//// һԼbitsupint
//__device__ __managed__ int BITSUP_INTSIZE;
//// Լϵbitsupint
//__device__ __managed__ int BITSUPS_INTSIZE;
////ܳ
//__device__ __managed__ int BITSUBDOMS_INTSIZE;
////
//__device__ __managed__ int VS_SIZE;
////Լ
//__device__ __managed__ int CS_SIZE;
////ԼѹBLOCK
//__device__ __managed__ int MCC_BLOCK;
////////////////////////////////////////////////////////////////////////////
//// һЩGPU
////////////////////////////////////////////////////////////////////////////
//__device__ __managed__ int M_Qsize;
//
////////////////////////////////////////////////////////////////////////////
//// GPUԼ¼Ϣɸ
////////////////////////////////////////////////////////////////////////////
//// ÿintС
//__device__ __managed__ int *vars_size;
//// 洢Լscopeint3scope.x: x.id; scope.y: y.id; scope.z: c.id
//__device__ __managed__ int3* scope;
//// dom
//__device__ __managed__ int MAX_DOM_SIZE;
//// subCon
//__device__ __managed__ int SUBCON_SIZE;
//
//
////__device__ __managed__ int BITDOM_SIZE;
////__device__ __managed__ int
//// ݽṹʹUM
//// ʾԼ
//__device__ __managed__ u32* bitDom;
//// ʾԼ
//__device__ __managed__ uint2* bitSup;
//////ƶУ洢Լid
////__device__ __managed__ int *mainCon;
//////ݽṹ
////ʾԼ
//__device__ __managed__ u32* bitSubDom;
//////ƶУ洢Լid subCon.x: variablesubCon.y: valuesubCon.z: c.id
////__device__ __managed__ ushort3* subCon;
////ǷɾʼȫΪ1
//__device__ __managed__ int* M_VarPre;
////ԼǷ飬ʼȫΪ1
//__device__ __managed__ int* M_ConPre;
////Լ(ѹ)
//__device__ __managed__ int* M_ConEvt;
////Լ
//__device__ __managed__ int* M_Con;
//
////ǷɾʼȫΪ1
//__device__ __managed__ int* S_VarPre;
////¼
//__device__ __managed__ uint3* S_Var;
////ԼǷ飬ʼȫΪ1
//__device__ __managed__ int* S_ConPre;
////Լ(ѹ)
//__device__ __managed__ int3* S_ConEvt;
////Լ
//__device__ __managed__ int3* S_Con;
////
//int* MCC_BlocksCount;
//int* MCC_BlocksOffset;
//
//thrust::device_vector<int> MCC_BCount;
//thrust::device_vector<int> MCC_BOffset;
////
////__device__ __managed__ ushort4* subVar;
//////ⷢĶıidʼȫΪ0
////__device__ __managed__ unsigned short* subEvtVar;
//////ⷢĶԼidʼȫΪ1
////__device__ __managed__ int* subEvtCon;
//
//static const u32 U32_MASK1[32] = {
// 0x80000000, 0x40000000, 0x20000000, 0x10000000,
// 0x08000000, 0x04000000, 0x02000000, 0x01000000,
// 0x00800000, 0x00400000, 0x00200000, 0x00100000,
// 0x00080000, 0x00040000, 0x00020000, 0x00010000,
// 0x00008000, 0x00004000, 0x00002000, 0x00001000,
// 0x00000800, 0x00000400, 0x00000200, 0x00000100,
// 0x00000080, 0x00000040, 0x00000020, 0x00000010,
// 0x00000008, 0x00000004, 0x00000002, 0x00000001,
//};
//
//static const u32 U32_MASK0[32] = {
// 0x7FFFFFFF, 0xBFFFFFFF, 0xDFFFFFFF, 0xEFFFFFFF,
// 0xF7FFFFFF, 0xFBFFFFFF, 0xFDFFFFFF, 0xFEFFFFFF,
// 0xFF7FFFFF, 0xFFBFFFFF, 0xFFDFFFFF, 0xFFEFFFFF,
// 0xFFF7FFFF, 0xFFFBFFFF, 0xFFFDFFFF, 0xFFFEFFFF,
// 0xFFFF7FFF, 0xFFFFBFFF, 0xFFFFDFFF, 0xFFFFEFFF,
// 0xFFFFF7FF, 0xFFFFFBFF, 0xFFFFFDFF, 0xFFFFFEFF,
// 0xFFFFFF7F, 0xFFFFFFBF, 0xFFFFFFDF, 0xFFFFFFEF,
// 0xFFFFFFF7, 0xFFFFFFFB, 0xFFFFFFFD, 0xFFFFFFFE,
//};
//
////__forceinline__ int GetBitDomIndex(int var_id)
////{
//// return var_id * BITDOM_INTSIZE;
////}
//
//// xindexbitDomλ
//#define GetBitDomIndex(x, i) (x * BITDOM_INTSIZE + i)
//// ֵֵĸȡbitʾƫ
//#define GetOffSet(x)(U32_BIT - (x & U32_MOD_MASK))
//
//#define GetBitSubDomStartIndex(x,a)((x * MAX_DOM_SIZE + a) * BITDOMS_INTSIZE)
//#define GetBitSubDomIndex(x, a, y, i)(GetBitSubDomStartIndex(x,a) + GetBitDomIndex(y, i))
//
//__device__ bool IsGtZero(int x)
//{
// return x > 0;
//}
//
//__inline__ __device__ __host__ int GetTopNum(int num_elements, int num_threads)
//{
// return (num_elements + (num_threads - 1)) / num_threads;
//}
////************************************
//// Method: intsizeof
//// FullName: intsizeof
//// Access: public
//// Returns: int
//// Qualifier: ȡbitʾint
//// Parameter: const int x
////************************************
//inline int intsizeof(const int x)
//{
// return (int)ceil((float)x / U32_BIT);
//}
//
//__device__ __inline__ int pow2i(int e)
//{
// return 1 << e;
//}
////__global__ void enforceACMain(u32* bitDom, u32* bitSup, u32* M_Con, u32* M_ConEvt, u32* M_ConPre, u32* M_VarPre)
////{
////
////}
//
////ͨѸıԼ
//__global__ void GenConPre(int *VarPre, int* BlocksCount, int3* scp, int len)
//{
// const int idx = blockDim.x*blockIdx.x + threadIdx.x;
// if (idx < len)
// {
// int3 sp = scp[idx];
// int pred;
// if (VarPre[sp.x] == 1 || VarPre[sp.y] == 1)
// pred = 1;
// else
// pred = 0;
//
// int BC = __syncthreads_count(pred);
//
// if (threadIdx.x == 0)
// {
// BlocksCount[threadIdx.x] = BC;
// }
// }
//}
//
//__global__ void CompactQ(int *VarPre, int* ConEvt, int* BOffset, int3* scp, int len)
//{
// int idx = threadIdx.x + blockIdx.x*blockDim.x;
// extern __shared__ int warpTotals[];
// //һ߳̿128߳
// //һ4߳
// if (idx < len)
// {
// int3 sp = scp[idx];
// int pred;
// //ж
// if (VarPre[sp.x] == 1 || VarPre[sp.y] == 1)
// pred = 1;
// else
// pred = 0;
//
// //warp index
// //߳
// int w_i = threadIdx.x / warpSize;
// //thread index within a warp
// //߳߳
// int w_l = idx % warpSize;
// //thread mask (ERROR IN THE PAPERminus one is required)
// //߳
// //INT_MAX = 1111 1111 1111 1111 1111 1111 1111 1111
// //߳id=032-0-1 = 31λ Ҳʣ1λ
// //߳id=532-5-1 = 26λ Ҳʣ6λ
// //߳id=3132-31-1 = 0λ Ҳʣ32λ
// //߳threid| 31~~~~~~0
// //ballotӦλ| 1......1
// int t_m = INT_MAX >> (warpSize - w_l - 1);
// //balres = number whose ith bit is one if the ith's thread pred is true masked up to the current index in warp
// //߳ھֲpred = 1밴λ뵫˵߳idļ¼ֻǰж
// int b = __ballot(pred) & t_m;
// //popc count the number of bit one. simply count the number predicated true BEFORE MY INDEX
// //ֻ㵱ǰ߳ӦǰNλ֮
// //Ϊ߳ɨ
// int t_u = __popc(b);
//
// //ÿ߳һ߳д빲ڴ棬ӦidΪ߳ID߳IDӻأ
// //͵ֵд빲ڴ棬͵ֵûб
// //warpTotalsΪ4
// if (w_l == warpSize - 1)
// warpTotals[w_i] = t_u + pred;
//
// __syncthreads();
//
// //߳idΪ0߳߳idblockDim.x = 128w_l < 128/32 = 4
// //߳̿ڵһ߳ǰ4߳w_l < ߳4ÿ߳һ߳
// if (w_i == 0 && w_l < blockDim.x / warpSize)
// {
// int w_i_u = 0;
// for (int j = 0; j <= 5; ++j)
// {
// //# of the ones in the j'th digit of the warp offsets
// //0->5 6λã
// //000 001
// //000 010
// //000 100
// //001 000
// //010 000
// //100 000
// int b_j = __ballot(warpTotals[w_l] & pow2i(j));
// w_i_u += (__popc(b_j & t_m)) << j;
// //printf("indice %i t_m=%i,j=%i,b_j=%i,w_i_u=%i\n",w_l,t_m,j,b_j,w_i_u);
// }
// warpTotals[w_l] = w_i_u;
// }
// __syncthreads();
//
// if (pred)
// ConEvt[t_u + warpTotals[w_i] + BOffset[blockIdx.x]] = scp[idx].z;
//
// }
//}
//
//void CompactQueueMain()
//{
// //Լ
// //P1
// GenConPre << <MCC_BLOCK, num_threads >> >(M_VarPre, MCC_BlocksCount, scope, CS_SIZE);
// cudaDeviceSynchronize();
// //P2
// thrust::exclusive_scan(MCC_BCount.begin(), MCC_BCount.end(), MCC_BOffset.begin());
// cudaDeviceSynchronize();
// //P3
// //ÿԼһ߳̽йԼ,ڴС = һ߳ĸ,װ߳
// CompactQ << <MCC_BLOCK, num_threads, sizeof(int)*(num_threads / warpSize) >> >(M_VarPre, M_Con, MCC_BlocksOffset, scope, CS_SIZE);
//}
//
//#define GetBitSupIndexByINTPrstn(cid,x_val,y_val) (cid * BITSUP_INTSIZE + x_val * BITDOM_INTSIZE + y_val)
//
//__inline__ __device__ __host__ int2 GetBitSupIndexByTuple(int cid, int2 t)
//{
// return make_int2(cid * BITSUP_INTSIZE + t.x * BITDOM_INTSIZE + (t.y >> U32_POS), cid * BITSUP_INTSIZE + t.y * BITDOM_INTSIZE + (t.x >> U32_POS));
//}
//
//__inline__ __device__ __host__ int GetBitSupIndexById(int cid)
//{
// return cid * BITSUP_INTSIZE;
//}
//
//void DelGPUModel();
//
//void BuildBitModel(XModel *xm)
//{
//#pragma region 㳣
// BITDOM_INTSIZE = intsizeof(xm->feature.max_dom_size);
// MAX_DOM_SIZE = xm->feature.max_dom_size;
// VS_SIZE = xm->feature.vs_size;
// CS_SIZE = xm->feature.cs_size;
// BITDOMS_INTSIZE = BITDOM_INTSIZE * VS_SIZE;
// BITSUP_INTSIZE = MAX_DOM_SIZE * BITDOM_INTSIZE;
// BITSUPS_INTSIZE = BITSUP_INTSIZE * CS_SIZE;
// BITSUBDOMS_INTSIZE = VS_SIZE * MAX_DOM_SIZE * BITDOMS_INTSIZE;
// SUBCON_SIZE = VS_SIZE * MAX_DOM_SIZE * CS_SIZE;
//#pragma endregion 㳣
//#pragma region ԼϢ
// //cudaMallocManaged(&vars_size, sizeof(int) * VS_SIZE);
// //// ʼС
// //for (int i = 0; i < xm->feature.vs_size; ++i)
// //{
// // XVar* v = xm->vars[i];
// // XDom* d = xm->doms[v->dom_id];
// // vars_size[i] = d->size;
// //}
//
// // ʼscope
// cudaMallocManaged(&scope, sizeof(int3) * CS_SIZE);
// for (int i = 0; i < CS_SIZE; ++i)
// {
// XCon *c = xm->cons[i];
// scope[i].x = c->scope[0];
// scope[i].y = c->scope[1];
// scope[i].z = c->id;
// }
//
// ////ʾ
// //for (int i = 0; i < CS_SIZE; ++i)
// //{
// // printf("scope[%d] = {%d, %d}\n", scope[i].z, scope[i].x, scope[i].y);
// //}
//#pragma endregion ԼϢ
//#pragma region bitDom
// cudaMallocManaged(&bitDom, sizeof(u32) * BITDOMS_INTSIZE);
// cudaMallocManaged(&M_VarPre, sizeof(int) * VS_SIZE);
//
// for (int i = 0; i < VS_SIZE; ++i)
// {
// XVar* v = xm->vars[i];
// XDom* d = xm->doms[v->dom_id];
// const int dom_size = d->size;
// // ǰʵINT
// const int dom_int_size = intsizeof(dom_size);
//
// for (int j = 0; j < BITDOM_INTSIZE; ++j)
// {
// const int idx = GetBitDomIndex(i, j);
// //printf("idx = %d\n", idx);
// //
// if (j < dom_int_size - 1)
// bitDom[idx] = UINT32_MAX;
// else if (j == dom_int_size - 1)
// bitDom[idx] = UINT32_MAX << GetOffSet(dom_size);
// else
// bitDom[idx] = 0;
// }
//
// M_VarPre[i] = 1;
// }
//
// //for (int i = 0; i < VS_SIZE; ++i)
// //{
// // for (int j = 0; j < BITDOM_INTSIZE; ++j)
// // {
// // int idx = GetBitDomIndex(i, j);
// // printf("var = %d, j = %d, idx = %d, bitDom = %x, pre= %x\n", i, j, idx, bitDom[idx], M_VarPre[i]);
// // }
// //}
//#pragma endregion bitDom
//#pragma region bitSubDom
// cudaMallocManaged(&bitSubDom, sizeof(u32)*BITDOMS_INTSIZE*VS_SIZE*MAX_DOM_SIZE);
// for (int i = 0; i < VS_SIZE; ++i)
// {
// for (int j = 0; j < MAX_DOM_SIZE; ++j)
// {
// const int start_idx = GetBitSubDomStartIndex(i, j);
// for (int k = 0; k < BITSUBDOMS_INTSIZE; ++k)
// bitSubDom[start_idx + k] = bitDom[k];
// //(i,j)bitDom ĵ
// //ȡi,j,iʼַ
// const int ijistart = start_idx + i*BITDOM_INTSIZE;
// for (int k = 0; k < BITDOM_INTSIZE; ++k)
// // jKķΧ:j/32,j%32λΪ1
// if (k == j >> U32_POS)
// bitSubDom[ijistart + k] = U32_MASK1[j&U32_MOD_MASK];
// //λΪ0
// else
// bitSubDom[ijistart + k] = 0;
// }
// }
//
// //for (int i = 0; i < VS_SIZE; ++i)
// //{
// // for (int j = 0; j < MAX_DOM_SIZE; ++j)
// // {
// // printf("sub problem:(%d, %d): ", i, j);
// // const int start_idx = GetBitSubDomStartIndex(i, j);
// // for (int k = 0; k < BITDOMS_INTSIZE; ++k)
// // {
// // printf("%x ", bitSubDom[start_idx + k]);
// // }
// // printf("\n");
// // }
// //}
//#pragma endregion bitSubDom
//#pragma region bitSup
// cudaMallocManaged(&bitSup, sizeof(uint2) * BITSUPS_INTSIZE);
// for (int i = 0; i < CS_SIZE; ++i)
// {
// XCon* c = xm->cons[i];
// XRel* r = xm->rels[c->rel_id];
// XVar* v[2] = { xm->vars[c->scope[0]], xm->vars[c->scope[1]] };
// XDom* d[2] = { xm->doms[v[0]->dom_id], xm->doms[v[1]->dom_id] };
//
// //ʼλ
// for (int j = 0; j < MAX_DOM_SIZE; ++j)
// {
// for (int k = 0; k < BITDOM_INTSIZE; ++k)
// {
// const int idx = GetBitSupIndexByINTPrstn(c->id, j, k);
// if (j < d[0]->size && (k < (d[1]->size >> U32_POS)))
// {
// //֧ȡ0x0000..., ͻȡ0xFFF...
// bitSup[idx].x = (r->sem == SEM_CONFLICT) ? UINT32_MAX : 0;
// bitSup[idx].y = (r->sem == SEM_CONFLICT) ? UINT32_MAX : 0;
// }
// else if (k == (d[1]->size >> U32_POS))
// {
// bitSup[idx].x = (r->sem == SEM_CONFLICT) ? UINT32_MAX : 0;
// bitSup[idx].y = (r->sem == SEM_CONFLICT) ? UINT32_MAX : 0;
// bitSup[idx].x <<= U32_BIT - (d[1]->size & U32_MOD_MASK);
// bitSup[idx].y <<= U32_BIT - (d[1]->size & U32_MOD_MASK);
// }
// else
// {
// bitSup[idx].x = 0;
// bitSup[idx].y = 0;
// }
// }
// }
// //λֵ
// for (int j = 0; j < r->size; ++j)
// {
// const int2 t = make_int2(r->tuples[j][0], r->tuples[j][1]);
// //printf("c_id= %d, %d, %d\n", c->id, t.x, t.y);
// const int2 idx = GetBitSupIndexByTuple(c->id, t);
// //printf("idx = %d, %d\n", idx.x, idx.y);
// if (r->sem == SEM_SUPPORT)
// {
// bitSup[idx.x].x |= U32_MASK1[t.y & U32_MOD_MASK];
// bitSup[idx.y].y |= U32_MASK1[t.x & U32_MOD_MASK];
// }
// else
// {
// bitSup[idx.x].x &= U32_MASK0[t.y & U32_MOD_MASK];
// bitSup[idx.y].y &= U32_MASK0[t.x & U32_MOD_MASK];
// }
// }
// //// ʼλ
// //for (int j = 0; j < MAX_DOM_SIZE; ++j)
// //{
// // printf("c_id = %d, j = %d: ", i, j);
// // for (int k = 0; k < BITDOM_INTSIZE; ++k)
// // {
// // const int idx = GetBitSupIndexByINTPrstn(c->id, j, k);
// // printf("%x, %x", bitSup[idx].x, bitSup[idx].y);
// // }
// // printf("\n");
// //}
// }
//#pragma endregion bitSup
//#pragma region Լ
// cudaMallocManaged(&M_Con, sizeof(int)*CS_SIZE);
// cudaMallocManaged(&M_ConEvt, sizeof(int) * CS_SIZE);
// cudaMallocManaged(&M_ConPre, sizeof(int)*CS_SIZE);
//
// for (int i = 0; i < CS_SIZE; ++i)
// {
// M_Con[i] = i;
// M_ConEvt[i] = i;
// M_ConPre[i] = 1;
// //printf("i = %d , M_Con = %d, M_ConEvt = %d, M_ConPre = %d\n", i, M_Con[i], M_ConEvt[i], M_ConPre[i]);
// }
//#pragma endregion Լ
//#pragma region Լ
// cudaMallocManaged(&S_ConPre, sizeof(int)*SUBCON_SIZE);
// cudaMallocManaged(&S_ConEvt, sizeof(int3)*SUBCON_SIZE);
// cudaMallocManaged(&S_Con, sizeof(int3)*SUBCON_SIZE);
// cudaMallocManaged(&S_VarPre, sizeof(int)*VS_SIZE*MAX_DOM_SIZE*VS_SIZE);
// cudaMallocManaged(&S_Var, sizeof(int3)*VS_SIZE*MAX_DOM_SIZE*VS_SIZE);
//
// for (int i = 0; i < VS_SIZE; ++i)
// {
// const XVar* v = xm->vars[i];
// for (int j = 0; j < MAX_DOM_SIZE; ++j)
// {
// for (int k = 0; k < CS_SIZE; ++k)
// {
// // (i, j) kΪԼid
// const int idx = (i*MAX_DOM_SIZE + j)*CS_SIZE + k;
// //i*xm->feature.max_dom_size*xm->feature.cs_size + j*xm->feature.cs_size + k;
//
// S_Con[idx].x = i;
// S_Con[idx].y = j;
// S_Con[idx].z = k;
//
// S_ConEvt[idx].x = i;
// S_ConEvt[idx].y = j;
// S_ConEvt[idx].z = k;
//
// S_ConPre[idx] = 1;
// //printf("S_Con = (%d, %d, %d), S_ConEvt = (%d, %d, %d), pre = %d\n", S_Con[idx].x, S_Con[idx].y, S_Con[idx].z, S_ConEvt[idx].x, S_ConEvt[idx].y, S_ConEvt[idx].z, S_ConPre[idx]);
// }
//
// for (int k = 0; k < VS_SIZE; ++k)
// {
// //(i, j) kΪid
// const int idx = (i*MAX_DOM_SIZE + j)*VS_SIZE + k;
// S_Var[idx].x = i;
// S_Var[idx].y = j;
// S_Var[idx].z = k;
//
// S_VarPre[idx] = 1;
//
// //printf("S_Var = (%d, %d, %d), S_VarPre = %d\n", S_Var[idx].x, S_Var[idx].y, S_Var[idx].z, S_VarPre[idx]);
// }
// }
// }
//#pragma endregion Լ
//
//#pragma region й
// //ѹBLOCK
// MCC_BLOCK = GetTopNum(CS_SIZE, num_threads);
// MCC_BCount.resize(MCC_BLOCK, 0);
// MCC_BOffset.resize(MCC_BLOCK, 0);
// MCC_BlocksCount = thrust::raw_pointer_cast(MCC_BCount.data());
// MCC_BlocksOffset = thrust::raw_pointer_cast(MCC_BOffset.data());
//#pragma endregion
//
//}
//
//__global__ void ConCheckMain(int* ConEvt, int* btSp, int2* scp)
//{
// const int c_id = blockIdx.x;
// //ȡԼbitSupĿʼ
// const int start_idx = GetBitSupIndexById(c_id);
// const int2 sp = scp[c_id];
// extern __shared__ int2[];
//}
//
//void ConstraintCheckMain()
//{
// //num_threadsÿɱ
// ConCheckMain << <CS_SIZE, num_threads >> >();
//}
//
//float SACGPU()
//{
// //1. ִAC
// //1.1. ѹ
// CompactQueueMain();
// //1.2. Լ
// ConstraintCheckMain();
//}
//
//void DelGPUModel()
//{
// cudaFree(scope);
// cudaFree(bitDom);
// cudaFree(M_VarPre);
// cudaFree(bitSubDom);
// cudaFree(bitSup);
// cudaFree(M_Con);
// cudaFree(M_ConEvt);
// cudaFree(M_ConPre);
// cudaFree(S_ConPre);
// cudaFree(S_ConEvt);
// cudaFree(S_Con);
// cudaFree(S_Var);
// cudaFree(S_VarPre);
//}
//
//
|
8,685 | #include "cost.hh"
#include "../ops/ops-builder.hh"
#include "../ops/softmax-cross-entropy.hh"
#include "../ops/mse.hh"
/**
 * Build a quadratic (mean-squared-error) cost node comparing targets `y`
 * against predictions `y_hat`. The node is created by — and owned by —
 * the process-wide OpsBuilder singleton.
 */
ops::Op* quadratic_cost(ops::Op* y, ops::Op* y_hat)
{
auto& builder = ops::OpsBuilder::instance();
return builder.mse(y, y_hat);
}
/**
 * Build a fused softmax + cross-entropy cost node from targets `y` and raw
 * (pre-softmax) `logits`. Created and owned by the OpsBuilder singleton.
 */
ops::Op* softmax_cross_entropy(ops::Op* y, ops::Op* logits)
{
auto& builder = ops::OpsBuilder::instance();
return builder.softmax_cross_entropy(y, logits);
}
|
__global__ void conv(const float *A, const float *B, int aw, int ah, int bw, int bh, int b_sum, float *C){
    /* 2-D "valid" convolution of image A (aw x ah) with kernel B (bw x bh,
       odd dimensions assumed), normalized by b_sum. The output C has size
       (aw-bw+1) x (ah-bh+1). One thread per output pixel; out-of-range
       threads are guarded out below. */
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int row = by*blockDim.y + ty;
    int col = bx*blockDim.x + tx;
    /* "Padding" radius of the kernel (distance around the central pixel). */
    int pw = (bw-1)/2;
    int ph = (bh-1)/2;
    /* Only threads within the range of C (i.e. A minus the padding border). */
    if( row < (ah-2*ph) && col < (aw-2*pw) ) {
        /* BUG FIX: accumulate in float, not int. The old int accumulator
           truncated every float partial product on accumulation, and
           val/b_sum then performed integer division before the float store. */
        float val = 0.0f;
        /* For each vertical kernel position relative to the central pixel */
        for(int i=-ph; i<=ph; i=i+1){
            int b_row = i+ph;                /* zero-indexed kernel row */
            for(int j=-pw; j<=pw; j=j+1){
                int b_col = j+pw;            /* zero-indexed kernel column */
                /* Accumulate kernel value times corresponding image value. */
                val += A[ (row+ph +i)*aw + (col+pw +j) ] * B[ b_row*bw + b_col ];
            }
        }
        /* Normalize (float division, since val is float) and store. */
        C[row*(aw-2*pw) + col] = val / b_sum;
    }
}
8,687 | extern "C" {
/*
#define BLOCK_SIZE 16
__global__ void matrix_multiply(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
__shared__ float matA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float matB[BLOCK_SIZE][BLOCK_SIZE];
const int tidc = threadIdx.x;
const int tidr = threadIdx.y;
const int bidc = blockIdx.x * BLOCK_SIZE;
const int bidr = blockIdx.y * BLOCK_SIZE;
int i, j;
float results = 0;
float comp = 0;
for(j = 0; j < n; j += BLOCK_SIZE) {
matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];
__syncthreads();
for(i = 0; i < BLOCK_SIZE; i++) {
float t;
comp -= matA[tidr][i] * matB[i][tidc];
t = results - comp;
comp = (t - results) + comp;
results = t;
}
__syncthreads();
}
c[(tidr + bidr) * ldc + tidc + bidc] = results;
}
*/
__global__ void matrix_multiply_0(const float* _A,const float *_B,float* _C,int _wa,int _wb)
{
    // Naive GEMM: thread (row, col) computes the single output element
    // C(row, col) of C = A * B, where A has _wa columns and B has _wb columns.
    // NOTE(review): no bounds guard — the launch grid must exactly cover the
    // output matrix, otherwise excess threads read/write out of range.
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0.0f;
    for (int k = 0; k < _wa; ++k)
        acc += _A[row * _wa + k] * _B[k * _wb + col];
    _C[row * _wb + col] = acc;
}
__global__ void matrix_multiply_1(float *A, float *B, float *C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns) {
    // Bounds-guarded naive GEMM: C (numCRows x numCColumns) = A * B.
    // One thread per output element; threads outside C exit immediately.
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= numCRows || col >= numCColumns)
        return;
    float acc = 0.0f;
    for (int k = 0; k < numAColumns; ++k)
        acc += A[row * numAColumns + k] * B[k * numBColumns + col];
    C[row * numBColumns + col] = acc;
}
__global__ void matrix_elementwise_multiply(float * A,float * B,float *C,int width,int height){
    // Hadamard (elementwise) product: C[y][x] = A[y][x] * B[y][x]
    // over a width x height row-major matrix, one thread per element.
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height)
        return;
    const int idx = y * width + x;
    C[idx] = A[idx] * B[idx];
}
} |
8,688 | #include "includes.h"
// Quantize a float image to 8-bit: map [minval, maxval] -> [0, 255] with
// clamping, one thread per pixel. Despite the name, it writes one byte per
// pixel into `bgr` (single channel as indexed here).
__global__ void convert_float2bgr(float* annd, unsigned char* bgr, int w, int h, float minval, float maxval)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < w && y < h)
{
int id = y * w + x;
// Normalize to [0,1], clamp, scale to [0,255]; the int truncation is
// the intended quantization step.
int err = max(min((annd[id] - minval) / (maxval - minval), 1.f), 0.f) * 255.f;
bgr[id] = err;
}
}
8,689 | #include "includes.h"
__global__ void matrixMulCUDA3(float *C, float *A, float *B, int n)
{
    // Naive n x n GEMM where every thread computes a TILE_WIDTH x TILE_WIDTH
    // sub-block of C = A * B (TILE_WIDTH comes from includes.h).
    const int rowBegin = (blockDim.y * blockIdx.y + threadIdx.y) * TILE_WIDTH;
    const int colBegin = (blockDim.x * blockIdx.x + threadIdx.x) * TILE_WIDTH;
    for (int row = rowBegin; row < rowBegin + TILE_WIDTH; ++row) {
        for (int col = colBegin; col < colBegin + TILE_WIDTH; ++col) {
            float acc = 0.0f;
            // Full dot product of A's row with B's column.
            for (int k = 0; k < n; ++k)
                acc += A[row * n + k] * B[k * n + col];
            C[row * n + col] = acc;
        }
    }
}
8,690 | /* Using a dataset consisting of three arrays: A, B and C, the operation
Cx = Ax + Bx is performed on each element.
This code is executed on the host using the CPU. The result compute time is
displayed at the end of the computing.*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
void host_add(int *a, int *b, int *c, int size){
    /* Element-wise sum on the host: c[i] = a[i] + b[i] for i in [0, size).
       All three arguments must point to arrays of at least `size` ints. */
    int i = 0;
    while (i < size) {
        c[i] = a[i] + b[i];
        ++i;
    }
}
void fill_array(int *data, int size){
    /* Initialize every slot with its own index: data[i] = i. */
    for (int idx = size; idx-- > 0; )
        data[idx] = idx;
}
void print_output(int *a, int *b, int *c, int size){
    /* Dump each "a + b = c" triple to stdout, one per line
       (leading-newline format preserved from the original). */
    for (int k = 0; k < size; ++k) {
        printf("\n %d + %d = %d", a[k], b[k], c[k]);
    }
}
int main(int argc, char *argv[]) {
    /* Driver: fill a and b with their indices, compute c = a + b on the
       CPU, print the results and the elapsed time in nanoseconds.
       Optional argv[1] overrides the default array size of 512. */
    int sizeOfArray = 512;
    if(argc > 1)
        sizeOfArray = atoi(argv[1]);
    int *a, *b, *c;
    int size = sizeOfArray * sizeof(int);
    struct timespec start, finish;
    clock_gettime(CLOCK_REALTIME, &start);
    /* Alloc space for host copies of a, b and c. Setup with input values */
    a = (int*)malloc(size); fill_array(a, sizeOfArray);
    b = (int*)malloc(size); fill_array(b, sizeOfArray);
    c = (int*)malloc(size);
    host_add(a, b, c, sizeOfArray);
    /* Get compute time.
       BUG FIX: the old code used finish.tv_nsec - start.tv_nsec alone,
       which is wrong (and possibly negative) whenever the measurement
       crosses a second boundary; include the tv_sec delta. */
    clock_gettime(CLOCK_REALTIME, &finish);
    long ns = (finish.tv_sec - start.tv_sec) * 1000000000L
            + (finish.tv_nsec - start.tv_nsec);
    /* Print output */
    print_output(a, b, c, sizeOfArray);
    printf("\n\nTime to compute: %ld ns \n\n", ns);
    free(a);
    free(b);
    free(c);
    return 0;
}
8,691 | #include "includes.h"
__global__ void _mat_add(float *ma, float *mb, float *target, float sa, float sb, int len){
    // Scaled elementwise sum: target[i] = sa*ma[i] + sb*mb[i],
    // one thread per element with a tail guard.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len)
        return;
    target[i] = sa * ma[i] + sb * mb[i];
}
8,692 | #include <cstdio>
#include <cstdlib>
#define KMAX 10000
#define THREADS 32
#define BLOCKS 32
#define LoopMAX 2000
#define klucz 137
//czas GPU: user 0m0.336s
//czas CPU: user 0m44.523s
//Funkcje uruchamialne na GPU z CPU ---> "__global__"
// One thread per element: repeatedly mixes the value with i*j modulo the
// key `klucz` (pure busy-work used to compare GPU vs CPU timings).
// Assumes the launch covers exactly the elements of `miejsce` — no guard.
__global__ void funkcja(int *miejsce) {
int indeks = threadIdx.x + blockIdx.x * blockDim.x;
// printf("th=%i block=%i dane_GPU[%i]=%i\n",
// threadIdx.x, blockIdx.x, indeks, miejsce[indeks]);
int liczba = miejsce[indeks];
// LoopMAX^2 iterations of the mixing recurrence per element.
for(int i=0; i<LoopMAX; ++i)
for(int j=0; j<LoopMAX; ++j)
liczba = (liczba + i * j) % klucz;
miejsce[indeks] = liczba;
}
void funkcja_CPU(int *miejsce) {
    // CPU reference: the same mixing recurrence as the GPU kernel `funkcja`,
    // applied sequentially to the first THREADS * BLOCKS elements.
    const int total = THREADS * BLOCKS;
    for (int idx = 0; idx < total; ++idx) {
        int v = miejsce[idx];
        for (int i = 0; i < LoopMAX; ++i)
            for (int j = 0; j < LoopMAX; ++j)
                v = (v + i * j) % klucz;
        miejsce[idx] = v;
    }
}
int main(void) {
    /* Timing demo: fill KMAX ints, run the heavy mixing loop on the CPU
       (the GPU path is left commented out for comparison runs). */
    int threads_per_block = THREADS;
    int blocks = BLOCKS;
    int *dane;
    /* FIX: use sizeof(int) instead of the magic constant 4. */
    dane = (int*) malloc(KMAX * sizeof(int));       // host allocation
    int *dane_GPU;
    cudaMalloc(&dane_GPU, KMAX * sizeof(int));      // device allocation
    // Generate the input numbers (CPU).
    for(int i=0; i<KMAX; ++i)
        dane[i] = i;
    // Host -> device copy syntax: (dst, src, byte count, direction flag);
    // copy back with (dane, dane_GPU, KMAX * sizeof(int), cudaMemcpyDeviceToHost).
    // cudaMemcpy(dane_GPU, dane, KMAX * sizeof(int), cudaMemcpyHostToDevice);
    // NOTE(review): the original commented-out launch passed
    // <<<threads_per_block, blocks>>> — the first launch argument is the
    // grid (blocks), the second the block size. Corrected below:
    // funkcja<<<blocks, threads_per_block>>>(dane_GPU);
    // cudaDeviceSynchronize(); // wait for GPU work to finish
    funkcja_CPU(dane);
    free(dane);          // host deallocation
    cudaFree(dane_GPU);
    return 0;
}
|
8,693 | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define GRID_DIM (1u << 12)
#define BLOCK_DIM (1u << 10)
#define BLOCK_NUM 32
#define ONE_BLOCK (GRID_DIM * BLOCK_DIM)
#define N (BLOCK_NUM * ONE_BLOCK)
#define CUDA_SAFE_CALL(func) \
do { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", \
cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while (0)
__global__ void blocked(float* a, float* b, float* c);
__global__ void many_elements(float* a, float* b, float* c);
int main(void)
{
    /* Benchmark elementwise multiply: either a loop of per-slice "blocked"
       launches (#if 0 path) or one "many_elements" kernel where each thread
       handles BLOCK_NUM elements. Prints elapsed seconds. */
    float* a_h = (float*)malloc(N * sizeof(float));
    float* b_h = (float*)malloc(N * sizeof(float));
    for (size_t i = 0; i < N; i++) {
        a_h[i] = (float)rand() / (float)RAND_MAX;
        b_h[i] = (float)rand() / (float)RAND_MAX;
    }
    float *a, *b, *c;
    CUDA_SAFE_CALL(cudaMalloc((void**)&a, N * sizeof(float)));
    CUDA_SAFE_CALL(cudaMalloc((void**)&b, N * sizeof(float)));
    CUDA_SAFE_CALL(cudaMalloc((void**)&c, N * sizeof(float)));
    CUDA_SAFE_CALL(cudaMemcpy(
        a, a_h, N * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(
        b, b_h, N * sizeof(float), cudaMemcpyHostToDevice));
    struct timeval time_start, time_end;
    gettimeofday(&time_start, NULL);
#if 0
    for (size_t i = 0; i < BLOCK_NUM; i++)
        blocked<<<GRID_DIM, BLOCK_DIM>>>(
            a + i * ONE_BLOCK, b + i * ONE_BLOCK, c + i * ONE_BLOCK);
#else
    many_elements<<<GRID_DIM, BLOCK_DIM>>>(a, b, c);
#endif
    /* BUG FIX: kernel launches are asynchronous — without a synchronize the
       timer only measured launch overhead, not the kernel itself. */
    CUDA_SAFE_CALL(cudaGetLastError());
    CUDA_SAFE_CALL(cudaDeviceSynchronize());
    gettimeofday(&time_end, NULL);
    double sec = (double)(time_end.tv_sec - time_start.tv_sec)
        + (double)(time_end.tv_usec - time_start.tv_usec) / 1e6;
    printf("%lf\n", sec);
    /*
    float* out_h = (float*)malloc(N * sizeof(float));
    CUDA_SAFE_CALL(cudaMemcpy(
        out_h, c, N * sizeof(float), cudaMemcpyDeviceToHost));
    for (size_t i = 0; i < N; i++) {
        printf("%lf\n", out_h[i]);
    }
    */
    /* FIX: release host and device memory (previously leaked). */
    free(a_h);
    free(b_h);
    CUDA_SAFE_CALL(cudaFree(a));
    CUDA_SAFE_CALL(cudaFree(b));
    CUDA_SAFE_CALL(cudaFree(c));
    return 0;
}
// Elementwise product for one ONE_BLOCK-sized slice; the host loops over
// BLOCK_NUM slices, launching this once per slice with offset pointers.
// No bounds guard: relies on the launch being exactly GRID_DIM x BLOCK_DIM
// threads, which equals ONE_BLOCK elements.
__global__ void blocked(float* a, float* b, float* c)
{
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
c[idx] = a[idx] * b[idx];
}
// Each thread multiplies BLOCK_NUM *consecutive* elements starting at
// BLOCK_NUM * globalThreadId, covering all N = BLOCK_NUM * ONE_BLOCK values.
// NOTE(review): adjacent threads access addresses BLOCK_NUM floats apart,
// so these global accesses are not coalesced; a grid-stride layout would
// coalesce better — results would be identical, but confirm before changing.
__global__ void many_elements(float* a, float* b, float* c)
{
size_t idx = BLOCK_NUM * (blockDim.x * blockIdx.x + threadIdx.x);
for (size_t i = 0; i < BLOCK_NUM; i++)
c[idx + i] = a[idx + i] * b[idx + i];
}
|
8,694 | #include "includes.h"
// For each column of a row-major nx-wide, ny-tall image, rotate the bottom
// half's rows up by one: the old value of row r moves to row r-1 for
// r in (ny/2, ny-1], and the middle row's (ny/2) old value wraps around to
// the last row.
// NOTE(review): this kernel ignores threadIdx/blockIdx — every launched
// thread performs the entire loop, so it is only safe with a single-thread
// launch (<<<1,1>>>); any concurrency would race. TODO confirm the caller.
__global__ void middle_to_top(float* data, const int nx, const int ny)
{
float tmp;
for ( int c = 0; c < nx; ++c ) {
// Seed with the middle row's value; it will land in the last row.
float last_val = data[ny/2*nx + c];
for ( int r = ny-1; r >= ny/2; --r ){
int idx = r*nx+c;
// Swap the carried value into this row, carry the old value upward.
tmp = data[idx];
data[idx] = last_val;
last_val = tmp;
}
}
}
8,695 | #include <cuda.h>
#define IDXV(x, y, ld) ((x) + (y) * (ld))
#define block 128
#define grid 256
__global__ static void pack_matrix(double* __restrict__ device_cube,
double* __restrict__ tmp_matrix,
const int n)
{
    // Copy the j == 0 slice of an n*n*n cube into a flat n*n matrix:
    // tmp_matrix[i][k] = device_cube[i][0][k], both row-major.
    // Grid-stride loop: correct for any launch configuration.
    const int stride = blockDim.x * gridDim.x;
    for (int idx = IDXV(threadIdx.x, blockIdx.x, blockDim.x);
         idx < n * n; idx += stride) {
        const int i = idx / n;   // row index within the slice
        const int k = idx % n;   // column index within the slice
        tmp_matrix[idx] = device_cube[i * n * n + k];
    }
}
__global__ static void unpack_matrix(double* __restrict__ tmp_matrix,
double* __restrict__ device_cube,
const int n)
{
    // Inverse of pack_matrix: write the flat n*n matrix into the
    // j == n-1 slice of the cube: device_cube[i][n-1][k] = tmp_matrix[i][k].
    // Grid-stride loop: correct for any launch configuration.
    const int stride = blockDim.x * gridDim.x;
    for (int idx = IDXV(threadIdx.x, blockIdx.x, blockDim.x);
         idx < n * n; idx += stride) {
        const int i = idx / n;   // cube row
        const int k = idx % n;   // cube column
        device_cube[i * n * n + (n - 1) * n + k] = tmp_matrix[idx];
    }
}
// Host wrapper (C linkage, e.g. for Fortran/C callers): launch pack_matrix
// (cube slice -> flat matrix) and block until it completes. Launch shape is
// the fixed `grid` x `block`; the kernel's grid-stride loop handles any n.
extern "C"
void call_pack(double* __restrict__ device_cube,
double* __restrict__ tmp_matrix,
const int n)
{
pack_matrix <<< grid, block >>> (device_cube, tmp_matrix, n);
cudaDeviceSynchronize();
}
// Host wrapper (C linkage): launch unpack_matrix (flat matrix -> cube's
// last slice) and block until it completes. Fixed `grid` x `block` launch;
// the kernel's grid-stride loop handles any n.
extern "C"
void call_unpack(double* __restrict__ tmp_matrix,
double* __restrict__ device_cube,
const int n)
{
unpack_matrix <<< grid, block >>> (tmp_matrix, device_cube, n);
cudaDeviceSynchronize();
}
|
8,696 | #include <thrust/functional.h>
#include <thrust/scan.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <cuda.h>
#include <thrust/host_vector.h>
extern "C"
{
// In-place exclusive prefix sum of the device array vertices_number[0..n)
// using Thrust. `vertices_number` must be a raw *device* pointer; the
// result overwrites the input (output iterator aliases the first input).
void ExclusiveScan(int * vertices_number, int n)
{
thrust::exclusive_scan(thrust::device_ptr<int>(vertices_number),
thrust::device_ptr<int>(vertices_number + n),
thrust::device_ptr<int>(vertices_number));
}}
8,697 | #include <cuda.h>
/* Compute the area of the n-th annular ring (rings are 1-indexed):
   pi*(n+1)^2 - pi*n^2. One thread per ring. */
__global__ void kernel(int N, float *g_result){
    int n = threadIdx.x + blockDim.x * blockIdx.x;
    /* BUG FIX: the host driver rounds BlocksPerGrid up, so threads with
       n >= N used to write past the end of g_result; guard them out. */
    if (n < N)
        g_result[n] = ((float)M_PI*(n+1)*(n+1)) - ((float)M_PI*n*n);
}
/* only use extern if calling code is C */
extern "C"
{
/* Host driver for `kernel`: computes N ring areas into the device buffer
   g_result (must hold at least N floats). */
void cudakernel(int N, float *g_result){
/* choose 256 threads per block for high occupancy */
int ThreadsPerBlock = 256;
/* ceil-divide so every ring index < N gets a thread */
int BlocksPerGrid = (N+ThreadsPerBlock-1)/ThreadsPerBlock;
/* invoke device on this block/thread grid.
   NOTE(review): because the grid rounds up, the kernel must bounds-check
   its index against N — verify it guards before launching large N. */
kernel <<< BlocksPerGrid, ThreadsPerBlock >>> (N, g_result);
}
}
8,698 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define dx 0.01
#define dy 0.01
#define rho 8800
#define C 381
#define lambda 384.0
#define tau 0.01
#define BLOCK_SIZE 16
__global__ void __laplas__(float *T,float *T_old, const int n, const int height)
{
    // One explicit (FTCS) 2-D heat-equation step: T = T_old + dt * at * lap(T_old),
    // updating interior points only. Row pitch is n floats.
    double at = lambda / (rho * C);   // thermal diffusivity
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int iA = n * row + col;
    if (row > 0 && col > 0 && row < height && col < n) {
        // Second differences along the two axes (neighbors are +-n and +-1).
        const float vert  = T_old[iA - n] - 2 * T_old[iA] + T_old[iA + n];
        const float horiz = T_old[iA - 1] - 2 * T_old[iA] + T_old[iA + 1];
        T[iA] = T_old[iA] + (tau / (dx * dx)) * at * vert
                          + (tau / (dy * dy)) * at * horiz;
    }
}
extern "C" void gpu(int index, int numDev, int n, int height, float *T, float *T_old)
{
    /* Run one heat-equation update step on device numDev. T and T_old are
       host buffers of (height+1)*(n+1) floats; the kernel updates interior
       points (0 < col < n, 0 < row < height) with row pitch n. */
    cudaSetDevice(numDev);
    if (index == 0)
    {
        /* Print device info once (first caller only). */
        int count;
        unsigned int flag;
        int device;
        cudaGetDevice(&device);
        cudaGetDeviceCount(&count);
        cudaGetDeviceFlags(&flag);
        printf("set device %d\n", numDev);
        printf("device %d\n", device);
        printf("device flag %d\n", flag);
        printf("device count %d\n", count);
    }
    size_t size = (height+1) * (n+1) * sizeof(float);
    float *dev_T = NULL;
    cudaMalloc((void **)&dev_T, size);
    float *dev_T_old = NULL;
    cudaMalloc((void **)&dev_T_old, size);
    cudaMemcpy( dev_T, T, size, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_T_old, T_old, size, cudaMemcpyHostToDevice );
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    /* BUG FIX: round the grid up so the whole domain is covered when n or
       height is not a multiple of BLOCK_SIZE. The old n/BLOCK_SIZE
       truncated and silently skipped the border tiles; the kernel
       bounds-checks, so the extra threads are harmless. */
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (height + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
    __laplas__<<<dimGrid, dimBlock>>>(dev_T,dev_T_old, n, height);
    cudaDeviceSynchronize();
    cudaMemcpy(T, dev_T, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_T);
    cudaFree(dev_T_old);
}
|
8,699 | /************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <algorithm>
#include "../graph_parser/parse.h"
#include "../graph_parser/util.h"
#include "kernel.cu"
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
#define RANGE 2048
void dump2file(int *adjmatrix, int num_nodes);
void print_vector(int *vector, int num);
void print_vectorf(float *vector, int num);
int main(int argc, char **argv)
{
    /* Maximal-independent-set benchmark: parse a graph into CSR form, run
       the mis1/mis2/mis3 kernel loop until no node changes, copy the set
       membership array back, and report iteration count and timing. */
    char *tmpchar;
    int num_nodes;
    int num_edges;
    int file_format = 1;
    bool directed = 0;
    cudaError_t err = cudaSuccess;
    // Input arguments
    if (argc == 3) {
        tmpchar = argv[1];           // Graph inputfile
        file_format = atoi(argv[2]); // Choose file format
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }
    srand(7);
    // Allocate the csr array
    csr_array *csr;
    // Parse the graph into the csr structure
    if (file_format == 1) {
        csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
    } else if (file_format == 0) {
        csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);
    } else {
        fprintf(stderr, "reserve for future");
        exit(1);
    }
    // Allocate the node value array
    int *node_value = (int *)malloc(num_nodes * sizeof(int));
    if (!node_value) fprintf(stderr, "malloc failed node_value\n");
    // Allocate the set array
    int *s_array = (int *)malloc(num_nodes * sizeof(int));
    if (!s_array) fprintf(stderr, "malloc failed node_value\n");
    // Randomize the node values
    for (int i = 0; i < num_nodes; i++) {
        node_value[i] = rand() % RANGE;
    }
    // Create device side buffers
    int *row_d;
    int *col_d;
    int *c_array_d;
    int *c_array_u_d;
    int *s_array_d;
    int *node_value_d;
    int *min_array_d;
    int *stop_d;
    // Allocate the device-side buffers for the graph
    err = cudaMalloc(&row_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&col_d, num_edges * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges , cudaGetErrorString(err));
        return -1;
    }
    // Termination variable
    err = cudaMalloc(&stop_d, sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc stop_d (size:%d) => %s\n", 1, cudaGetErrorString(err));
        return -1;
    }
    // Allocate the device-side buffers for mis
    err = cudaMalloc(&min_array_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc min_array_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&c_array_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc c_array_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&c_array_u_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc c_array_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&s_array_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc s_array_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
        return -1;
    }
    err = cudaMalloc(&node_value_d, num_nodes * sizeof(int));
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMalloc node_value_d (size:%d) => %s\n", num_nodes , cudaGetErrorString(err));
        return -1;
    }
    double time1 = gettime();
#ifdef GEM5_FUSION
    m5_work_begin(0, 0);
#endif
    // Copy data to device-side buffers
    err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy row_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    err = cudaMemcpy(node_value_d, node_value, num_nodes * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy feature_d (size:%d) => %s\n", num_nodes, cudaGetErrorString(err));
        return -1;
    }
    // Work dimensions
    int block_size = 128;
    int num_blocks = (num_nodes + block_size - 1) / block_size;
    dim3 threads(block_size, 1, 1);
    dim3 grid(num_blocks, 1, 1);
    // Launch the initialization kernel
    init <<<grid, threads>>>(s_array_d, c_array_d, c_array_u_d,
                             num_nodes, num_edges);
    // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: init kernel (%s)\n", cudaGetErrorString(err));
        return -1;
    }
    // Termination variable
    int stop = 1;
    int iterations = 0;
    while (stop) {
        stop = 0;
        // Copy the termination variable to the device
        err = cudaMemcpy(stop_d, &stop, sizeof(int), cudaMemcpyHostToDevice);
        if (err != cudaSuccess) {
            fprintf(stderr, "ERROR: write stop_d variable (%s)\n", cudaGetErrorString(err));
            return -1;
        }
        // Launch mis1
        mis1 <<<grid, threads>>>(row_d, col_d, node_value_d, s_array_d,
                                 c_array_d, min_array_d, stop_d, num_nodes,
                                 num_edges);
        // Launch mis2
        mis2 <<<grid, threads>>>(row_d, col_d, node_value_d, s_array_d,
                                 c_array_d, c_array_u_d, min_array_d, num_nodes,
                                 num_edges);
        // Launch mis3
        mis3 <<<grid, threads>>>(c_array_u_d, c_array_d, num_nodes);
        // Copy the termination variable back
        err = cudaMemcpy(&stop, stop_d, sizeof(int), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            fprintf(stderr, "ERROR: read stop_d variable (%s)\n", cudaGetErrorString(err));
            return -1;
        }
        iterations++;
    }
    cudaDeviceSynchronize();
    err = cudaMemcpy(s_array, s_array_d, num_nodes * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "ERROR: cudaMemcpy s_array_d failed (%s)\n", cudaGetErrorString(err));
        return -1;
    }
#ifdef GEM5_FUSION
    m5_work_end(0, 0);
#endif
    double time2 = gettime();
    // Print out the timing characterisitics
    printf("number of iterations: %d\n", iterations);
    printf("kernel + memcpy time %f ms\n", (time2 - time1) * 1000);
#if 0
    // Print the set array
    print_vector(s_array, num_nodes);
#endif
    // Clean up the host-side arrays
    free(node_value);
    free(s_array);
    csr->freeArrays();
    free(csr);
    // Clean up the device-side arrays
    cudaFree(row_d);
    cudaFree(col_d);
    cudaFree(c_array_d);
    // BUG FIX: c_array_u_d was allocated but never freed (device memory leak).
    cudaFree(c_array_u_d);
    cudaFree(s_array_d);
    cudaFree(node_value_d);
    cudaFree(min_array_d);
    cudaFree(stop_d);
    return 0;
}
/* Write `num` ints to "result.out", one per line. On open failure, report
   and return (the file name in the message is kept as-is for log parity). */
void print_vector(int *vector, int num)
{
    FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.txt\n");
        /* BUG FIX: previously fell through and called fprintf/fclose on a
           NULL stream after a failed open; bail out early instead. */
        return;
    }
    for (int i = 0; i < num; i++) {
        fprintf(fp, "%d\n", vector[i]);
    }
    fclose(fp);
}
/* Write `num` floats to "result.out", one per line. On open failure, report
   and return (the file name in the message is kept as-is for log parity). */
void print_vectorf(float *vector, int num)
{
    FILE * fp = fopen("result.out", "w");
    if (!fp) {
        printf("ERROR: unable to open result.txt\n");
        /* BUG FIX: previously fell through and called fprintf/fclose on a
           NULL stream after a failed open; bail out early instead. */
        return;
    }
    for (int i = 0; i < num; i++) {
        fprintf(fp, "%f\n", vector[i]);
    }
    fclose(fp);
}
|
8,700 | #include "includes.h"
// Transpose with 4-way unrolling along x: each thread moves 4 elements,
// reading `mat` column-wise (to = ix*ny + iy, strided) and writing
// `transpose` row-wise (ti = iy*nx + ix, coalesced).
// NOTE(review): the guard requires ix + 3*blockDim.x < nx, so a tail of
// up to 4*blockDim.x - 1 columns is skipped when nx is not a multiple of
// 4*blockDim.x — confirm the launcher sizes the grid accordingly.
__global__ void transpose_unroll4_col(int * mat, int * transpose, int nx, int ny)
{
int ix = blockIdx.x * blockDim.x * 4 + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int ti = iy * nx + ix;   // output index (nx-wide layout)
int to = ix * ny + iy;   // input index (ny-wide layout)
if (ix + 3 * blockDim.x < nx && iy < ny)
{
// Four elements per thread, blockDim.x apart in x.
transpose[ti] = mat[to];
transpose[ti + blockDim.x] = mat[to + blockDim.x*ny];
transpose[ti + 2 * blockDim.x] = mat[to + 2 * blockDim.x*ny];
transpose[ti + 3 * blockDim.x] = mat[to + 3 * blockDim.x*ny];
}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.