hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
0399413cd82827aa0e05768322de234c691b5c0d.hip | // !!! This is a file automatically generated by hipify!!!
/* This is machine problem 2, binning
* The problem is that you have particles in a 3D domain
* which is quantized into blocks or bins. You want to figure
* out which block each particle belongs to.
* Use the atomic functions that you learned about in lecture 3
* to implement the same functionality as the reference version on the cpu.
*
* FOR EXTRA CREDIT:
* Write a version of your binning kernel that uses atomics hierarchically,
* accumulating updates first into shared memory and then merging the results
* from shared memory into the global memory.
* As a hint, think about binning particles first into a coarse grid in a first kernel,
* and then binning the particles from each coarse bin into the
* final bins in a second kernel.
*/
/*
* SUBMISSION INSTRUCTIONS
* =========================
*
* You can submit your entire working directory for this assignment
* from any of the cluster machines by using our submit script. We want to be able
* to just run "make" to compile your code.
* The submit script bundles the entire current directory into
* a submission. Thus, you use it by CDing to a the directory for your assignment,
* and running:
*
* > cd *some directory*
* > /usr/class/cs193g/bin/submit mp2
*
* This will submit the current directory as your assignment. You can submit
* as many times as you want, and we will use your last submission.
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include <hip/hip_runtime.h>
#include "mp2-util.h"
// TODO enable this to print debugging information
//const bool print_debug = true;
const bool print_debug = false;
event_pair timer;
// the particle coordinates are already normalized (in the domain [0,1] )
// gridding provides the base 2 log of how finely the domain is subdivided
// in each direction. So gridding.x == 6 means that the x-axis is subdivided
// into 64 parts. (i.e. 2^(gridding.x) = number of bins on x axis)
// Overall there cannot be more than 4B bins, so we can just concatenate the bin
// indices into a single uint.
// Flattened bin id for a normalized particle position (domain [0,1] per axis).
// gridding.{x,y,z} hold the base-2 logs of the bin counts per axis; the three
// per-axis indices are packed into one uint as [z | y | x] bitfields, with x
// occupying the lowest gridding.x bits.
unsigned int bin_index(float3 particle, int3 gridding)
{
  const unsigned int ix = (unsigned int)(particle.x * (1 << gridding.x));
  const unsigned int iy = (unsigned int)(particle.y * (1 << gridding.y));
  const unsigned int iz = (unsigned int)(particle.z * (1 << gridding.z));
  // Concatenate: z in the high bits, then y, then x.
  return (((iz << gridding.y) | iy) << gridding.x) | ix;
}
// CPU reference implementation of the binning.
// For each of the array_length particles, computes its bin id and appends the
// particle's index to that bin's slot list (bin_size slots per bin, laid out
// contiguously in `bins`), tracking occupancy in `bin_counters`. When a bin
// is already full the particle is dropped and *overflow_flag is raised.
void host_binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length)
{
for(int i=0;i<array_length;i++)
{
unsigned int bin = bin_index(particles[i],gridding);
if(bin_counters[bin] < bin_size)
{
// next free slot within this bin
unsigned int offset = bin_counters[bin];
// let's not do the whole precrement / postcrement thing...
bin_counters[bin]++;
bins[bin*bin_size + offset] = i;
}
else {
*overflow_flag = true;
}
}
}
// Compares GPU output against the CPU reference.
// Atomics give no ordering guarantee, so a bin's slots may be filled in a
// different order on the GPU; bins are therefore not compared element-wise.
// Instead a particle->bin map is rebuilt from the reference output, and every
// particle the GPU placed in bin i is checked to belong to bin i in the
// reference too. Returns nonzero (true) on mismatch, zero (false) on success.
bool cross_check_results(int * h_bins, int * h_bins_checker, int * h_bin_counters, int * h_bin_counters_checker, int * h_particles_binids_checker, int num_particles, int num_bins, int bin_size)
{
int error = 0;
for(int i=0;i<num_bins;i++)
{
// bin occupancy counts must match exactly
if(h_bin_counters[i] != h_bin_counters_checker[i])
{
if(print_debug) fprintf(stderr,"mismatch! bin %d: cuda:%d host:%d particles \n",i,h_bin_counters[i],h_bin_counters_checker[i]);
error = 1;
}
for(int j=0; j<bin_size;j++)
{
// record which these particles went into bin i in the reference version
if(h_bins_checker[i*bin_size+j] != -1)
{
h_particles_binids_checker[h_bins_checker[i*bin_size+j]] = i;
}
}
// NOTE(review): the loop below indexes with the GPU value
// h_bins[i*bin_size+j]; if the GPU left a slot at -1 where the reference
// filled it, this read is out of bounds — it relies on the counters having
// matched above. TODO confirm this assumption is acceptable.
for(int j=0; j<bin_size;j++)
{
if(h_bins_checker[i*bin_size+j] != -1)
{
if(h_particles_binids_checker[h_bins[i*bin_size+j]] != i)
{
error = 1;
}
}
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else {
printf("Worked! CUDA and reference output match. \n");
}
return error;
}
// Kernel: sets array[gid] = value for the first array_length elements.
// One element per thread; launch with enough blocks*threads to cover
// array_length (excess threads are bounds-checked out).
template
<typename T>
__global__ void initialize(T *array,T value, unsigned int array_length)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid < array_length)
{
array[gid] = value;
}
}
// Device-side copy of the host bin_index(): packs the per-axis bin indices of
// a normalized particle position into a single uint as [z | y | x] bitfields
// (x in the lowest gridding.x bits).
__device__
unsigned int d_bin_index(float3 particle, int3 gridding)
{
unsigned int x_index = (unsigned int)(particle.x * (1 << gridding.x));
unsigned int y_index = (unsigned int)(particle.y * (1 << gridding.y));
unsigned int z_index = (unsigned int)(particle.z * (1 << gridding.z));
unsigned int index = 0;
index |= z_index;
index <<= gridding.y;
index |= y_index;
index <<= gridding.x;
index |= x_index;
return index;
}
// Kernel: GPU counterpart of host_binning(). Each thread handles one particle:
// it computes the particle's bin, atomically reserves a slot in that bin and
// records the particle index there. Launch with >= array_length total threads;
// excess threads are bounds-checked out.
__global__
void binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length)
{
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i < array_length)
  {
    // Fix: the original passed the kernel name `binning` instead of the
    // `gridding` parameter to d_bin_index.
    unsigned int bin = d_bin_index(particles[i], gridding);
    // Reserve a unique slot: atomicAdd returns the pre-increment value, so no
    // two threads can obtain the same offset. (Reading the counter and then
    // incrementing it in a separate atomic, as before, is a race that lets
    // threads overwrite each other's slots.)
    int offset = atomicAdd(&bin_counters[bin], 1);
    if (offset < bin_size)
    {
      bins[bin*bin_size + offset] = i;
    }
    else
    {
      // Bin already full: undo the reservation so the final counter matches
      // the reference (capped at bin_size), and raise the overflow flag.
      atomicSub(&bin_counters[bin], 1);
      *overflow_flag = true;
    }
  }
}
// Allocates device buffers, copies the inputs, launches the binning kernel and
// copies the results back into h_bins / h_bin_counters. Prints a warning when
// any bin overflowed on the GPU. Host-facing interface is unchanged.
// Fix: the original referenced the undefined names `overflow_flag` and
// `array_length` in the launch and never allocated a device-side flag.
void device_binning(float3 * h_particles, int * h_bins, int * h_bin_counters, int3 gridding, int num_particles, int num_bins, int bin_size)
{
  float3 *d_particles;
  int *d_bins, *d_bin_counters, *d_overflow_flag;
  hipMalloc((void**)&d_particles, num_particles*sizeof(float3));
  hipMalloc((void**)&d_bins, num_bins*bin_size*sizeof(int));
  hipMalloc((void**)&d_bin_counters, num_bins*sizeof(int));
  hipMalloc((void**)&d_overflow_flag, sizeof(int));
  hipMemcpy(d_particles, h_particles, num_particles*sizeof(float3), hipMemcpyHostToDevice);
  hipMemcpy(d_bins, h_bins, num_bins*bin_size*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(d_bin_counters, h_bin_counters, num_bins*sizeof(int), hipMemcpyHostToDevice);
  hipMemset(d_overflow_flag, 0, sizeof(int));
  // Ceil-divide so a partial last block covers the tail; the kernel
  // bounds-checks against num_particles.
  hipLaunchKernelGGL(binning, dim3((num_particles+1023)/1024), dim3(1024), 0, 0, d_particles, d_bins, d_bin_counters, d_overflow_flag, gridding, bin_size, num_particles);
  int h_overflow_flag = 0;
  hipMemcpy(&h_overflow_flag, d_overflow_flag, sizeof(int), hipMemcpyDeviceToHost);
  if (h_overflow_flag)
  {
    printf("GPU binning: one of the bins overflowed!\n");
  }
  hipMemcpy(h_bins, d_bins, num_bins*bin_size*sizeof(int), hipMemcpyDeviceToHost);
  hipMemcpy(h_bin_counters, d_bin_counters, num_bins*sizeof(int), hipMemcpyDeviceToHost);
  hipFree(d_bins);
  hipFree(d_bin_counters);
  hipFree(d_particles);
  hipFree(d_overflow_flag);
}
// Driver: builds 8M random particles, bins them on the GPU and on the CPU, and
// cross-checks the two results. Returns 0 on completion.
int main(void)
{
// create arrays of 8M elements
int num_particles = 8*1024*1024;
int log_bpd = 6;
int bins_per_dim = 1 << log_bpd;
unsigned int num_bins = bins_per_dim*bins_per_dim*bins_per_dim;
// extra space to account for load imbalance to prevent frequent aborts due to bin overflow
int bin_size = num_particles/num_bins * 3;
int3 gridding = make_int3(log_bpd,log_bpd,log_bpd);
float3 *h_particles = 0;
int *h_bins = 0;
int *h_bin_counters = 0;
int *h_bins_checker = 0;
float3 *h_particles_checker = 0;
int *h_bin_counters_checker = 0;
int *h_particles_binids_checker = 0;
int h_overflow_flag_checker = 0;
// malloc host arrays
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_bins = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters = (int*)malloc(num_bins * sizeof(int));
h_particles_checker = (float3*)malloc(num_particles * sizeof(float3));
h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_particles_binids_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int));
// if any memory allocation failed, report an error message
// (fix: h_particles_checker was missing from this check)
if(h_particles == 0 || h_particles_checker == 0 ||
h_bins == 0 || h_bin_counters == 0 ||
h_bins_checker == 0 || h_bin_counters_checker == 0 ||
h_particles_binids_checker == 0)
{
printf("couldn't allocate memory\n");
exit(1);
}
// generate random input with a fixed seed for reproducibility
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = h_particles_checker[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX);
}
// unsigned loop indices: num_bins is unsigned (avoids sign-compare issues)
for(unsigned int i=0;i<num_bins;i++)
{
h_bin_counters[i] = h_bin_counters_checker[i] = 0;
}
for(unsigned int i=0;i<num_bins*bin_size;i++)
{
h_bins[i] = h_bins_checker[i] = h_particles_binids_checker[i] = -1;
}
device_binning(h_particles, h_bins, h_bin_counters, gridding, num_particles, num_bins, bin_size);
// generate reference output
start_timer(&timer);
host_binning(h_particles_checker, h_bins_checker, h_bin_counters_checker, &h_overflow_flag_checker, gridding, bin_size, num_particles);
stop_timer(&timer,"cpu binning");
if(h_overflow_flag_checker)
{
printf("one of the bins overflowed!\n");
exit(1);
}
// check CUDA output versus reference output
cross_check_results(h_bins, h_bins_checker, h_bin_counters, h_bin_counters_checker, h_particles_binids_checker, num_particles, num_bins, bin_size);
// deallocate memory
free(h_particles);
free(h_bins);
free(h_bin_counters);
free(h_particles_checker);
free(h_bins_checker);
free(h_particles_binids_checker);
free(h_bin_counters_checker);
return 0;
}
| 0399413cd82827aa0e05768322de234c691b5c0d.cu | /* This is machine problem 2, binning
* The problem is that you have particles in a 3D domain
* which is quantized into blocks or bins. You want to figure
* out which block each particle belongs to.
* Use the atomic functions that you learned about in lecture 3
* to implement the same functionality as the reference version on the cpu.
*
* FOR EXTRA CREDIT:
* Write a version of your binning kernel that uses atomics hierarchically,
* accumulating updates first into shared memory and then merging the results
* from shared memory into the global memory.
* As a hint, think about binning particles first into a coarse grid in a first kernel,
* and then binning the particles from each coarse bin into the
* final bins in a second kernel.
*/
/*
* SUBMISSION INSTRUCTIONS
* =========================
*
* You can submit your entire working directory for this assignment
* from any of the cluster machines by using our submit script. We want to be able
* to just run "make" to compile your code.
* The submit script bundles the entire current directory into
* a submission. Thus, you use it by CDing to a the directory for your assignment,
* and running:
*
* > cd *some directory*
* > /usr/class/cs193g/bin/submit mp2
*
* This will submit the current directory as your assignment. You can submit
* as many times as you want, and we will use your last submission.
*/
#include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include <cuda.h>
#include "mp2-util.h"
// TODO enable this to print debugging information
//const bool print_debug = true;
const bool print_debug = false;
event_pair timer;
// the particle coordinates are already normalized (in the domain [0,1] )
// gridding provides the base 2 log of how finely the domain is subdivided
// in each direction. So gridding.x == 6 means that the x-axis is subdivided
// into 64 parts. (i.e. 2^(gridding.x) = number of bins on x axis)
// Overall there cannot be more than 4B bins, so we can just concatenate the bin
// indices into a single uint.
// Flattened bin id for a normalized particle position (domain [0,1] per axis).
// gridding.{x,y,z} hold the base-2 logs of the bin counts per axis; the three
// per-axis indices are packed into one uint as [z | y | x] bitfields, with x
// occupying the lowest gridding.x bits.
unsigned int bin_index(float3 particle, int3 gridding)
{
  const unsigned int ix = (unsigned int)(particle.x * (1 << gridding.x));
  const unsigned int iy = (unsigned int)(particle.y * (1 << gridding.y));
  const unsigned int iz = (unsigned int)(particle.z * (1 << gridding.z));
  // Concatenate: z in the high bits, then y, then x.
  return (((iz << gridding.y) | iy) << gridding.x) | ix;
}
// CPU reference implementation of the binning.
// For each of the array_length particles, computes its bin id and appends the
// particle's index to that bin's slot list (bin_size slots per bin, laid out
// contiguously in `bins`), tracking occupancy in `bin_counters`. When a bin
// is already full the particle is dropped and *overflow_flag is raised.
void host_binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length)
{
for(int i=0;i<array_length;i++)
{
unsigned int bin = bin_index(particles[i],gridding);
if(bin_counters[bin] < bin_size)
{
// next free slot within this bin
unsigned int offset = bin_counters[bin];
// let's not do the whole precrement / postcrement thing...
bin_counters[bin]++;
bins[bin*bin_size + offset] = i;
}
else {
*overflow_flag = true;
}
}
}
// Compares GPU output against the CPU reference.
// Atomics give no ordering guarantee, so a bin's slots may be filled in a
// different order on the GPU; bins are therefore not compared element-wise.
// Instead a particle->bin map is rebuilt from the reference output, and every
// particle the GPU placed in bin i is checked to belong to bin i in the
// reference too. Returns nonzero (true) on mismatch, zero (false) on success.
bool cross_check_results(int * h_bins, int * h_bins_checker, int * h_bin_counters, int * h_bin_counters_checker, int * h_particles_binids_checker, int num_particles, int num_bins, int bin_size)
{
int error = 0;
for(int i=0;i<num_bins;i++)
{
// bin occupancy counts must match exactly
if(h_bin_counters[i] != h_bin_counters_checker[i])
{
if(print_debug) fprintf(stderr,"mismatch! bin %d: cuda:%d host:%d particles \n",i,h_bin_counters[i],h_bin_counters_checker[i]);
error = 1;
}
for(int j=0; j<bin_size;j++)
{
// record which these particles went into bin i in the reference version
if(h_bins_checker[i*bin_size+j] != -1)
{
h_particles_binids_checker[h_bins_checker[i*bin_size+j]] = i;
}
}
// NOTE(review): the loop below indexes with the GPU value
// h_bins[i*bin_size+j]; if the GPU left a slot at -1 where the reference
// filled it, this read is out of bounds — it relies on the counters having
// matched above. TODO confirm this assumption is acceptable.
for(int j=0; j<bin_size;j++)
{
if(h_bins_checker[i*bin_size+j] != -1)
{
if(h_particles_binids_checker[h_bins[i*bin_size+j]] != i)
{
error = 1;
}
}
}
}
if(error)
{
printf("Output of CUDA version and normal version didn't match! \n");
}
else {
printf("Worked! CUDA and reference output match. \n");
}
return error;
}
// Kernel: sets array[gid] = value for the first array_length elements.
// One element per thread; launch with enough blocks*threads to cover
// array_length (excess threads are bounds-checked out).
template
<typename T>
__global__ void initialize(T *array,T value, unsigned int array_length)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid < array_length)
{
array[gid] = value;
}
}
// Device-side copy of the host bin_index(): packs the per-axis bin indices of
// a normalized particle position into a single uint as [z | y | x] bitfields
// (x in the lowest gridding.x bits).
__device__
unsigned int d_bin_index(float3 particle, int3 gridding)
{
unsigned int x_index = (unsigned int)(particle.x * (1 << gridding.x));
unsigned int y_index = (unsigned int)(particle.y * (1 << gridding.y));
unsigned int z_index = (unsigned int)(particle.z * (1 << gridding.z));
unsigned int index = 0;
index |= z_index;
index <<= gridding.y;
index |= y_index;
index <<= gridding.x;
index |= x_index;
return index;
}
// Kernel: GPU counterpart of host_binning(). Each thread handles one particle:
// it computes the particle's bin, atomically reserves a slot in that bin and
// records the particle index there. Launch with >= array_length total threads;
// excess threads are bounds-checked out.
__global__
void binning(float3 *particles, int *bins, int *bin_counters, int *overflow_flag, int3 gridding, int bin_size, int array_length)
{
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  if (i < array_length)
  {
    // Fix: the original passed the kernel name `binning` instead of the
    // `gridding` parameter to d_bin_index.
    unsigned int bin = d_bin_index(particles[i], gridding);
    // Reserve a unique slot: atomicAdd returns the pre-increment value, so no
    // two threads can obtain the same offset. (Reading the counter and then
    // incrementing it in a separate atomic, as before, is a race that lets
    // threads overwrite each other's slots.)
    int offset = atomicAdd(&bin_counters[bin], 1);
    if (offset < bin_size)
    {
      bins[bin*bin_size + offset] = i;
    }
    else
    {
      // Bin already full: undo the reservation so the final counter matches
      // the reference (capped at bin_size), and raise the overflow flag.
      atomicSub(&bin_counters[bin], 1);
      *overflow_flag = true;
    }
  }
}
// Allocates device buffers, copies the inputs, launches the binning kernel and
// copies the results back into h_bins / h_bin_counters. Prints a warning when
// any bin overflowed on the GPU. Host-facing interface is unchanged.
// Fix: the original referenced the undefined names `overflow_flag` and
// `array_length` in the launch and never allocated a device-side flag.
void device_binning(float3 * h_particles, int * h_bins, int * h_bin_counters, int3 gridding, int num_particles, int num_bins, int bin_size)
{
  float3 *d_particles;
  int *d_bins, *d_bin_counters, *d_overflow_flag;
  cudaMalloc((void**)&d_particles, num_particles*sizeof(float3));
  cudaMalloc((void**)&d_bins, num_bins*bin_size*sizeof(int));
  cudaMalloc((void**)&d_bin_counters, num_bins*sizeof(int));
  cudaMalloc((void**)&d_overflow_flag, sizeof(int));
  cudaMemcpy(d_particles, h_particles, num_particles*sizeof(float3), cudaMemcpyHostToDevice);
  cudaMemcpy(d_bins, h_bins, num_bins*bin_size*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_bin_counters, h_bin_counters, num_bins*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemset(d_overflow_flag, 0, sizeof(int));
  // Ceil-divide so a partial last block covers the tail; the kernel
  // bounds-checks against num_particles.
  binning <<<(num_particles+1023)/1024, 1024>>> (d_particles, d_bins, d_bin_counters, d_overflow_flag, gridding, bin_size, num_particles);
  int h_overflow_flag = 0;
  cudaMemcpy(&h_overflow_flag, d_overflow_flag, sizeof(int), cudaMemcpyDeviceToHost);
  if (h_overflow_flag)
  {
    printf("GPU binning: one of the bins overflowed!\n");
  }
  cudaMemcpy(h_bins, d_bins, num_bins*bin_size*sizeof(int), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_bin_counters, d_bin_counters, num_bins*sizeof(int), cudaMemcpyDeviceToHost);
  cudaFree(d_bins);
  cudaFree(d_bin_counters);
  cudaFree(d_particles);
  cudaFree(d_overflow_flag);
}
// Driver: builds 8M random particles, bins them on the GPU and on the CPU, and
// cross-checks the two results. Returns 0 on completion.
int main(void)
{
// create arrays of 8M elements
int num_particles = 8*1024*1024;
int log_bpd = 6;
int bins_per_dim = 1 << log_bpd;
unsigned int num_bins = bins_per_dim*bins_per_dim*bins_per_dim;
// extra space to account for load imbalance to prevent frequent aborts due to bin overflow
int bin_size = num_particles/num_bins * 3;
int3 gridding = make_int3(log_bpd,log_bpd,log_bpd);
float3 *h_particles = 0;
int *h_bins = 0;
int *h_bin_counters = 0;
int *h_bins_checker = 0;
float3 *h_particles_checker = 0;
int *h_bin_counters_checker = 0;
int *h_particles_binids_checker = 0;
int h_overflow_flag_checker = 0;
// malloc host arrays
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_bins = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters = (int*)malloc(num_bins * sizeof(int));
h_particles_checker = (float3*)malloc(num_particles * sizeof(float3));
h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_particles_binids_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int));
// if any memory allocation failed, report an error message
// (fix: h_particles_checker was missing from this check)
if(h_particles == 0 || h_particles_checker == 0 ||
h_bins == 0 || h_bin_counters == 0 ||
h_bins_checker == 0 || h_bin_counters_checker == 0 ||
h_particles_binids_checker == 0)
{
printf("couldn't allocate memory\n");
exit(1);
}
// generate random input with a fixed seed for reproducibility
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = h_particles_checker[i] = make_float3((float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX,(float)rand()/(float)RAND_MAX);
}
// unsigned loop indices: num_bins is unsigned (avoids sign-compare issues)
for(unsigned int i=0;i<num_bins;i++)
{
h_bin_counters[i] = h_bin_counters_checker[i] = 0;
}
for(unsigned int i=0;i<num_bins*bin_size;i++)
{
h_bins[i] = h_bins_checker[i] = h_particles_binids_checker[i] = -1;
}
device_binning(h_particles, h_bins, h_bin_counters, gridding, num_particles, num_bins, bin_size);
// generate reference output
start_timer(&timer);
host_binning(h_particles_checker, h_bins_checker, h_bin_counters_checker, &h_overflow_flag_checker, gridding, bin_size, num_particles);
stop_timer(&timer,"cpu binning");
if(h_overflow_flag_checker)
{
printf("one of the bins overflowed!\n");
exit(1);
}
// check CUDA output versus reference output
cross_check_results(h_bins, h_bins_checker, h_bin_counters, h_bin_counters_checker, h_particles_binids_checker, num_particles, num_bins, bin_size);
// deallocate memory
free(h_particles);
free(h_bins);
free(h_bin_counters);
free(h_particles_checker);
free(h_bins_checker);
free(h_particles_binids_checker);
free(h_bin_counters_checker);
return 0;
}
|
a1925c7d4a3b203e36b75b246dad1f090eadf7b7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* This sample queries the properties of the CUDA devices present in the system. */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Enumerates all devices visible to the runtime and prints their properties.
// Returns 0 on completion.
int
main( int argc, char** argv)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
// major/minor of 9999 is the sentinel reported when no real device exists
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %lu bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %lu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %lu bytes\n",
deviceProp.textureAlignment);
// clockRate is reported by the runtime in kHz, so *1e-6 yields GHz
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
// fix: memoryBusWidth is documented in bits, not bytes
printf(" Memory bus width: %d bits\n",
deviceProp.memoryBusWidth);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
printf("\nTEST PASSED\n");
return 0;
}
| a1925c7d4a3b203e36b75b246dad1f090eadf7b7.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* This sample queries the properties of the CUDA devices present in the system. */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Enumerates all CUDA devices visible to the runtime and prints their
// properties. Returns 0 on completion.
int
main( int argc, char** argv)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
int dev;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
// major/minor of 9999 is the sentinel reported when no real device exists
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %lu bytes\n",
deviceProp.totalGlobalMem);
#if CUDART_VERSION >= 2000
printf(" Number of multiprocessors: %d\n",
deviceProp.multiProcessorCount);
printf(" Number of cores: %d\n",
8 * deviceProp.multiProcessorCount);
#endif
printf(" Total amount of constant memory: %lu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %lu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %lu bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %lu bytes\n",
deviceProp.textureAlignment);
// clockRate is reported by the runtime in kHz, so *1e-6 yields GHz
printf(" Clock rate: %.2f GHz\n",
deviceProp.clockRate * 1e-6f);
// fix: cudaDeviceProp::memoryBusWidth is documented in bits, not bytes
printf(" Memory bus width: %d bits\n",
deviceProp.memoryBusWidth);
#if CUDART_VERSION >= 2000
printf(" Concurrent copy and execution: %s\n",
deviceProp.deviceOverlap ? "Yes" : "No");
#endif
}
printf("\nTEST PASSED\n");
return 0;
}
|
4a227c3f4c59343495db3557e086ae17f438c2bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "scan.cuh"
#include "segmented_scan_helpers.cuh"
#include "fill.cuh"
#include <contrib/libs/cub/hipcub/hipcub.hpp>
namespace NKernel {
// Plain (unsegmented) prefix sum over `size` elements via CUB's DeviceScan.
// context.PartResults / context.NumParts are passed as the temporary-storage
// pointer and byte count that DeviceScan expects — presumably sized
// beforehand with ScanVectorTempSize (TODO confirm against callers).
// Returns the CUB call's error code.
template<typename T>
hipError_t ScanVector(const T* input, T* output, ui32 size, bool inclusive, TScanKernelContext<T>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T>;
if (inclusive) {
return hipcub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
} else {
return hipcub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
}
}
// Maps an element type to the signed type the segmented scans operate on:
// identity for all types except ui32, which is reinterpreted as int.
template <class T>
struct TToSignedConversion {
using TSignedType = T;
};
template <>
struct TToSignedConversion<ui32> {
using TSignedType = int;
};
// Segmented scan of non-negative values using TNonNegativeSegmentedSum as the
// combine operator; inputs are reinterpreted as the matching signed type.
// NOTE(review): presumably segment boundaries ride in the sign bit (hence the
// non-negative restriction) — confirm in segmented_scan_helpers.cuh.
template<typename T_>
hipError_t SegmentedScanNonNegativeVector(const T_* input, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_>;
using T = typename TToSignedConversion<T_>::TSignedType;
// 0.0f converts to 0 for the integral instantiations as well
T zeroValue = 0.0f;
if (inclusive) {
return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, (T*)output, TNonNegativeSegmentedSum(), size, stream);
} else {
return hipcub::DeviceScan::ExclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, (T*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream);
}
}
// Segmented scan whose results are scattered through `indices`: the custom
// output iterator writes the i-th scan result to a position derived from
// indices[i]. The exclusive variant zero-fills the output first and relies on
// the iterator's `false` specialization to shift the inclusive results.
// NOTE(review): the exact exclusive semantics live in
// TNonNegativeSegmentedScanOutputIterator (segmented_scan_helpers.cuh) —
// confirm there before relying on this description.
template<typename T_>
hipError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output,
ui32 size, bool inclusive,
TScanKernelContext<T_>& context,
TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_>;
using T = typename TToSignedConversion<T_>::TSignedType;
if (inclusive) {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size);
return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
} else {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size);
FillBuffer<T>((T*)output, 0, size, stream);
return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
}
}
// Asks CUB how many bytes of temporary storage a scan of `size` elements
// needs; the nullptr temp-storage call only computes the required size.
template <class T>
ui64 ScanVectorTempSize(ui32 size, bool inclusive) {
ui64 sizeInBytes = 0;
if (inclusive) {
hipcub::DeviceScan::InclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
} else {
hipcub::DeviceScan::ExclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
}
return sizeInBytes;
}
// Explicit instantiations for every supported element type.
#define SCAN_VECTOR(Type) \
template hipError_t ScanVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
template hipError_t SegmentedScanNonNegativeVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
template hipError_t SegmentedScanAndScatterNonNegativeVector<Type>(const Type *input, const ui32* indices, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
template ui64 ScanVectorTempSize<Type>(ui32, bool);
SCAN_VECTOR(int)
SCAN_VECTOR(ui32)
SCAN_VECTOR(float)
SCAN_VECTOR(double)
}
| 4a227c3f4c59343495db3557e086ae17f438c2bf.cu | #include "scan.cuh"
#include "segmented_scan_helpers.cuh"
#include "fill.cuh"
#include <contrib/libs/cub/cub/device/device_scan.cuh>
namespace NKernel {
// Plain (unsegmented) prefix sum over `size` elements via CUB's DeviceScan.
// context.PartResults / context.NumParts are passed as the temporary-storage
// pointer and byte count that DeviceScan expects — presumably sized
// beforehand with ScanVectorTempSize (TODO confirm against callers).
// Returns the CUB call's error code.
template<typename T>
cudaError_t ScanVector(const T* input, T* output, ui32 size, bool inclusive, TScanKernelContext<T>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T>;
if (inclusive) {
return cub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
} else {
return cub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
}
}
// Maps an element type to the signed type the segmented scans operate on:
// identity for all types except ui32, which is reinterpreted as int.
template <class T>
struct TToSignedConversion {
using TSignedType = T;
};
template <>
struct TToSignedConversion<ui32> {
using TSignedType = int;
};
// Segmented scan of non-negative values using TNonNegativeSegmentedSum as the
// combine operator; inputs are reinterpreted as the matching signed type.
// NOTE(review): presumably segment boundaries ride in the sign bit (hence the
// non-negative restriction) — confirm in segmented_scan_helpers.cuh.
template<typename T_>
cudaError_t SegmentedScanNonNegativeVector(const T_* input, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_>;
using T = typename TToSignedConversion<T_>::TSignedType;
// 0.0f converts to 0 for the integral instantiations as well
T zeroValue = 0.0f;
if (inclusive) {
return cub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, (T*)output, TNonNegativeSegmentedSum(), size, stream);
} else {
return cub::DeviceScan::ExclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, (T*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream);
}
}
// Segmented scan fused with a scatter: results are written through a custom
// output iterator to output[indices[i]] instead of output[i].
template<typename T_>
cudaError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output,
ui32 size, bool inclusive,
TScanKernelContext<T_>& context,
TCudaStream stream) {
using TKernelContext = TScanKernelContext<T_>;
using T = typename TToSignedConversion<T_>::TSignedType;
if (inclusive) {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size);
return cub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
} else {
TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size);
// Zero the destination first: positions never stored through the iterator
// must read as 0 in the exclusive result.
FillBuffer<T>((T*)output, 0, size, stream);
// NOTE(review): this branch runs an *inclusive* CUB scan; exclusivity is
// presumably realized by the iterator's `false` template flag shifting the
// store positions -- verify against the iterator's implementation.
return cub::DeviceScan::InclusiveScan((T*)context.PartResults, context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
}
}
// Size query: asks CUB how many bytes of device temp storage the matching
// ScanVector call will need. Passing nullptr buffers triggers CUB's
// size-only path, which writes the requirement into sizeInBytes.
template <class T>
ui64 ScanVectorTempSize(ui32 size, bool inclusive) {
ui64 sizeInBytes = 0;
if (inclusive) {
cub::DeviceScan::InclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
} else {
cub::DeviceScan::ExclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
}
return sizeInBytes;
}
// Explicit template instantiations of the scan entry points for every
// element type the library exposes.
#define SCAN_VECTOR(Type) \
template cudaError_t ScanVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
template cudaError_t SegmentedScanNonNegativeVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
template cudaError_t SegmentedScanAndScatterNonNegativeVector<Type>(const Type *input, const ui32* indices, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
template ui64 ScanVectorTempSize<Type>(ui32, bool);
SCAN_VECTOR(int)
SCAN_VECTOR(ui32)
SCAN_VECTOR(float)
SCAN_VECTOR(double)
}
|
71acb19441e0d381e274c7e823b06f7efd04e00c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/optimizer/common.cuh"
#include "orttraining/training_ops/cuda/optimizer/adam.h"
#include "orttraining/training_ops/cuda/optimizer/common.h"
namespace onnxruntime {
namespace cuda {
// Elementwise AdamW update, PyTorch flavor (weight_decay_mode == 0):
// bias correction divides the first/second moments individually, and decoupled
// weight decay (lambda * w) is folded into the update before the step is taken.
// One thread per parameter element. T1 = lr type, T3 = weight type,
// T4 = moment/compute type, T_GRAD = gradient type.
// grads_out / weights_out / mixed_precision_weights_out are optional (nullable).
template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
__global__ void _AdamOptimizer_mode0(
const T1* eta,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const T4 alpha,
const T4 beta,
const T4 lambda,
const T4 epsilon,
const T4 alpha_correction,
const T4 beta_correction,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
CUDA_LONG N) {
// id = this thread's element index; the macro returns early when id >= N.
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
// Combined scaling factor derived from the optional loss scale and gradient norm.
const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm);
// Gradient scaling/clipping.
const T4 g = T4(grads[id]) / actual_scale;
// A shared constant.
const T4 one = T4(1.0f);
// Compute exponentially-averaged historical gradient.
const T4 m1o = alpha * moment_1[id] + (one - alpha) * g;
const T4 m1o_corrected = m1o / alpha_correction;
// Compute exponentially-averaged historical squared gradient.
const T4 m2o = beta * moment_2[id] + (one - beta) * g * g;
const T4 m2o_corrected = m2o / beta_correction;
// Compute weight update.
const T4 denom = _Sqrt(m2o_corrected) + epsilon;
const T4 update = (m1o_corrected / denom) + (lambda * T4(weights[id]));
const T4 delta = -T4(*eta) * update;
// Compute the new gradient.
if (grads_out) {
grads_out[id] = T_GRAD(delta);
}
// Compute the new weight.
if (weights_out) {
weights_out[id] = weights[id] + T3(delta);
if (mixed_precision_weights_out) {
mixed_precision_weights_out[id] = static_cast<T_MIXED_PRECISION_FP>(weights_out[id]);
}
}
// Moments are always written back (uncorrected running averages).
moment_1_out[id] = m1o;
moment_2_out[id] = m2o;
}
// Elementwise AdamW update, HuggingFace flavor (weight_decay_mode == 1):
// bias correction is folded into the step size, and weight decay is applied
// after the parameter update (see the derivation comments inside).
// Same parameter contract as _AdamOptimizer_mode0.
template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
__global__ void _AdamOptimizer_mode1(
const T1* eta,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const T4 alpha,
const T4 beta,
const T4 lambda,
const T4 epsilon,
const T4 alpha_correction,
const T4 beta_correction,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
CUDA_LONG N) {
// id = this thread's element index; the macro returns early when id >= N.
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm);
// Gradient scaling/clipping.
const T4 g = T4(grads[id]) / actual_scale;
// A shared constant.
const T4 one = T4(1.0f);
// Compute exponentially-averaged historical gradient.
const T4 m1o = alpha * moment_1[id] + (one - alpha) * g;
// Compute exponentially-averaged historical squared gradient.
const T4 m2o = beta * moment_2[id] + (one - beta) * g * g;
// Unlike mode 0, m2o is NOT divided by beta_correction here; the correction
// terms enter through step_size below instead.
const T4 denom = _Sqrt(m2o) + epsilon;
// Apply bias correction terms on learning rate
const T4 step_size = T4(*eta) * _Sqrt(beta_correction) / alpha_correction;
// Huggingface updates weights in the following logic:
// param' = param - step_size * m1o / denom
// param_out = param' - original_lr * lambda * param'
// then param_out = param - step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom)
// so delta = -step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom)
const T4 delta = -step_size * m1o / denom - T4(*eta) * lambda * (T4(weights[id]) - step_size * m1o / denom);
// Compute the new gradient.
if (grads_out) {
grads_out[id] = T_GRAD(delta);
}
// Compute the new weight.
if (weights_out) {
weights_out[id] = weights[id] + T3(delta);
if (mixed_precision_weights_out) {
mixed_precision_weights_out[id] = static_cast<T_MIXED_PRECISION_FP>(weights_out[id]);
}
}
moment_1_out[id] = m1o;
moment_2_out[id] = m2o;
}
// Host-side launcher: computes the bias-correction coefficients, then launches
// one elementwise AdamW kernel (mode 0 = PyTorch, mode 1 = HuggingFace) with
// one thread per tensor element. Throws on any other weight_decay_mode.
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
void AdamOptimizerImpl(
const T1* eta,
const T2 update_count,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const T4 alpha,
const T4 beta,
const T4 lambda,
const T4 epsilon,
const bool do_bias_correction,
const int64_t weight_decay_mode,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
size_t count) {
// ceil(count / maxThreadsPerBlock). NOTE(review): the float cast loses
// precision for counts above 2^24 -- confirm tensor sizes stay below that,
// or switch to integer ceil-division.
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
// If bias correction coefficients are set to 1s, it's equivalent to disabling bias correction.
const T4 alpha_correction = do_bias_correction ?
onnxruntime::contrib::compute_bias_correction_coefficient(alpha, update_count) : T4(1.f);
const T4 beta_correction = do_bias_correction ?
onnxruntime::contrib::compute_bias_correction_coefficient(beta, update_count) : T4(1.f);
// Currently two modes of Adamw are supported:
// Mode 0: Pytorch https://pytorch.org/docs/stable/_modules/torch/optim/adamw.html#AdamW,
// bias correction is applied on m and v individually,
// weight decay is applied before weight is updated.
// Mode 1: Huggingface https://huggingface.co/transformers/_modules/transformers/optimization.html#AdamW.,
// bias correction is applied on learning rate,
// weight decay is applied after weight is updated.
if (weight_decay_mode == 0) {
hipLaunchKernelGGL(( _AdamOptimizer_mode0<T1, T3, T4, T_GRAD, T_GRAD_NORM, T_MIXED_PRECISION_FP>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
eta,
weights,
grads,
moment_1,
moment_2,
loss_scale,
grad_norm,
alpha,
beta,
lambda,
epsilon,
alpha_correction,
beta_correction,
moment_1_out,
moment_2_out,
weights_out,
grads_out,
mixed_precision_weights_out,
N);
}
else if (weight_decay_mode == 1) {
hipLaunchKernelGGL(( _AdamOptimizer_mode1<T1, T3, T4, T_GRAD, T_GRAD_NORM, T_MIXED_PRECISION_FP>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
eta,
weights,
grads,
moment_1,
moment_2,
loss_scale,
grad_norm,
alpha,
beta,
lambda,
epsilon,
alpha_correction,
beta_correction,
moment_1_out,
moment_2_out,
weights_out,
grads_out,
mixed_precision_weights_out,
N);
}
else {
// Shouldn't reach here
ORT_THROW("Unsupported Adamw optimizer mode.");
}
}
// Explicit instantiations for every (lr, count, weight, moment, grad,
// grad-norm, mixed-precision) type combination the training ops dispatch to.
#define SPECIALIZED_AdamOptimizerImpl(T1, T2, T3, T4, T_GRAD, T_GRAD_NORM, T_MIXED_PRECISION_FP) \
template void AdamOptimizerImpl( \
const T1* eta, \
const T2 update_count, \
const T3* weights, \
const T_GRAD* grads, \
const T4* moment_1, \
const T4* moment_2, \
const T3* loss_scale, \
const T_GRAD_NORM* grad_norm, \
const T4 alpha, \
const T4 beta, \
const T4 lambda, \
const T4 epsilon, \
const bool do_bias_correction, \
const int64_t weight_decay_mode, \
T4* moment_1_out, \
T4* moment_2_out, \
T3* weights_out, \
T_GRAD* grads_out, \
T_MIXED_PRECISION_FP* mixed_precision_weights_out, \
size_t count);
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, float, float, half)
SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, float, float, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, float, float, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, half, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, float, half)
SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, half, half)
SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, float, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, half, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, float, half)
} // namespace cuda
} // namespace onnxruntime
| 71acb19441e0d381e274c7e823b06f7efd04e00c.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/optimizer/common.cuh"
#include "orttraining/training_ops/cuda/optimizer/adam.h"
#include "orttraining/training_ops/cuda/optimizer/common.h"
namespace onnxruntime {
namespace cuda {
// Elementwise AdamW update, PyTorch flavor (weight_decay_mode == 0):
// bias correction divides the first/second moments individually, and decoupled
// weight decay (lambda * w) is folded into the update before the step is taken.
// One thread per parameter element. T1 = lr type, T3 = weight type,
// T4 = moment/compute type, T_GRAD = gradient type.
// grads_out / weights_out / mixed_precision_weights_out are optional (nullable).
template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
__global__ void _AdamOptimizer_mode0(
const T1* eta,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const T4 alpha,
const T4 beta,
const T4 lambda,
const T4 epsilon,
const T4 alpha_correction,
const T4 beta_correction,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
CUDA_LONG N) {
// id = this thread's element index; the macro returns early when id >= N.
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
// Combined scaling factor derived from the optional loss scale and gradient norm.
const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm);
// Gradient scaling/clipping.
const T4 g = T4(grads[id]) / actual_scale;
// A shared constant.
const T4 one = T4(1.0f);
// Compute exponentially-averaged historical gradient.
const T4 m1o = alpha * moment_1[id] + (one - alpha) * g;
const T4 m1o_corrected = m1o / alpha_correction;
// Compute exponentially-averaged historical squared gradient.
const T4 m2o = beta * moment_2[id] + (one - beta) * g * g;
const T4 m2o_corrected = m2o / beta_correction;
// Compute weight update.
const T4 denom = _Sqrt(m2o_corrected) + epsilon;
const T4 update = (m1o_corrected / denom) + (lambda * T4(weights[id]));
const T4 delta = -T4(*eta) * update;
// Compute the new gradient.
if (grads_out) {
grads_out[id] = T_GRAD(delta);
}
// Compute the new weight.
if (weights_out) {
weights_out[id] = weights[id] + T3(delta);
if (mixed_precision_weights_out) {
mixed_precision_weights_out[id] = static_cast<T_MIXED_PRECISION_FP>(weights_out[id]);
}
}
// Moments are always written back (uncorrected running averages).
moment_1_out[id] = m1o;
moment_2_out[id] = m2o;
}
// Elementwise AdamW update, HuggingFace flavor (weight_decay_mode == 1):
// bias correction is folded into the step size, and weight decay is applied
// after the parameter update (see the derivation comments inside).
// Same parameter contract as _AdamOptimizer_mode0.
template <typename T1, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
__global__ void _AdamOptimizer_mode1(
const T1* eta,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const T4 alpha,
const T4 beta,
const T4 lambda,
const T4 epsilon,
const T4 alpha_correction,
const T4 beta_correction,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
CUDA_LONG N) {
// id = this thread's element index; the macro returns early when id >= N.
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
const T4 actual_scale = _ComputeGradScale<T3, T_GRAD_NORM, T4>(loss_scale, grad_norm);
// Gradient scaling/clipping.
const T4 g = T4(grads[id]) / actual_scale;
// A shared constant.
const T4 one = T4(1.0f);
// Compute exponentially-averaged historical gradient.
const T4 m1o = alpha * moment_1[id] + (one - alpha) * g;
// Compute exponentially-averaged historical squared gradient.
const T4 m2o = beta * moment_2[id] + (one - beta) * g * g;
// Unlike mode 0, m2o is NOT divided by beta_correction here; the correction
// terms enter through step_size below instead.
const T4 denom = _Sqrt(m2o) + epsilon;
// Apply bias correction terms on learning rate
const T4 step_size = T4(*eta) * _Sqrt(beta_correction) / alpha_correction;
// Huggingface updates weights in the following logic:
// param' = param - step_size * m1o / denom
// param_out = param' - original_lr * lambda * param'
// then param_out = param - step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom)
// so delta = -step_size * m1o / denom - original_lr * lambda * (param - step_size * m1o / denom)
const T4 delta = -step_size * m1o / denom - T4(*eta) * lambda * (T4(weights[id]) - step_size * m1o / denom);
// Compute the new gradient.
if (grads_out) {
grads_out[id] = T_GRAD(delta);
}
// Compute the new weight.
if (weights_out) {
weights_out[id] = weights[id] + T3(delta);
if (mixed_precision_weights_out) {
mixed_precision_weights_out[id] = static_cast<T_MIXED_PRECISION_FP>(weights_out[id]);
}
}
moment_1_out[id] = m1o;
moment_2_out[id] = m2o;
}
// Host-side launcher: computes the bias-correction coefficients, then launches
// one elementwise AdamW kernel (mode 0 = PyTorch, mode 1 = HuggingFace) with
// one thread per tensor element. Throws on any other weight_decay_mode.
template <typename T1, typename T2, typename T3, typename T4, typename T_GRAD, typename T_GRAD_NORM, typename T_MIXED_PRECISION_FP>
void AdamOptimizerImpl(
const T1* eta,
const T2 update_count,
const T3* weights,
const T_GRAD* grads,
const T4* moment_1,
const T4* moment_2,
const T3* loss_scale,
const T_GRAD_NORM* grad_norm,
const T4 alpha,
const T4 beta,
const T4 lambda,
const T4 epsilon,
const bool do_bias_correction,
const int64_t weight_decay_mode,
T4* moment_1_out,
T4* moment_2_out,
T3* weights_out,
T_GRAD* grads_out,
T_MIXED_PRECISION_FP* mixed_precision_weights_out,
size_t count) {
// ceil(count / maxThreadsPerBlock). NOTE(review): the float cast loses
// precision for counts above 2^24 -- confirm tensor sizes stay below that,
// or switch to integer ceil-division.
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
// If bias correction coefficients are set to 1s, it's equivalent to disabling bias correction.
const T4 alpha_correction = do_bias_correction ?
onnxruntime::contrib::compute_bias_correction_coefficient(alpha, update_count) : T4(1.f);
const T4 beta_correction = do_bias_correction ?
onnxruntime::contrib::compute_bias_correction_coefficient(beta, update_count) : T4(1.f);
// Currently two modes of Adamw are supported:
// Mode 0: Pytorch https://pytorch.org/docs/stable/_modules/torch/optim/adamw.html#AdamW,
// bias correction is applied on m and v individually,
// weight decay is applied before weight is updated.
// Mode 1: Huggingface https://huggingface.co/transformers/_modules/transformers/optimization.html#AdamW.,
// bias correction is applied on learning rate,
// weight decay is applied after weight is updated.
if (weight_decay_mode == 0) {
_AdamOptimizer_mode0<T1, T3, T4, T_GRAD, T_GRAD_NORM, T_MIXED_PRECISION_FP><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
eta,
weights,
grads,
moment_1,
moment_2,
loss_scale,
grad_norm,
alpha,
beta,
lambda,
epsilon,
alpha_correction,
beta_correction,
moment_1_out,
moment_2_out,
weights_out,
grads_out,
mixed_precision_weights_out,
N);
}
else if (weight_decay_mode == 1) {
_AdamOptimizer_mode1<T1, T3, T4, T_GRAD, T_GRAD_NORM, T_MIXED_PRECISION_FP><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
eta,
weights,
grads,
moment_1,
moment_2,
loss_scale,
grad_norm,
alpha,
beta,
lambda,
epsilon,
alpha_correction,
beta_correction,
moment_1_out,
moment_2_out,
weights_out,
grads_out,
mixed_precision_weights_out,
N);
}
else {
// Shouldn't reach here
ORT_THROW("Unsupported Adamw optimizer mode.");
}
}
// Explicit instantiations for every (lr, count, weight, moment, grad,
// grad-norm, mixed-precision) type combination the training ops dispatch to.
#define SPECIALIZED_AdamOptimizerImpl(T1, T2, T3, T4, T_GRAD, T_GRAD_NORM, T_MIXED_PRECISION_FP) \
template void AdamOptimizerImpl( \
const T1* eta, \
const T2 update_count, \
const T3* weights, \
const T_GRAD* grads, \
const T4* moment_1, \
const T4* moment_2, \
const T3* loss_scale, \
const T_GRAD_NORM* grad_norm, \
const T4 alpha, \
const T4 beta, \
const T4 lambda, \
const T4 epsilon, \
const bool do_bias_correction, \
const int64_t weight_decay_mode, \
T4* moment_1_out, \
T4* moment_2_out, \
T3* weights_out, \
T_GRAD* grads_out, \
T_MIXED_PRECISION_FP* mixed_precision_weights_out, \
size_t count);
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, float, float, half)
SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, float, float, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, float, float, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, half, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, float, half, float, half)
SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, half, half)
SPECIALIZED_AdamOptimizerImpl(half, int64_t, float, half, half, float, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, half, half)
SPECIALIZED_AdamOptimizerImpl(float, int64_t, float, half, half, float, half)
} // namespace cuda
} // namespace onnxruntime
|
f10e96316df1a12516afff89ca62e6dbc2f52a0f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define GLM_FORCE_CUDA
#include <glm.hpp>
#include "sphere.h"
using namespace std;
using namespace crs;
// Ray/sphere intersection: solves |o + t*d - c|^2 = r^2 for t and returns the
// smaller root (the nearer surface point), or -1.0f when the ray misses.
__device__ float crs::SphereHit(Sphere *s, Ray *r){
  vec3 oc = r->origin - s->center;
  float a = glm::dot(r->direction, r->direction);
  float b = 2.0f * glm::dot(oc, r->direction);
  float c = glm::dot(oc, oc) - s->radius * s->radius;
  float disc = b*b - 4*a*c;
  if (disc < 0) {
    // negative discriminant: no real roots, the ray misses the sphere
    return -1.0f;
  }
  return (-b - glm::sqrt(disc)) / (2.0f * a);
}
// Intersect the record's ray against all `c` spheres, keeping the closest
// positive hit: updates the ray length, hit location, surface normal and bxdf.
// Bug fix: the original returned from the whole loop when one sphere's hit
// distance was not closer than the current best (d >= r->wi.length), so any
// closer sphere later in the array was never tested. A non-improving sphere
// must only be skipped, not abort the search.
__device__ void crs::TestSphereIntersections(Sphere *sphere, unsigned int c, HitRecord *r){
  // early exit for rays that have already terminated
  if (r->terminated) return;
  // loop over every sphere
  for (unsigned int i = 0; i < c; i++){
    // local copy
    Sphere s = sphere[i];
    float d = SphereHit(&s, &r->wi);
    // accept only hits in front of the origin (epsilon avoids self-hits)
    // that are closer than the best intersection found so far
    if (d > 0.00001f && d < r->wi.length){
      r->wi.length = d;
      r->location = r->wi.evaluate();
      r->normal = glm::normalize(r->location - s.center);
      r->bxdf = s.bxdf;
    }
  }
}
// One thread per pixel of a w x h image: flattens the 2D grid / 2D block
// launch into a linear index and intersects that pixel's hit record
// against every sphere.
__global__ void crs::KERNEL_SPHEREINTERSECT(Sphere *spheres, unsigned int count, HitRecord *hitrecords, int w, int h){
  unsigned long linearBlock = blockIdx.x + blockIdx.y * gridDim.x;
  unsigned long pixel = linearBlock * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
  // guard against the padded tail of the launch
  if (pixel >= w * h) return;
  TestSphereIntersections(spheres, count, &hitrecords[pixel]);
}
// Default sphere: unit radius, one unit along +z from the origin, shaded
// with bxdf slot 0.
crs::Sphere::Sphere(){
center = vec3(0.0f, 0.0f, 1.0f); // default position in world units
radius = 1.0f; // default radius in world units
bxdf = 0; // we're assuming there's at least one bxdf
}
// Trivial destructor: Sphere owns no resources.
crs::Sphere::~Sphere(){
}
| f10e96316df1a12516afff89ca62e6dbc2f52a0f.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#define GLM_FORCE_CUDA
#include <glm.hpp>
#include "sphere.h"
using namespace std;
using namespace crs;
// Ray/sphere intersection: solves |o + t*d - c|^2 = r^2 for t and returns the
// smaller root (the nearer surface point), or -1.0f when the ray misses.
__device__ float crs::SphereHit(Sphere *s, Ray *r){
  vec3 oc = r->origin - s->center;
  float a = glm::dot(r->direction, r->direction);
  float b = 2.0f * glm::dot(oc, r->direction);
  float c = glm::dot(oc, oc) - s->radius * s->radius;
  float disc = b*b - 4*a*c;
  if (disc < 0) {
    // negative discriminant: no real roots, the ray misses the sphere
    return -1.0f;
  }
  return (-b - glm::sqrt(disc)) / (2.0f * a);
}
// Intersect the record's ray against all `c` spheres, keeping the closest
// positive hit: updates the ray length, hit location, surface normal and bxdf.
// Bug fix: the original returned from the whole loop when one sphere's hit
// distance was not closer than the current best (d >= r->wi.length), so any
// closer sphere later in the array was never tested. A non-improving sphere
// must only be skipped, not abort the search.
__device__ void crs::TestSphereIntersections(Sphere *sphere, unsigned int c, HitRecord *r){
  // early exit for rays that have already terminated
  if (r->terminated) return;
  // loop over every sphere
  for (unsigned int i = 0; i < c; i++){
    // local copy
    Sphere s = sphere[i];
    float d = SphereHit(&s, &r->wi);
    // accept only hits in front of the origin (epsilon avoids self-hits)
    // that are closer than the best intersection found so far
    if (d > 0.00001f && d < r->wi.length){
      r->wi.length = d;
      r->location = r->wi.evaluate();
      r->normal = glm::normalize(r->location - s.center);
      r->bxdf = s.bxdf;
    }
  }
}
// One thread per pixel of a w x h image: flattens the 2D grid / 2D block
// launch into a linear index and intersects that pixel's hit record
// against every sphere.
__global__ void crs::KERNEL_SPHEREINTERSECT(Sphere *spheres, unsigned int count, HitRecord *hitrecords, int w, int h){
  unsigned long linearBlock = blockIdx.x + blockIdx.y * gridDim.x;
  unsigned long pixel = linearBlock * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
  // guard against the padded tail of the launch
  if (pixel >= w * h) return;
  TestSphereIntersections(spheres, count, &hitrecords[pixel]);
}
// Default sphere: unit radius, one unit along +z from the origin, shaded
// with bxdf slot 0.
crs::Sphere::Sphere(){
center = vec3(0.0f, 0.0f, 1.0f); // default position in world units
radius = 1.0f; // default radius in world units
bxdf = 0; // we're assuming there's at least one bxdf
}
// Trivial destructor: Sphere owns no resources.
crs::Sphere::~Sphere(){
}
|
d05ef3b6e1664825b4db379f60ef3fbaaab0893c.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
 * Check for CUDA errors; print the message (and source line when >= 0)
 * to stderr and exit the process if the last API call failed.
 */
void checkCUDAError(const char *msg, int line = -1) {
  hipError_t status = hipGetLastError();
  if (status == hipSuccess) {
    return;
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
int *dev_indexTempStorage;
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// 32-bit integer mixing hash (looks like Thomas Wang's mix -- used here to
// decorrelate per-boid RNG seeds derived from sequential indices).
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
// Seed one thrust RNG per (index, time) pair via the integer hash.
// NOTE(review): (int)(index * time) truncates -- distinct boids can collide
// to the same seed for small `time` values; acceptable for initialization,
// but verify if used elsewhere.
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
// each component uniform in [-1, 1]
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
// Fill `arr` with N random points uniformly distributed in [-scale, scale]^3.
// `time` perturbs the per-thread RNG seed. One thread per element.
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int i = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (i >= N) {
    return;
  }
  // componentwise: scale * rand, rand in [-1, 1]^3
  arr[i] = scale * generateRandomVec3(time, i);
}
/**
* Initialize memory, update some globals
*/
// Allocate all device buffers and initialize boid positions; also derives the
// uniform-grid parameters (cell width = 2 * max rule distance) from the scene
// size. Mutates the file-level globals (numObjects, dev_*, grid*).
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
// limit cell width to prevent crashing
//gridCellWidth = gridCellWidth < 5.0 ? 5.0 : gridCellWidth;
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
// shift the grid origin so the grid is centered on the scene
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
// debug print left in by the author
printf("\ngridCellCount = %d", gridCellCount);
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
hipMalloc((void**)&dev_indexTempStorage, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_indexTempStorage failed!");
// block until all allocations and the init kernel have completed
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
// Copy boid positions into the float4-per-vertex VBO, scaled by -1/s_scale
// so simulation coordinates map into normalized render space; w is set to 1.
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  const float c_scale = -1.0f / s_scale;
  int i = threadIdx.x + (blockIdx.x * blockDim.x);
  if (i >= N) {
    return;
  }
  vbo[4 * i + 0] = pos[i].x * c_scale;
  vbo[4 * i + 1] = pos[i].y * c_scale;
  vbo[4 * i + 2] = pos[i].z * c_scale;
  vbo[4 * i + 3] = 1.0f;
}
// Copy boid velocities into the float4-per-vertex VBO, biased by +0.3f per
// component (s_scale is unused but kept for a signature parallel to the
// position copy); w is set to 1.
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int i = threadIdx.x + (blockIdx.x * blockDim.x);
  if (i >= N) {
    return;
  }
  vbo[4 * i + 0] = vel[i].x + 0.3f;
  vbo[4 * i + 1] = vel[i].y + 0.3f;
  vbo[4 * i + 2] = vel[i].z + 0.3f;
  vbo[4 * i + 3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
// Launch the two copy kernels that fill the OpenGL position/velocity VBOs,
// then block until they finish. NOTE(review): raw <<<>>> launch syntax left
// by hipify -- fine under hipcc, but inconsistent with the
// hipLaunchKernelGGL style used elsewhere in this file.
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
// Assignment stub: intended to compute boid `iSelf`'s velocity change from the
// three flocking rules, but currently always returns a zero vector.
// kernUpdateVelocityBruteForce implements the rules inline instead of
// calling this helper.
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
// Rule 2: boids try to stay a distance d away from each other
// Rule 3: boids try to match the speed of surrounding boids
return glm::vec3(0.0f, 0.0f, 0.0f);
}
// Copy vel2 into vel1 elementwise ("ping-pong" step after a velocity update).
__global__ void kernSwapVel1Vel2(int N, glm::vec3 *vel1, glm::vec3 *vel2) {
  int i = threadIdx.x + (blockIdx.x * blockDim.x);
  if (i < N) {
    vel1[i] = vel2[i];
  }
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Brute-force O(N^2) flocking: each boid scans every other boid and applies
// the three Reynolds rules (cohesion, separation, alignment), then clamps the
// resulting speed to maxSpeed.
// NOTE(review): the rule contributions are ACCUMULATED into vel2 (+=), so this
// is only correct if vel2 already holds the boid's previous velocity -- i.e.
// it relies on kernSwapVel1Vel2 having copied vel2 into vel1 (leaving the two
// equal) before this kernel runs. Confirm the step ordering in the caller.
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
// Clamp the speed
// Record the new velocity into vel2. Question: why NOT vel1?
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
// running accumulators for the three rules
glm::vec3 center(0.0, 0.0, 0.0);
glm::vec3 prox(0.0, 0.0, 0.0);
glm::vec3 velocity(0.0, 0.0, 0.0);
int numNeighborsCenter = 0;
//int numNeighborsProx = 0;
int numNeighborsVelocity = 0;
for (int i = 0; i < N; i++)
{
if (i != index)
{
float dist = glm::distance(pos[i], pos[index]);
//printf("\ndist: %f", dist);
// Rule 1 (cohesion): sum neighbor positions within rule1Distance
if (dist < rule1Distance)
{
center = center + pos[i];
numNeighborsCenter++;
}
// Rule 2 (separation): push away from very close neighbors
if (dist < rule2Distance)
{
prox = prox - (pos[i] - pos[index]);
}
// Rule 3 (alignment): sum neighbor velocities within rule3Distance
if (dist < rule3Distance)
{
velocity = velocity + vel1[i];
numNeighborsVelocity++;
}
}
}
if (numNeighborsCenter != 0)
{
// steer toward the perceived center of mass
center = center / (float)numNeighborsCenter;
vel2[index] += (center - pos[index]) * rule1Scale;
}
if (numNeighborsVelocity != 0)
{
// NOTE(review): the averaging divide is commented out, so the alignment
// term scales with neighbor count -- verify against the reference rules.
//velocity = velocity / (float)numNeighborsVelocity;
vel2[index] += velocity * rule3Scale;
}
vel2[index] += prox * rule2Scale;
// clamp speed to maxSpeed
if (glm::length(vel2[index]) > maxSpeed)
{
vel2[index] = glm::normalize(vel2[index]) * maxSpeed;
}
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Advance each boid by vel * dt and wrap positions that leave the cube
// [-scene_scale, scene_scale]^3 around to the opposite face.
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  // wrap each axis independently so boids are never lost
  for (int axis = 0; axis < 3; ++axis) {
    if (p[axis] < -scene_scale) {
      p[axis] = scene_scale;
    } else if (p[axis] > scene_scale) {
      p[axis] = -scene_scale;
    }
  }
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
//  for(y)
//   for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  // Row-major flattening (x varies fastest), written in Horner form.
  return x + gridResolution * (y + gridResolution * z);
}
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  // Label boid `index` with the 1D index of the uniform-grid cell containing
  // it, and record `index` itself in a parallel array so boid data can later
  // be addressed through the sorted ordering.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Continuous grid-space coordinates; truncation by floor() gives the cell.
  glm::vec3 cell = (pos[index] - gridMin) * inverseCellWidth;
  gridIndices[index] = gridIndex3Dto1D((int)floor(cell.x), (int)floor(cell.y),
                                       (int)floor(cell.z), gridResolution);
  indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  // Fill intBuffer[0..N) with `value` (used each frame to mark every grid
  // cell as empty, i.e. -1, before start/end ranges are rebuilt).
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  // For each cell, find the [start, end] range it occupies in the *sorted*
  // particleGridIndices array. A cell boundary exists wherever the sorted
  // key differs from its predecessor.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  if (index == 0) {
    gridCellStartIndices[particleGridIndices[0]] = 0;
  } else if (particleGridIndices[index] != particleGridIndices[index - 1]) {
    gridCellStartIndices[particleGridIndices[index]] = index;
    gridCellEndIndices[particleGridIndices[index - 1]] = index - 1;
  }
  // BUG FIX: the last cell's end index was never written (it stayed -1),
  // which silently dropped the final cell's boids from neighbor search.
  if (index == N - 1) {
    gridCellEndIndices[particleGridIndices[index]] = index;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // One thread per *grid cell* (N == gridCellCount). For every boid in this
  // cell, apply the three flocking rules against boids in the 2x2x2 block of
  // cells nearest that boid. Cell width is 2x the largest rule radius, so
  // all candidate neighbors live inside those 8 cells. Boid data is
  // "scattered": particleArrayIndices maps sorted slots to pos/vel indices.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int start = gridCellStartIndices[index];
  int end = gridCellEndIndices[index];
  if (start == -1) {
    // Empty cell: nothing to update.
    return;
  }
  // loop through the boids occupying this voxel
  for (int p = start; p <= end; p++) {
    glm::vec3 center(0.0f, 0.0f, 0.0f);    // rule 1: perceived center of mass
    glm::vec3 prox(0.0f, 0.0f, 0.0f);      // rule 2: separation impulse
    glm::vec3 velocity(0.0f, 0.0f, 0.0f);  // rule 3: summed neighbor velocity
    int numNeighborsCenter = 0;
    int numNeighborsVelocity = 0;
    // unfold the particle index through the scattered index array
    int pIndex1 = particleArrayIndices[p];
    float voxelPosX = (pos[pIndex1].x - gridMin.x) * inverseCellWidth;
    float voxelPosY = (pos[pIndex1].y - gridMin.y) * inverseCellWidth;
    float voxelPosZ = (pos[pIndex1].z - gridMin.z) * inverseCellWidth;
    int x = floor(voxelPosX);
    int y = floor(voxelPosY);
    int z = floor(voxelPosZ);
    // Pick the 2x2x2 neighborhood: if the boid sits in the upper half of its
    // cell along an axis, search [cell, cell+1]; otherwise [cell-1, cell].
    // BUG FIX: the half-cell test must compare the in-cell fraction against
    // 0.5f, not 0.5f * inverseCellWidth (~0.05 for a width-10 cell), which
    // made the search almost always skip the lower-index neighbor cells.
    int startX = ((voxelPosX - x) > 0.5f) ? x : x < 1 ? 0 : x - 1;
    int startY = ((voxelPosY - y) > 0.5f) ? y : y < 1 ? 0 : y - 1;
    int startZ = ((voxelPosZ - z) > 0.5f) ? z : z < 1 ? 0 : z - 1;
    // lookup particles in the 8 candidate voxels (X varies fastest, matching
    // the 1D layout of gridIndex3Dto1D)
    for (int i = 0; i < 2; i++) {
      for (int j = 0; j < 2; j++) {
        for (int k = 0; k < 2; k++) {
          int Z = startZ + i;
          int Y = startY + j;
          int X = startX + k;
          if (Z < 0 || Z >= gridResolution ||
              Y < 0 || Y >= gridResolution ||
              X < 0 || X >= gridResolution) {
            continue;  // neighbor cell is outside the grid
          }
          int voxelIndex = gridIndex3Dto1D(X, Y, Z, gridResolution);
          int subStart = gridCellStartIndices[voxelIndex];
          int subEnd = gridCellEndIndices[voxelIndex];
          if (subStart == -1) {
            continue;  // neighbor cell is empty
          }
          for (int pIdx = subStart; pIdx <= subEnd; pIdx++) {
            // unfold particle index
            int pIndex2 = particleArrayIndices[pIdx];
            if (pIndex2 == pIndex1) {
              continue;  // a boid is not its own neighbor
            }
            float dist = glm::distance(pos[pIndex2], pos[pIndex1]);
            if (dist < rule1Distance) {  // cohesion
              center = center + pos[pIndex2];
              numNeighborsCenter++;
            }
            if (dist < rule2Distance) {  // separation
              prox = prox - (pos[pIndex2] - pos[pIndex1]);
            }
            if (dist < rule3Distance) {  // alignment
              velocity = velocity + vel1[pIndex2];
              numNeighborsVelocity++;
            }
          }
        }
      }
    }
    // BUG FIX: start from the boid's previous velocity (vel1) and overwrite
    // vel2, instead of `vel2[...] +=` which accumulated into whatever
    // stale/uninitialized data the output buffer held.
    glm::vec3 newVel = vel1[pIndex1];
    if (numNeighborsCenter != 0) {
      newVel += (center / (float)numNeighborsCenter - pos[pIndex1]) * rule1Scale;
    }
    newVel += prox * rule2Scale;
    if (numNeighborsVelocity != 0) {
      newVel += velocity * rule3Scale;
    }
    // clamp the speed before recording the result
    if (glm::length(newVel) > maxSpeed) {
      newVel = glm::normalize(newVel) * maxSpeed;
    }
    vel2[pIndex1] = newVel;
  }
}
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // One thread per *grid cell* (N == gridCellCount). Same as the scattered
  // version, except pos/vel1 have already been reshuffled into cell order,
  // so the start/end ranges index the boid arrays directly (one less level
  // of indirection).
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int start = gridCellStartIndices[index];
  int end = gridCellEndIndices[index];
  if (start == -1) {
    // Empty cell: nothing to update.
    return;
  }
  // loop through the boids occupying this voxel
  for (int p = start; p <= end; p++) {
    glm::vec3 center(0.0f, 0.0f, 0.0f);    // rule 1: perceived center of mass
    glm::vec3 prox(0.0f, 0.0f, 0.0f);      // rule 2: separation impulse
    glm::vec3 velocity(0.0f, 0.0f, 0.0f);  // rule 3: summed neighbor velocity
    int numNeighborsCenter = 0;
    int numNeighborsVelocity = 0;
    float voxelPosX = (pos[p].x - gridMin.x) * inverseCellWidth;
    float voxelPosY = (pos[p].y - gridMin.y) * inverseCellWidth;
    float voxelPosZ = (pos[p].z - gridMin.z) * inverseCellWidth;
    int x = floor(voxelPosX);
    int y = floor(voxelPosY);
    int z = floor(voxelPosZ);
    // Pick the 2x2x2 neighborhood nearest the boid.
    // BUG FIX: compare the in-cell fraction against 0.5f, not
    // 0.5f * inverseCellWidth, which almost always skipped the lower-index
    // neighbor cells.
    int startX = ((voxelPosX - x) > 0.5f) ? x : x < 1 ? 0 : x - 1;
    int startY = ((voxelPosY - y) > 0.5f) ? y : y < 1 ? 0 : y - 1;
    int startZ = ((voxelPosZ - z) > 0.5f) ? z : z < 1 ? 0 : z - 1;
    // lookup particles in the 8 candidate voxels (X varies fastest, matching
    // the 1D cell layout for coalesced access over coherent data)
    for (int i = 0; i < 2; i++) {
      for (int j = 0; j < 2; j++) {
        for (int k = 0; k < 2; k++) {
          int Z = startZ + i;
          int Y = startY + j;
          int X = startX + k;
          // check if out of bounds
          if (Z < 0 || Z >= gridResolution ||
              Y < 0 || Y >= gridResolution ||
              X < 0 || X >= gridResolution) {
            continue;
          }
          int voxelIndex = gridIndex3Dto1D(X, Y, Z, gridResolution);
          int subStart = gridCellStartIndices[voxelIndex];
          int subEnd = gridCellEndIndices[voxelIndex];
          if (subStart == -1) {
            continue;  // neighbor cell is empty
          }
          for (int pIdx = subStart; pIdx <= subEnd; pIdx++) {
            // data is coherent: pIdx addresses pos/vel1 directly
            if (pIdx == p) {
              continue;  // a boid is not its own neighbor
            }
            float dist = glm::distance(pos[pIdx], pos[p]);
            if (dist < rule1Distance) {  // cohesion
              center = center + pos[pIdx];
              numNeighborsCenter++;
            }
            if (dist < rule2Distance) {  // separation
              prox = prox - (pos[pIdx] - pos[p]);
            }
            if (dist < rule3Distance) {  // alignment
              velocity = velocity + vel1[pIdx];
              numNeighborsVelocity++;
            }
          }
        }
      }
    }
    // BUG FIX: start from the boid's previous velocity (vel1) and overwrite
    // vel2, instead of accumulating into stale output-buffer contents.
    glm::vec3 newVel = vel1[p];
    if (numNeighborsCenter != 0) {
      newVel += (center / (float)numNeighborsCenter - pos[p]) * rule1Scale;
    }
    newVel += prox * rule2Scale;
    if (numNeighborsVelocity != 0) {
      newVel += velocity * rule3Scale;
    }
    // clamp the speed before recording the result
    if (glm::length(newVel) > maxSpeed) {
      newVel = glm::normalize(newVel) * maxSpeed;
    }
    vel2[p] = newVel;
  }
}
/**
 * Step the entire N-body simulation by `dt` seconds.
 */
void Boids::stepSimulationNaive(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // update velocity: reads dev_vel1, writes dev_vel2
  kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(numObjects,
    dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  // BUG FIX: ping-pong the velocity buffers with a real pointer swap.
  // The previous `dev_vel1 = dev_vel2;` aliased both pointers to the same
  // allocation, leaking dev_vel1's original buffer and making later frames
  // read and write the same array concurrently.
  glm::vec3 *tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
  // update position using the freshly computed velocities
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt,
    dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
}
void Boids::stepSimulationScatteredGrid(float dt) {
  // Uniform-grid neighbor search with scattered boid data:
  //  1) label each boid with its grid-cell index and its array index,
  //  2) sort the boid indices by cell key with Thrust,
  //  3) rebuild the per-cell start/end ranges,
  //  4) run the grid-accelerated velocity update,
  //  5) ping-pong the velocity buffers and integrate positions.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGridVoxels((gridCellCount + blockSize - 1) / blockSize);
  kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Wrap device pointers in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  // reset grid start/end buffers to "empty" (-1)
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // record each cell's [start, end] range in the sorted index arrays
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // compute velocities: reads dev_vel1, writes dev_vel2
  kernUpdateVelNeighborSearchScattered << <fullBlocksPerGridVoxels, blockSize >> >(
    gridCellCount, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
  // BUG FIX: swap the ping-pong buffers instead of aliasing dev_vel1 to
  // dev_vel2 (which leaked the old buffer and collapsed the ping-pong).
  glm::vec3 *tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
  // update position using the freshly computed velocities
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt,
    dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // Uniform-grid neighbor search on cell-coherent data: after sorting, the
  // pos and vel1 arrays themselves are reordered by cell, so the neighbor
  // search kernel needs no index indirection.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  dim3 fullBlocksPerGridVoxels((gridCellCount + blockSize - 1) / blockSize);
  kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Keep an unsorted copy of the cell keys: each sort_by_key pass below
  // consumes (reorders) them, and we need the same keys three times.
  hipMemcpy(dev_indexTempStorage, dev_particleGridIndices, sizeof(int) * numObjects, hipMemcpyDeviceToDevice);
  // Wrap device pointers in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::device_ptr<glm::vec3> dev_thrust_values_pos(dev_pos);
  thrust::device_ptr<glm::vec3> dev_thrust_values_vel1(dev_vel1);
  // Sort indices, velocities, and positions into cell-coherent order.
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  hipMemcpy(dev_particleGridIndices, dev_indexTempStorage, sizeof(int) * numObjects, hipMemcpyDeviceToDevice); // reset keys
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values_vel1);
  hipMemcpy(dev_particleGridIndices, dev_indexTempStorage, sizeof(int) * numObjects, hipMemcpyDeviceToDevice); // reset keys
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values_pos);
  // reset grid start/end buffers to "empty" (-1)
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // record each cell's [start, end] range in the sorted arrays
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // compute velocities: reads dev_vel1, writes dev_vel2
  kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGridVoxels, blockSize >> >(
    gridCellCount, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  // BUG FIX: real ping-pong swap instead of aliasing both pointers to the
  // same buffer (which leaked dev_vel1's original allocation).
  glm::vec3 *tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
  // update position using the freshly computed velocities
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt,
    dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
}
void Boids::endSimulation() {
  // Release every device allocation made in Boids::initSimulation.
  // Grid bookkeeping buffers first, then the boid state arrays.
  hipFree(dev_particleArrayIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_gridCellEndIndices);
  hipFree(dev_indexTempStorage);
  hipFree(dev_vel1);
  hipFree(dev_vel2);
  hipFree(dev_pos);
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // Sanity-check thrust::sort_by_key on a small key/value pair array:
  // values should end up grouped by ascending key.
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  hipMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
  hipMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
  // Wrap device pointers in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup
  // BUG FIX: arrays allocated with new[] must be released with delete[];
  // plain `delete` on a new[] pointer is undefined behavior.
  delete[] intKeys;
  delete[] intValues;
  hipFree(dev_intKeys);
  hipFree(dev_intValues);
  checkCUDAErrorWithLine("hipFree failed!");
  return;
}
| d05ef3b6e1664825b4db379f60ef3fbaaab0893c.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
 * Check for CUDA errors; print and exit if there was a problem.
 */
void checkCUDAError(const char *msg, int line = -1) {
  cudaError_t err = cudaGetLastError();
  if (err == cudaSuccess) {
    return;  // nothing pending — fast path
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
int *dev_indexTempStorage;
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Bit-mixing integer hash: a fixed sequence of adds, xors, and shifts that
// scrambles the bits of `a` so that consecutive seeds produce decorrelated
// values. Used below to seed the per-thread RNG. The exact line order is
// significant — do not reorder.
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
 * LOOK-1.2 - this is a typical helper function for a CUDA kernel.
 * Function for generating a random vec3 with components in [-1, 1].
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  // BUG FIX: drawing all three samples inside the vec3 constructor relied on
  // the unspecified order of function-argument evaluation, so x/y/z could
  // receive the draws in a different order on different compilers. Naming
  // the draws makes the result deterministic.
  float x = (float)unitDistrib(rng);
  float y = (float)unitDistrib(rng);
  float z = (float)unitDistrib(rng);
  return glm::vec3(x, y, z);
}
/**
 * LOOK-1.2 - This is a basic CUDA kernel.
 * CUDA kernel for generating boids with a specified mass randomly around the star.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  // One thread per boid: scatter boid `index` inside a cube of half-width
  // `scale` centered on the origin.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  arr[index] = scale * generateRandomVec3(time, index);
}
/**
 * Initialize memory, update some globals
 */
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  // LOOK-1.2 - basic CUDA memory management and error checking.
  // Every allocation here is released in Boids::endSimulation.
  cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
  cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
  cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
  // LOOK-1.2 - seed the boid positions randomly across the scene.
  kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // LOOK-2.1 computing grid params: cell width is 2x the widest rule radius,
  // so each boid's full neighborhood fits in a 2x2x2 block of cells.
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // TODO-2.1 TODO-2.3 - additional buffers for the uniform-grid search.
  printf("\ngridCellCount = %d", gridCellCount);
  cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
  cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
  cudaMalloc((void**)&dev_indexTempStorage, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_indexTempStorage failed!");
  // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
  // the supported equivalent.
  cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
 * Copy the boid positions into the VBO so that they can be drawn by OpenGL.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  // Scale simulation-space coordinates by -1/s_scale before writing the
  // xyzw position into the interleaved float4 VBO.
  float c_scale = -1.0f / s_scale;
  vbo[4 * index + 0] = pos[index].x * c_scale;
  vbo[4 * index + 1] = pos[index].y * c_scale;
  vbo[4 * index + 2] = pos[index].z * c_scale;
  vbo[4 * index + 3] = 1.0f;
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  // Write each boid's velocity, biased by +0.3f per component, into the
  // interleaved float4 VBO (w fixed at 1.0f).
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  vbo[4 * index + 0] = vel[index].x + 0.3f;
  vbo[4 * index + 1] = vel[index].y + 0.3f;
  vbo[4 * index + 2] = vel[index].z + 0.3f;
  vbo[4 * index + 3] = 1.0f;
}
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernels: fills the
 * position and velocity VBOs from the current device-side boid state.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
  // the supported equivalent.
  cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
 * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
 * __device__ code can be called from a __global__ context
 * Compute the new velocity on the body with index `iSelf` due to the `N` boids
 * in the `pos` and `vel` arrays.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  // Unimplemented stub (TODO-1.2): currently reports no velocity change.
  // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
  // Rule 2: boids try to stay a distance d away from each other
  // Rule 3: boids try to match the speed of surrounding boids
  glm::vec3 change(0.0f, 0.0f, 0.0f);
  return change;
}
__global__ void kernSwapVel1Vel2(int N, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Element-wise copy of vel2 into vel1 (despite the name, this copies —
  // it does not exchange the two buffers).
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index < N) {
    vel1[index] = vel2[index];
  }
}
/**
 * TODO-1.2 implement basic flocking
 * For each of the `N` bodies, update its velocity from the three boid rules.
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
  glm::vec3 *vel1, glm::vec3 *vel2) {
  // Brute-force O(N^2): compare this boid against every other boid, apply
  // the three flocking rules, clamp the speed, and record the result in
  // vel2 — NOT vel1, so concurrent threads still read a consistent set of
  // old velocities while the new ones are being produced.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 center(0.0f, 0.0f, 0.0f);    // rule 1: perceived center of mass
  glm::vec3 prox(0.0f, 0.0f, 0.0f);      // rule 2: separation impulse
  glm::vec3 velocity(0.0f, 0.0f, 0.0f);  // rule 3: summed neighbor velocity
  int numNeighborsCenter = 0;
  int numNeighborsVelocity = 0;
  for (int i = 0; i < N; i++) {
    if (i == index) {
      continue;  // a boid is not its own neighbor
    }
    float dist = glm::distance(pos[i], pos[index]);
    if (dist < rule1Distance) {  // cohesion
      center = center + pos[i];
      numNeighborsCenter++;
    }
    if (dist < rule2Distance) {  // separation
      prox = prox - (pos[i] - pos[index]);
    }
    if (dist < rule3Distance) {  // alignment
      velocity = velocity + vel1[i];
      numNeighborsVelocity++;
    }
  }
  // BUG FIX: start from the boid's previous velocity (vel1) and overwrite
  // vel2. The original `vel2[index] +=` accumulated into whatever
  // stale/uninitialized data the output buffer happened to hold.
  glm::vec3 newVel = vel1[index];
  if (numNeighborsCenter != 0) {
    newVel += (center / (float)numNeighborsCenter - pos[index]) * rule1Scale;
  }
  newVel += prox * rule2Scale;
  if (numNeighborsVelocity != 0) {
    newVel += velocity * rule3Scale;
  }
  // clamp the speed before recording the new velocity
  if (glm::length(newVel) > maxSpeed) {
    newVel = glm::normalize(newVel) * maxSpeed;
  }
  vel2[index] = newVel;
}
/**
 * LOOK-1.2 Since this is pretty trivial, we implemented it for you.
 * For each of the `N` bodies, update its position based on its current velocity.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  // Explicit Euler step: x += v * dt, one thread per boid.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 p = pos[index] + vel[index] * dt;
  // Toroidal wrap: a boid leaving one face of the cube re-enters through the
  // opposite face, so no boid is ever lost from the domain.
  for (int c = 0; c < 3; c++) {
    if (p[c] < -scene_scale) {
      p[c] = scene_scale;
    } else if (p[c] > scene_scale) {
      p[c] = -scene_scale;
    }
  }
  pos[index] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
//  for(y)
//   for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  // Row-major flattening (x varies fastest), written in Horner form.
  return x + gridResolution * (y + gridResolution * z);
}
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  // Label boid `index` with the 1D index of the uniform-grid cell containing
  // it, and record `index` itself in a parallel array so boid data can later
  // be addressed through the sorted ordering.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Continuous grid-space coordinates; truncation by floor() gives the cell.
  glm::vec3 cell = (pos[index] - gridMin) * inverseCellWidth;
  gridIndices[index] = gridIndex3Dto1D((int)floor(cell.x), (int)floor(cell.y),
                                       (int)floor(cell.z), gridResolution);
  indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  // Fill intBuffer[0..N) with `value` (used each frame to mark every grid
  // cell as empty, i.e. -1, before start/end ranges are rebuilt).
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  intBuffer[index] = value;
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  // For each cell, find the [start, end] range it occupies in the *sorted*
  // particleGridIndices array. A cell boundary exists wherever the sorted
  // key differs from its predecessor.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  if (index == 0) {
    gridCellStartIndices[particleGridIndices[0]] = 0;
  } else if (particleGridIndices[index] != particleGridIndices[index - 1]) {
    gridCellStartIndices[particleGridIndices[index]] = index;
    gridCellEndIndices[particleGridIndices[index - 1]] = index - 1;
  }
  // BUG FIX: the last cell's end index was never written (it stayed -1),
  // which silently dropped the final cell's boids from neighbor search.
  if (index == N - 1) {
    gridCellEndIndices[particleGridIndices[index]] = index;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // One thread per *grid cell* (N == gridCellCount). For every boid in this
  // cell, apply the three flocking rules against boids in the 2x2x2 block of
  // cells nearest that boid. Cell width is 2x the largest rule radius, so
  // all candidate neighbors live inside those 8 cells. Boid data is
  // "scattered": particleArrayIndices maps sorted slots to pos/vel indices.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  int start = gridCellStartIndices[index];
  int end = gridCellEndIndices[index];
  if (start == -1) {
    // Empty cell: nothing to update.
    return;
  }
  // loop through the boids occupying this voxel
  for (int p = start; p <= end; p++) {
    glm::vec3 center(0.0f, 0.0f, 0.0f);    // rule 1: perceived center of mass
    glm::vec3 prox(0.0f, 0.0f, 0.0f);      // rule 2: separation impulse
    glm::vec3 velocity(0.0f, 0.0f, 0.0f);  // rule 3: summed neighbor velocity
    int numNeighborsCenter = 0;
    int numNeighborsVelocity = 0;
    // unfold the particle index through the scattered index array
    int pIndex1 = particleArrayIndices[p];
    float voxelPosX = (pos[pIndex1].x - gridMin.x) * inverseCellWidth;
    float voxelPosY = (pos[pIndex1].y - gridMin.y) * inverseCellWidth;
    float voxelPosZ = (pos[pIndex1].z - gridMin.z) * inverseCellWidth;
    int x = floor(voxelPosX);
    int y = floor(voxelPosY);
    int z = floor(voxelPosZ);
    // Pick the 2x2x2 neighborhood: if the boid sits in the upper half of its
    // cell along an axis, search [cell, cell+1]; otherwise [cell-1, cell].
    // BUG FIX: the half-cell test must compare the in-cell fraction against
    // 0.5f, not 0.5f * inverseCellWidth (~0.05 for a width-10 cell), which
    // made the search almost always skip the lower-index neighbor cells.
    int startX = ((voxelPosX - x) > 0.5f) ? x : x < 1 ? 0 : x - 1;
    int startY = ((voxelPosY - y) > 0.5f) ? y : y < 1 ? 0 : y - 1;
    int startZ = ((voxelPosZ - z) > 0.5f) ? z : z < 1 ? 0 : z - 1;
    // lookup particles in the 8 candidate voxels (X varies fastest, matching
    // the 1D layout of gridIndex3Dto1D)
    for (int i = 0; i < 2; i++) {
      for (int j = 0; j < 2; j++) {
        for (int k = 0; k < 2; k++) {
          int Z = startZ + i;
          int Y = startY + j;
          int X = startX + k;
          if (Z < 0 || Z >= gridResolution ||
              Y < 0 || Y >= gridResolution ||
              X < 0 || X >= gridResolution) {
            continue;  // neighbor cell is outside the grid
          }
          int voxelIndex = gridIndex3Dto1D(X, Y, Z, gridResolution);
          int subStart = gridCellStartIndices[voxelIndex];
          int subEnd = gridCellEndIndices[voxelIndex];
          if (subStart == -1) {
            continue;  // neighbor cell is empty
          }
          for (int pIdx = subStart; pIdx <= subEnd; pIdx++) {
            // unfold particle index
            int pIndex2 = particleArrayIndices[pIdx];
            if (pIndex2 == pIndex1) {
              continue;  // a boid is not its own neighbor
            }
            float dist = glm::distance(pos[pIndex2], pos[pIndex1]);
            if (dist < rule1Distance) {  // cohesion
              center = center + pos[pIndex2];
              numNeighborsCenter++;
            }
            if (dist < rule2Distance) {  // separation
              prox = prox - (pos[pIndex2] - pos[pIndex1]);
            }
            if (dist < rule3Distance) {  // alignment
              velocity = velocity + vel1[pIndex2];
              numNeighborsVelocity++;
            }
          }
        }
      }
    }
    // BUG FIX: start from the boid's previous velocity (vel1) and overwrite
    // vel2, instead of `vel2[...] +=` which accumulated into whatever
    // stale/uninitialized data the output buffer held.
    glm::vec3 newVel = vel1[pIndex1];
    if (numNeighborsCenter != 0) {
      newVel += (center / (float)numNeighborsCenter - pos[pIndex1]) * rule1Scale;
    }
    newVel += prox * rule2Scale;
    if (numNeighborsVelocity != 0) {
      newVel += velocity * rule3Scale;
    }
    // clamp the speed before recording the result
    if (glm::length(newVel) > maxSpeed) {
      newVel = glm::normalize(newVel) * maxSpeed;
    }
    vel2[pIndex1] = newVel;
  }
}
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Same neighbor-search velocity update as the scattered variant, but pos and
  // vel1 have already been reordered into cell order, so gridCellStartIndices /
  // gridCellEndIndices index those arrays directly (no particleArrayIndices).
  // NOTE(review): launched with one thread per grid CELL (caller passes
  // gridCellCount as N); each thread loops over its cell's particles.
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (index >= N) {
    return;
  }
  // Sorted-range bounds of this cell; start == -1 marks an empty cell.
  int start = gridCellStartIndices[index];
  int end = gridCellEndIndices[index];
  if (start == -1)
  {
    return;
  }
  // loop through points in the voxel
  for (int p = start; p <= end; p++)
  {
    // Per-rule accumulators for the current particle.
    glm::vec3 center(0.0, 0.0, 0.0);
    glm::vec3 prox(0.0, 0.0, 0.0);
    glm::vec3 velocity(0.0, 0.0, 0.0);
    int numNeighborsCenter = 0;
    int numNeighborsVelocity = 0;
    // get quadrant and neighboring voxels
    // find indices from pos
    // no need to unfold particle index
    //int pIndex1 = particleArrayIndices[p];
    // Position expressed in cell units relative to the grid origin.
    float voxelPosX = (pos[p].x - gridMin.x) * inverseCellWidth;
    float voxelPosY = (pos[p].y - gridMin.y) * inverseCellWidth;
    float voxelPosZ = (pos[p].z - gridMin.z) * inverseCellWidth;
    int x = floor(voxelPosX);
    int y = floor(voxelPosY);
    int z = floor(voxelPosZ);
    // Choose the 2x2x2 neighborhood based on which half of the cell the
    // particle occupies (clamped to 0 at the low edge).
    // NOTE(review): same threshold concern as the scattered kernel —
    // (voxelPosX - x) is a [0,1) fraction; comparing it to
    // 0.5f * inverseCellWidth only equals 0.5f when cellWidth == 1. Confirm.
    int startX = ((voxelPosX - x) > (0.5f * inverseCellWidth)) ? x : x < 1 ? 0 : x - 1;
    int startY = ((voxelPosY - y) > (0.5f * inverseCellWidth)) ? y : y < 1 ? 0 : y - 1;
    int startZ = ((voxelPosZ - z) > (0.5f * inverseCellWidth)) ? z : z < 1 ? 0 : z - 1;
    // lookup particles in neighboring voxels
    for (int i = 0; i < 2; i++)
    {
      for (int j = 0; j < 2; j++)
      {
        for (int k = 0; k < 2; k++)
        {
          int Z = startZ + i;
          int Y = startY + j;
          int X = startX + k;
          // check if out of bounds
          if (Z < 0 || Z >= gridResolution ||
              Y < 0 || Y >= gridResolution ||
              X < 0 || X >= gridResolution)
          {
            continue;
          }
          else
          {
            // get the points at the current voxel
            int voxelIndex = gridIndex3Dto1D(X, Y, Z, gridResolution);
            int subStart = gridCellStartIndices[voxelIndex];
            int subEnd = gridCellEndIndices[voxelIndex];
            if (subStart != -1)
            {
              for (int pIdx = subStart; pIdx <= subEnd; pIdx++)
              {
                // no need to unfold particle index
                //int pIndex2 = particleArrayIndices[pIdx];
                if (pIdx != p) // exclude self
                {
                  float dist = glm::distance(pos[pIdx], pos[p]);
                  //printf("\ndist: %f", dist);
                  // Rule 1: cohesion — accumulate neighbor centroid.
                  if (dist < rule1Distance)
                  {
                    center = center + pos[pIdx];
                    numNeighborsCenter++;
                  }
                  // Rule 2: separation — push away from close neighbors.
                  if (dist < rule2Distance)
                  {
                    prox = prox - (pos[pIdx] - pos[p]);
                  }
                  // Rule 3: alignment — accumulate neighbor velocities.
                  if (dist < rule3Distance)
                  {
                    velocity = velocity + vel1[pIdx];
                    numNeighborsVelocity++;
                  }
                }
              }
            }
          }
        }
      }
    }
    // Apply the three rules.
    // NOTE(review): '+=' accumulates into vel2's existing contents rather than
    // starting from vel1[p]; the host step functions alias dev_vel1/dev_vel2
    // after the first step — verify before changing the ping-pong scheme.
    if (numNeighborsCenter != 0)
    {
      center = center / (float)numNeighborsCenter;
      vel2[p] += (center - pos[p]) * rule1Scale;
    }
    if (numNeighborsVelocity != 0)
    {
      //velocity = velocity / (float)numNeighborsVelocity;
      vel2[p] += velocity * rule3Scale;
    }
    vel2[p] += prox * rule2Scale;
    // Clamp the resulting speed.
    if (glm::length(vel2[p]) > maxSpeed)
    {
      vel2[p] = glm::normalize(vel2[p]) * maxSpeed;
    }
  }
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
  // Advance the naive O(N^2) simulation by dt seconds: brute-force velocity
  // update over all boid pairs, then Euler position integration.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // update velocity
  kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> >(numObjects,
    dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  // swap velocity
  // NOTE(review): this assignment ALIASES dev_vel1 onto dev_vel2 instead of
  // exchanging the two pointers; after the first step both names refer to one
  // buffer (the original vel1 allocation leaks, and endSimulation then frees
  // the same pointer twice). A real ping-pong would swap the pointers —
  // confirm against kernUpdateVelocityBruteForce's read/write pattern first.
  dev_vel1 = dev_vel2;
  // update position
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt,
    dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
}
void Boids::stepSimulationScatteredGrid(float dt) {
  // Advance the simulation by dt using the scattered uniform grid:
  // label -> sort -> find cell ranges -> neighbor-search velocity update ->
  // integrate positions.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);          // one thread per boid
  dim3 fullBlocksPerGridVoxels((gridCellCount + blockSize - 1) / blockSize); // one thread per cell
  // Label each particle with its grid-cell index and its own array index.
  kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  // reset grid start buffer (-1 marks an empty cell)
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // reset grid end buffer
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // save start end particle indices in start end buffers
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // compute velocities — launched per grid CELL (gridCellCount threads),
  // matching the kernel's per-cell iteration scheme.
  kernUpdateVelNeighborSearchScattered << <fullBlocksPerGridVoxels, blockSize >> >(
    gridCellCount, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
  // swap velocity
  // NOTE(review): aliases dev_vel1 onto dev_vel2 rather than swapping the
  // pointers — leaks the old vel1 buffer and makes endSimulation double-free.
  // The velocity kernel's 'vel2 +=' accumulation appears to depend on this
  // aliasing, so fix both together.
  dev_vel1 = dev_vel2;
  // update position
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt,
    dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // Advance the simulation by dt using the coherent uniform grid: the boid
  // data itself (pos, vel1) is reordered into cell order so the velocity
  // kernel needs no index indirection.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);          // one thread per boid
  dim3 fullBlocksPerGridVoxels((gridCellCount + blockSize - 1) / blockSize); // one thread per cell
  // Label each particle with its grid-cell index and its own array index.
  kernComputeIndices << <fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount,
    gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("kernComputeIndices failed!");
  // store temp — keep an unsorted copy of the keys so they can be restored
  // between the three co-sorts below.
  cudaMemcpy(dev_indexTempStorage, dev_particleGridIndices, sizeof(int) * numObjects, cudaMemcpyDeviceToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
  thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
  thrust::device_ptr<glm::vec3> dev_thrust_values_pos(dev_pos);
  thrust::device_ptr<glm::vec3> dev_thrust_values_vel1(dev_vel1);
  // LOOK-2.1 Example for using thrust::sort_by_key
  // NOTE(review): the keys are sorted three separate times (restoring them
  // from dev_indexTempStorage in between) to reorder indices, vel1 and pos in
  // turn; a single sort plus a gather kernel would avoid the repeated sorts.
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
  cudaMemcpy(dev_particleGridIndices, dev_indexTempStorage, sizeof(int) * numObjects, cudaMemcpyDeviceToDevice); // reset idx
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values_vel1);
  cudaMemcpy(dev_particleGridIndices, dev_indexTempStorage, sizeof(int) * numObjects, cudaMemcpyDeviceToDevice); // reset idx
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values_pos);
  // reset grid start buffer (-1 marks an empty cell)
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // reset grid end buffer
  kernResetIntBuffer << <fullBlocksPerGridVoxels, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1);
  checkCUDAErrorWithLine("kernResetIntBuffer failed!");
  // save start end particle indices in start end buffers
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
  // compute velocities — launched per grid CELL (gridCellCount threads),
  // matching the kernel's per-cell iteration scheme.
  kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGridVoxels, blockSize >> >(
    gridCellCount, gridSideCount, gridMinimum,
    gridInverseCellWidth, gridCellWidth,
    dev_gridCellStartIndices, dev_gridCellEndIndices,
    dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
  // swap velocity
  // NOTE(review): aliases dev_vel1 onto dev_vel2 rather than swapping the
  // pointers — leaks the old vel1 buffer and makes endSimulation double-free.
  // The velocity kernel's 'vel2 +=' accumulation appears to depend on this
  // aliasing, so fix both together.
  dev_vel1 = dev_vel2;
  // update position
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> >(numObjects, dt,
    dev_pos, dev_vel1);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
}
void Boids::endSimulation() {
  // Release every device-side simulation buffer.
  // NOTE(review): the step functions assign dev_vel1 = dev_vel2, so by the
  // time this runs both pointers usually alias the same allocation; freeing
  // both is then a double free, and the original vel1 buffer has already
  // leaked — fix the ping-pong in the step functions, then this is correct.
  cudaFree(dev_vel1);
  cudaFree(dev_vel2);
  cudaFree(dev_pos);
  // TODO-2.1 TODO-2.3 - Free any additional buffers here.
  cudaFree(dev_particleArrayIndices);
  cudaFree(dev_particleGridIndices);
  cudaFree(dev_gridCellStartIndices);
  cudaFree(dev_gridCellEndIndices);
  cudaFree(dev_indexTempStorage);
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // Sanity-check thrust::sort_by_key on a small key/value set and demonstrate
  // host<->device copies; prints the pairs before and after sorting by key.
  int *dev_intKeys;
  int *dev_intValues;
  int N = 10;
  int *intKeys = new int[N];
  int *intValues = new int[N];
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << "  key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // cleanup
  // Fixed: arrays allocated with new[] must be released with delete[]
  // (plain delete on a new[] pointer is undefined behavior).
  delete[] intKeys;
  delete[] intValues;
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  checkCUDAErrorWithLine("cudaFree failed!");
  return;
}
|
c2d9a740d9c52e7c4bf690d91dfdd3ff762b1ada.hip | // !!! This is a file automatically generated by hipify!!!
#include "scan.cuh"
#include "segmented_scan_helpers.cuh"
#include "fill.cuh"
#include <contrib/libs/cub/hipcub/hipcub.hpp>
namespace NKernel {

    // Unsegmented prefix sum of `input` into `output` via hipCUB's DeviceScan.
    // Follows CUB's two-phase temp-storage protocol: context.PartResults is the
    // temporary-storage pointer and context.NumParts its size in bytes.
    // NOTE(review): PartResults is passed without .Get() here, unlike the
    // segmented variants below — presumably an implicit conversion; confirm.
    template<typename T>
    hipError_t ScanVector(const T* input, T* output, ui32 size, bool inclusive, TScanKernelContext<T>& context, TCudaStream stream) {
        using TKernelContext = TScanKernelContext<T>;
        if (inclusive) {
            return hipcub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
        } else {
            return hipcub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
        }
    }

    // Maps the element type to the signed type used by the segmented-scan
    // operator (identity by default, ui32 -> int below).
    template <class T>
    struct TToSignedConversion {
        using TSignedType = T;
    };

    template <>
    struct TToSignedConversion<ui32> {
        using TSignedType = int;
    };

    // Segmented prefix sum for non-negative inputs; segment boundaries are
    // carried in the values themselves (see TNonNegativeSegmentedSum).
    template<typename T_>
    hipError_t SegmentedScanNonNegativeVector(const T_* input, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) {
        using TKernelContext = TScanKernelContext<T_>;
        using T = typename TToSignedConversion<T_>::TSignedType;
        T zeroValue = 0.0f; // zero of T; the float literal converts to 0 for integral T
        if (inclusive) {
            return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*)input, (T*)output, TNonNegativeSegmentedSum(), size, stream);
        } else {
            return hipcub::DeviceScan::ExclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*) input, (T*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream);
        }
    }

    // Segmented scan whose results are scattered through `indices`. The
    // "exclusive" branch is an inclusive scan written through an output
    // iterator shifted by one position, after zero-filling the output.
    template<typename T_>
    hipError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output,
                                                        ui32 size, bool inclusive,
                                                        TScanKernelContext<T_>& context,
                                                        TCudaStream stream) {
        using TKernelContext = TScanKernelContext<T_>;
        using T = typename TToSignedConversion<T_>::TSignedType;
        if (inclusive) {
            TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size);
            return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
        } else {
            TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size);
            FillBuffer<T>((T*)output, 0, size, stream);
            return hipcub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
        }
    }

    // Queries the temporary-storage size (in bytes) DeviceScan needs for a
    // scan of `size` elements, via CUB's null-pointer sizing call.
    template <class T>
    ui64 ScanVectorTempSize(ui32 size, bool inclusive) {
        ui64 sizeInBytes = 0;
        if (inclusive) {
            hipcub::DeviceScan::InclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
        } else {
            hipcub::DeviceScan::ExclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
        }
        return sizeInBytes;
    }

    // Explicit instantiations for the supported element types.
    #define SCAN_VECTOR(Type) \
    template hipError_t ScanVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
    template hipError_t SegmentedScanNonNegativeVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
    template hipError_t SegmentedScanAndScatterNonNegativeVector<Type>(const Type *input, const ui32* indices, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
    template ui64 ScanVectorTempSize<Type>(ui32, bool);

    SCAN_VECTOR(int)
    SCAN_VECTOR(ui32)
    SCAN_VECTOR(float)
    SCAN_VECTOR(double)
}
| c2d9a740d9c52e7c4bf690d91dfdd3ff762b1ada.cu | #include "scan.cuh"
#include "segmented_scan_helpers.cuh"
#include "fill.cuh"
#include <contrib/libs/cub/cub/device/device_scan.cuh>
namespace NKernel {

    // Unsegmented prefix sum of `input` into `output` via CUB's DeviceScan.
    // Follows CUB's two-phase temp-storage protocol: context.PartResults is the
    // temporary-storage pointer and context.NumParts its size in bytes.
    // NOTE(review): PartResults is passed without .Get() here, unlike the
    // segmented variants below — presumably an implicit conversion; confirm.
    template<typename T>
    cudaError_t ScanVector(const T* input, T* output, ui32 size, bool inclusive, TScanKernelContext<T>& context, TCudaStream stream) {
        using TKernelContext = TScanKernelContext<T>;
        if (inclusive) {
            return cub::DeviceScan::InclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
        } else {
            return cub::DeviceScan::ExclusiveSum(context.PartResults, context.NumParts, input, output, size, stream);
        }
    }

    // Maps the element type to the signed type used by the segmented-scan
    // operator (identity by default, ui32 -> int below).
    template <class T>
    struct TToSignedConversion {
        using TSignedType = T;
    };

    template <>
    struct TToSignedConversion<ui32> {
        using TSignedType = int;
    };

    // Segmented prefix sum for non-negative inputs; segment boundaries are
    // carried in the values themselves (see TNonNegativeSegmentedSum).
    template<typename T_>
    cudaError_t SegmentedScanNonNegativeVector(const T_* input, T_* output, ui32 size, bool inclusive, TScanKernelContext<T_>& context, TCudaStream stream) {
        using TKernelContext = TScanKernelContext<T_>;
        using T = typename TToSignedConversion<T_>::TSignedType;
        T zeroValue = 0.0f; // zero of T; the float literal converts to 0 for integral T
        if (inclusive) {
            return cub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*)input, (T*)output, TNonNegativeSegmentedSum(), size, stream);
        } else {
            return cub::DeviceScan::ExclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*) input, (T*)output, TNonNegativeSegmentedSum(), zeroValue, size, stream);
        }
    }

    // Segmented scan whose results are scattered through `indices`. The
    // "exclusive" branch is an inclusive scan written through an output
    // iterator shifted by one position, after zero-filling the output.
    template<typename T_>
    cudaError_t SegmentedScanAndScatterNonNegativeVector(const T_* input, const ui32* indices, T_* output,
                                                         ui32 size, bool inclusive,
                                                         TScanKernelContext<T_>& context,
                                                         TCudaStream stream) {
        using TKernelContext = TScanKernelContext<T_>;
        using T = typename TToSignedConversion<T_>::TSignedType;
        if (inclusive) {
            TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, true> outputIterator((T*)output, indices, indices + size);
            return cub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*)input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
        } else {
            TNonNegativeSegmentedScanOutputIterator<cub::STORE_CS, T, ptrdiff_t, false> outputIterator((T*)output, indices, indices + size);
            FillBuffer<T>((T*)output, 0, size, stream);
            return cub::DeviceScan::InclusiveScan((T*)context.PartResults.Get(), context.NumParts, (const T*) input, outputIterator, TNonNegativeSegmentedSum(), size, stream);
        }
    }

    // Queries the temporary-storage size (in bytes) DeviceScan needs for a
    // scan of `size` elements, via CUB's null-pointer sizing call.
    template <class T>
    ui64 ScanVectorTempSize(ui32 size, bool inclusive) {
        ui64 sizeInBytes = 0;
        if (inclusive) {
            cub::DeviceScan::InclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
        } else {
            cub::DeviceScan::ExclusiveSum<const T*, T*>(nullptr, sizeInBytes, nullptr, nullptr, size);
        }
        return sizeInBytes;
    }

    // Explicit instantiations for the supported element types.
    #define SCAN_VECTOR(Type) \
    template cudaError_t ScanVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
    template cudaError_t SegmentedScanNonNegativeVector<Type>(const Type *input, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
    template cudaError_t SegmentedScanAndScatterNonNegativeVector<Type>(const Type *input, const ui32* indices, Type *output, ui32 size, bool inclusive, TScanKernelContext<Type>& context, TCudaStream stream); \
    template ui64 ScanVectorTempSize<Type>(ui32, bool);

    SCAN_VECTOR(int)
    SCAN_VECTOR(ui32)
    SCAN_VECTOR(float)
    SCAN_VECTOR(double)
}
|
a71353b82b9aee91b9197569d6e149a0d834f2d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hello_world.h"
// Kernel: every launched thread prints one greeting line.
__global__
void hello_world_kernel() {
    printf("Hello, world!\n");
}
void hello_world() {
    // Launch 2 blocks of 2 threads (4 greetings total), default stream,
    // no dynamic shared memory.
    hipLaunchKernelGGL(( hello_world_kernel) , dim3(2), dim3(2) , 0, 0, );
    // Block until the kernel finishes so its printf output is flushed.
    hipDeviceSynchronize();
} | a71353b82b9aee91b9197569d6e149a0d834f2d1.cu | #include "hello_world.h"
// Kernel: every launched thread prints one greeting line.
__global__
void hello_world_kernel() {
    printf("Hello, world!\n");
}
void hello_world() {
    // Launch 2 blocks of 2 threads (4 greetings total).
    hello_world_kernel <<< 2, 2 >>> ();
    // Block until the kernel finishes so its printf output is flushed.
    cudaDeviceSynchronize();
} |
0183953b901d7984ba430df879a5f3dc09198695.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
bool fh_equal_to(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
bool fh_less(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
bool fh_greater(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
bool fh_greater_equal_to(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
bool fh_less_equal_to(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
// Element-wise AND of two 2-element predicate records.
// column1 is updated in place and returned; column2 is consumed (freed).
bool* host_logical_and(bool* column1, bool* column2)
{
    for (int i = 0; i < 2; i++) {
        column1[i] = column1[i] && column2[i];
    }
    free(column2);
    return column1;
}
// Element-wise OR of two 2-element predicate records.
// column1 is updated in place and returned; column2 is consumed (freed).
bool* host_logical_or(bool* column1, bool* column2)
{
    for (int i = 0; i < 2; i++) {
        column1[i] = column1[i] || column2[i];
    }
    free(column2);
    return column1;
}
// Compare two integer constants: evaluates "d <op> s", where op_type encodes
// the operator (1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!=').
// Returns a malloc'd 2-element record holding the same result in both slots,
// matching the zone-map record layout of the column overloads.
// Fixed: the original if/else chain let every failed comparison fall through
// to the trailing "d != s" test, so e.g. op_type 4 ('=') with d != s, or
// op_type 2 ('>') with d < s, wrongly produced 1.
bool* host_compare(int_type s, int_type d, int_type op_type)
{
    bool res = 0;
    if (op_type == 2)       // >
        res = (d > s);
    else if (op_type == 1)  // <
        res = (d < s);
    else if (op_type == 6)  // >=
        res = (d >= s);
    else if (op_type == 5)  // <=
        res = (d <= s);
    else if (op_type == 4)  // =
        res = (d == s);
    else                    // !=
        res = (d != s);
    bool* temp = (bool*)malloc(2*sizeof(bool));
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Compare two float constants with EPSILON tolerance: evaluates "d <op> s",
// where op_type encodes the operator (1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=',
// anything else '!='). Returns a malloc'd 2-element record with the result
// duplicated, matching the zone-map record layout of the column overloads.
// Fixed two defects from the original:
//  - the if/else chain fell through to the trailing '!=' test whenever a
//    comparison failed, corrupting every operator's result;
//  - the '>=' and '<=' branches mixed && and || without parentheses, so the
//    approximate-equality term short-circuited regardless of op_type.
bool* host_compare(float_type s, float_type d, int_type op_type)
{
    const float_type diff = d - s;
    const bool approx_eq = (diff < EPSILON) && (diff > -EPSILON);
    bool res = 0;
    if (op_type == 2)       // >
        res = (diff > EPSILON);
    else if (op_type == 1)  // <
        res = (-diff > EPSILON);
    else if (op_type == 6)  // >=
        res = (diff > EPSILON) || approx_eq;
    else if (op_type == 5)  // <=
        res = (-diff > EPSILON) || approx_eq;
    else if (op_type == 4)  // =
        res = approx_eq;
    else                    // !=
        res = !approx_eq;
    bool* temp = (bool*)malloc(2*sizeof(bool));
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map check of an integer column segment against constant d.
// column1 holds the segment's {min, max}. Returns a malloc'd 2-element record:
// 1 = segment may contain matches (process it), 0 = prune it.
bool* host_compare(int_type* column1, int_type d, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    //cout << "comparing " << column1[0] << " " << column1[1] << " " << d << " " << op_type << endl;
    if (op_type == 2 && column1[1] <= d) // > : max <= d, no row can exceed d
        res = 0;
    else if (op_type == 1 && column1[0] >= d) // < : min >= d, no row below d
        res = 0;
    else if (op_type == 6 && column1[1] < d) // >=
        res = 0;
    else if (op_type == 5 && column1[0] > d) // <=
        res = 0;
    // NOTE(review): this prunes a segment whose min AND max both equal d,
    // i.e. one where every row matches '='. Standard zone-map pruning for
    // '=' would be (min > d || max < d); confirm whether full-match segments
    // are handled specially by the caller before changing.
    else if (op_type == 4 && column1[0] == d && column1[1] == d) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map check of a float column segment against constant d, using the
// EPSILON-tolerant fh_* comparators. column1 holds the segment's {min, max}.
// Returns a malloc'd 2-element record: 1 = process the segment, 0 = prune it.
bool* host_compare(float_type* column1, float_type d, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && fh_less_equal_to(column1[1],d)) // > : max <= d
        res = 0;
    else if (op_type == 1 && fh_greater_equal_to(column1[0],d)) // < : min >= d
        res = 0;
    else if (op_type == 6 && fh_less(column1[1],d)) // >=
        res = 0;
    else if (op_type == 5 && fh_greater(column1[0],d)) // <=
        res = 0;
    // NOTE(review): as in the integer overload, this prunes a segment whose
    // min and max both (approximately) equal d — a full-match segment for
    // '='. Confirm the intended semantics before changing.
    else if (op_type == 4 && fh_equal_to(column1[0],d) && fh_equal_to(column1[1],d)) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map check between two integer column segments, each given as a
// {min, max} pair. Returns a malloc'd 2-element record (1 = process,
// 0 = prune).
// NOTE(review): the pruning conditions here flag segments where the
// predicate appears to hold for EVERY row pair (e.g. op 2 '>' prunes when
// min1 > max2), which is the opposite orientation of the column-vs-constant
// overloads (those prune when no row can match). Confirm the intended
// meaning of the returned flag for the two-column case.
bool* host_compare(int_type* column1, int_type* column2, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && column1[0] > column2[1]) // >
        res = 0;
    else if (op_type == 1 && column1[1] < column2[0]) // <
        res = 0;
    else if (op_type == 6 && column1[0] >= column2[1]) // >=
        res = 0;
    else if (op_type == 5 && column1[1] <= column2[0]) // <=
        res = 0;
    else if (op_type == 4 && column1[0] == column2[1] && column1[1] == column2[0]) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map check between two float column segments ({min, max} pairs), using
// the EPSILON-tolerant fh_* comparators. Returns a malloc'd 2-element record
// (1 = process, 0 = prune).
// NOTE(review): same orientation concern as the int/int overload — the
// conditions flag segments where the predicate appears to always hold;
// confirm the intended meaning of the returned flag.
bool* host_compare(float_type* column1, float_type* column2, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && fh_greater(column1[0],column2[1])) // >
        res = 0;
    else if (op_type == 1 && fh_less(column1[1],column2[0])) // <
        res = 0;
    else if (op_type == 6 && fh_greater_equal_to(column1[0],column2[1])) // >=
        res = 0;
    else if (op_type == 5 && fh_less_equal_to(column1[1],column2[0])) // <=
        res = 0;
    else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map check between a float column segment and an integer column segment
// (both {min, max} pairs); the integer bounds are implicitly promoted to
// float_type by the fh_* comparators. Returns a malloc'd 2-element record
// (1 = process, 0 = prune).
// NOTE(review): same orientation concern as the other two-column overloads;
// confirm the intended meaning of the returned flag.
bool* host_compare(float_type* column1, int_type* column2, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && fh_greater(column1[0],column2[1])) // >
        res = 0;
    else if (op_type == 1 && fh_less(column1[1],column2[0])) // <
        res = 0;
    else if (op_type == 6 && fh_greater_equal_to(column1[0],column2[1])) // >=
        res = 0;
    else if (op_type == 5 && fh_less_equal_to(column1[1],column2[0])) // <=
        res = 0;
    else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Element-wise arithmetic between an int {min,max} pair and a float {min,max}
// pair; the int bounds are promoted to float. Returns a freshly malloc'd
// 2-element float record; neither operand is freed.
// Operator encoding: "MUL", "ADD", "MINUS", anything else = divide.
// NOTE(review): for this overload, reverse==0 computes column2-column1 /
// column2/column1 — the OPPOSITE convention of the int/int overload,
// presumably due to operand stack order in the caller; confirm before reuse.
float_type* host_op(int_type* column1, float_type* column2, string op_type, int reverse)
{
    float_type* result = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; i++) {
        float_type lhs = (float_type)column1[i]; // promoted int bound
        float_type rhs = column2[i];
        if (op_type.compare("MUL") == 0)
            result[i] = lhs * rhs;               // commutative: reverse irrelevant
        else if (op_type.compare("ADD") == 0)
            result[i] = lhs + rhs;               // commutative: reverse irrelevant
        else if (op_type.compare("MINUS") == 0)
            result[i] = (reverse == 0) ? (rhs - lhs) : (lhs - rhs);
        else
            result[i] = (reverse == 0) ? (rhs / lhs) : (lhs / rhs);
    }
    return result;
}
// Element-wise arithmetic between two int {min,max} pairs. Returns a freshly
// malloc'd 2-element record; neither operand is freed.
// Operator encoding: "MUL", "ADD", "MINUS", anything else = divide.
// reverse==0: column1 op column2; reverse!=0: operands swapped for the
// non-commutative operators.
int_type* host_op(int_type* column1, int_type* column2, string op_type, int reverse)
{
    int_type* result = (int_type*)malloc(2*int_size);
    for (int i = 0; i < 2; i++) {
        int_type a = column1[i];
        int_type b = column2[i];
        if (op_type.compare("MUL") == 0)
            result[i] = a * b;                    // commutative: reverse irrelevant
        else if (op_type.compare("ADD") == 0)
            result[i] = a + b;                    // commutative: reverse irrelevant
        else if (op_type.compare("MINUS") == 0)
            result[i] = (reverse == 0) ? (a - b) : (b - a);
        else
            result[i] = (reverse == 0) ? (a / b) : (b / a);
    }
    return result;
}
// Element-wise arithmetic between two float {min,max} pairs. Returns a
// freshly malloc'd 2-element record; neither operand is freed.
// Operator encoding: "MUL", "ADD", "MINUS", anything else = divide.
// reverse==0: column1 op column2; reverse!=0: operands swapped for the
// non-commutative operators.
float_type* host_op(float_type* column1, float_type* column2, string op_type, int reverse)
{
    float_type* result = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; i++) {
        float_type a = column1[i];
        float_type b = column2[i];
        if (op_type.compare("MUL") == 0)
            result[i] = a * b;                    // commutative: reverse irrelevant
        else if (op_type.compare("ADD") == 0)
            result[i] = a + b;                    // commutative: reverse irrelevant
        else if (op_type.compare("MINUS") == 0)
            result[i] = (reverse == 0) ? (a - b) : (b - a);
        else
            result[i] = (reverse == 0) ? (a / b) : (b / a);
    }
    return result;
}
// Element-wise arithmetic between an int {min,max} pair and the constant d.
// Returns a freshly malloc'd 2-element record; column1 is not freed.
// Operator encoding: "MUL", "ADD", "MINUS", anything else = divide.
// reverse==0: column1 op d; reverse!=0: d op column1 for the
// non-commutative operators.
int_type* host_op(int_type* column1, int_type d, string op_type, int reverse)
{
    int_type* result = (int_type*)malloc(2*int_size);
    for (int i = 0; i < 2; i++) {
        int_type v = column1[i];
        if (op_type.compare("MUL") == 0)
            result[i] = v * d;                    // commutative: reverse irrelevant
        else if (op_type.compare("ADD") == 0)
            result[i] = v + d;                    // commutative: reverse irrelevant
        else if (op_type.compare("MINUS") == 0)
            result[i] = (reverse == 0) ? (v - d) : (d - v);
        else
            result[i] = (reverse == 0) ? (v / d) : (d / v);
    }
    return result;
}
// Element-wise arithmetic between an int {min,max} pair (promoted to float)
// and the float constant d. Returns a freshly malloc'd 2-element float
// record; column1 is not freed.
// Operator encoding: "MUL", "ADD", "MINUS", anything else = divide.
// reverse==0: column1 op d; reverse!=0: d op column1 for the
// non-commutative operators.
float_type* host_op(int_type* column1, float_type d, string op_type, int reverse)
{
    float_type* result = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; i++) {
        float_type v = (float_type)column1[i];   // promoted int bound
        if (op_type.compare("MUL") == 0)
            result[i] = v * d;                    // commutative: reverse irrelevant
        else if (op_type.compare("ADD") == 0)
            result[i] = v + d;                    // commutative: reverse irrelevant
        else if (op_type.compare("MINUS") == 0)
            result[i] = (reverse == 0) ? (v - d) : (d - v);
        else
            result[i] = (reverse == 0) ? (v / d) : (d / v);
    }
    return result;
}
// Element-wise arithmetic between a float {min,max} pair and the constant d.
// Returns a freshly malloc'd 2-element record; column1 is not freed.
// Operator encoding: "MUL", "ADD", "MINUS", anything else = divide.
// reverse==0: column1 op d; reverse!=0: d op column1 for the
// non-commutative operators.
float_type* host_op(float_type* column1, float_type d, string op_type,int reverse)
{
    float_type* result = (float_type*)malloc(2*float_size);
    for (int i = 0; i < 2; i++) {
        float_type v = column1[i];
        if (op_type.compare("MUL") == 0)
            result[i] = v * d;                    // commutative: reverse irrelevant
        else if (op_type.compare("ADD") == 0)
            result[i] = v + d;                    // commutative: reverse irrelevant
        else if (op_type.compare("MINUS") == 0)
            result[i] = (reverse == 0) ? (v - d) : (d - v);
        else
            result[i] = (reverse == 0) ? (v / d) : (d / v);
    }
    return result;
}
//CudaSet a contains two records - with all minimum and maximum values of the segment
//We need to determine if this segment needs to be processed
//The check takes place in host's memory
//
// Evaluates the postfix filter expression (op_type / op_value / op_nums /
// op_nums_f) against the segment's two min/max records and returns true when
// the segment may contain qualifying rows (must be processed), false when it
// can safely be skipped.
//
// Fixes vs. the previous revision:
//  * Intermediate vectors produced by host_op()/host_compare() are plain
//    malloc'ed host buffers (see those functions, and the free(sv) at the end
//    of this one), so they are released with free() — the old code passed
//    them to hipFree(), which is invalid for host pointers and leaked them.
//  * NUMBER-vs-vector arithmetic: the condition was `s1=="NUMBER" &&
//    s2=="VECTOR" || s2=="VECTOR F"==0` — a &&/|| precedence bug; now
//    parenthesized as intended.
//  * FLOAT-vs-vector arithmetic: the float constant was popped from the
//    integer stack exe_nums (corrupting both stacks) and the "VECTOR F"
//    operand type was never matched; both corrected.
//  * host_logical_and()/host_logical_or() consume (free) their second
//    argument, so the extra free of s3 after them was removed.
bool zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a)
{
    stack<string> exe_type;
    stack<string> exe_value;
    stack<int_type*> exe_vectors;
    stack<float_type*> exe_vectors_f;
    stack<int_type> exe_nums;
    stack<bool*> bool_vectors;
    stack<float_type> exe_nums_f;
    string s1, s2, s1_val, s2_val;
    int_type n1, n2, res;
    float_type n1_f, n2_f, res_f;
    // Walk the postfix expression left to right, maintaining evaluation stacks.
    for(; !op_type.empty(); op_type.pop()) {
        string ss = op_type.front();
        if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0
                || ss.compare("STRING") == 0) {
            // Operand token: push it onto the matching stack.
            exe_type.push(ss);
            if (ss.compare("NUMBER") == 0) {
                exe_nums.push(op_nums.front());
                op_nums.pop();
            }
            else if (ss.compare("NAME") == 0 || ss.compare("STRING") == 0) {
                exe_value.push(op_value.front());
                op_value.pop();
            }
            if (ss.compare("FLOAT") == 0) {
                exe_nums_f.push(op_nums_f.front());
                op_nums_f.pop();
            }
        }
        else {
            if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
                // Binary arithmetic: pop the two operand descriptors.
                s1 = exe_type.top();
                exe_type.pop();
                s2 = exe_type.top();
                exe_type.pop();
                if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    if (ss.compare("ADD") == 0 )
                        res = n1+n2;
                    else if (ss.compare("MUL") == 0 )
                        res = n1*n2;
                    else if (ss.compare("DIV") == 0 )
                        res = n1/n2;
                    else
                        res = n1-n2;
                    exe_type.push("NUMBER");
                    exe_nums.push(res);
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    if (ss.compare("ADD") == 0 )
                        res_f = n1_f+n2_f;
                    else if (ss.compare("MUL") == 0 )
                        res_f = n1_f*n2_f;
                    else if (ss.compare("DIV") == 0 )
                        res_f = n1_f/n2_f;
                    else
                        res_f = n1_f-n2_f;
                    exe_type.push("FLOAT");
                    exe_nums_f.push(res_f);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR F");
                    if (a->type[(a->columnNames)[s1_val]] == 1) {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,1));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,1));
                    };
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR F");
                    if (a->type[(a->columnNames)[s2_val]] == 1) {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,0));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        exe_vectors_f.push(host_op(t,n1_f,ss,0));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    if (a->type[(a->columnNames)[s1_val]] == 1) {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(t,(float_type)n1,ss,1));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        exe_type.push("VECTOR");
                        exe_vectors.push(host_op(t,n1,ss,1));
                    };
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[(a->columnNames)[s2_val]] == 1) {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(t,(float_type)n1,ss,0));
                    }
                    else {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        exe_type.push("VECTOR");
                        exe_vectors.push(host_op(t,n1,ss,0));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[(a->columnNames)[s1_val]] == 0) {
                        int_type* t1 = a->get_host_int_by_name(s1_val);
                        if (a->type[(a->columnNames)[s2_val]] == 0) {
                            int_type* t = a->get_host_int_by_name(s2_val);
                            exe_type.push("VECTOR");
                            exe_vectors.push(host_op(t,t1,ss,0));
                        }
                        else {
                            float_type* t = a->get_host_float_by_name(s2_val);
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t1,t,ss,0));
                        };
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        if (a->type[(a->columnNames)[s2_val]] == 0) {
                            int_type* t1 = a->get_host_int_by_name(s2_val);
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t1,t,ss,0));
                        }
                        else {
                            float_type* t1 = a->get_host_float_by_name(s2_val);
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,t1,ss,0));
                        };
                    }
                }
                else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[(a->columnNames)[s2_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        if (s1.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR");
                            exe_vectors.push(host_op(t,s3,ss,0));
                            free(s3);   // malloc'ed by a previous host_op
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,0));
                            free(s3);
                        }
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        if (s1.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(s3,t, ss,0));
                            free(s3);
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,0));
                            free(s3);
                        }
                    };
                }
                else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[(a->columnNames)[s1_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        if (s2.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR");
                            exe_vectors.push(host_op(t,s3,ss,1));
                            free(s3);
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,1));
                            free(s3);
                        }
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        if (s2.compare("VECTOR") == 0 ) {
                            int_type* s3 = exe_vectors.top();
                            exe_vectors.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(s3,t,ss,1));
                            free(s3);
                        }
                        else {
                            float_type* s3 = exe_vectors_f.top();
                            exe_vectors_f.pop();
                            exe_type.push("VECTOR F");
                            exe_vectors_f.push(host_op(t,s3,ss,1));
                            free(s3);
                        }
                    };
                }
                else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    if (s1.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR");
                        exe_vectors.push(host_op(s3,n1, ss,1));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1, ss,1));
                        free(s3);
                    }
                }
                else if (s1.compare("NUMBER") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
                    // (fixed &&/|| precedence in the condition above)
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    if (s2.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR");
                        exe_vectors.push(host_op(s3,n1, ss,0));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1, ss,0));
                        free(s3);
                    }
                }
                else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    if (s1.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,1));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,1));
                        free(s3);
                    }
                }
                else if (s1.compare("FLOAT") == 0 && (s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0)) {
                    // (fixed: the float constant lives on exe_nums_f, and the
                    // "VECTOR F" case was previously unreachable)
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    if (s2.compare("VECTOR") == 0 ) {
                        int_type* s3 = exe_vectors.top();
                        exe_vectors.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,0));
                        free(s3);
                    }
                    else {
                        float_type* s3 = exe_vectors_f.top();
                        exe_vectors_f.pop();
                        exe_type.push("VECTOR F");
                        exe_vectors_f.push(host_op(s3,n1_f, ss,0));
                        free(s3);
                    }
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    int_type* s4 = exe_vectors.top();
                    exe_vectors.pop();
                    exe_type.push("VECTOR");
                    exe_vectors.push(host_op(s3, s4,ss,1));
                    free(s3);
                    free(s4);
                }
                else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    float_type* s4 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR F");
                    exe_vectors_f.push(host_op(s3, s4,ss,1));
                    free(s3);
                    free(s4);
                }
                else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    float_type* s4 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR F");
                    exe_vectors_f.push(host_op(s3, s4,ss,0));
                    free(s3);
                    free(s4);
                }
                else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    float_type* s4 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR F");
                    exe_vectors_f.push(host_op(s3, s4,ss,1));
                    free(s3);
                    free(s4);
                }
            }
            else if (ss.compare("CMP") == 0) {
                // Comparison: produces a bool[2] verdict (one per min/max record).
                int_type cmp_type = op_nums.front();
                op_nums.pop();
                s1 = exe_type.top();
                exe_type.pop();
                s2 = exe_type.top();
                exe_type.pop();
                if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(n1,n2,cmp_type));
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(n1_f,n2_f,cmp_type));
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    n2 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
                }
                else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    unsigned int colIndex1 = (a->columnNames).find(s2_val)->second;
                    CudaChar* cc = (CudaChar*)(a->h_columns)[colIndex1];
                    string str1, str2;
                    cc->findMinMax(str1,str2);
                    // Segment matches only when both min and max equal the constant.
                    bool* bv = (bool*)malloc(2*sizeof(bool));
                    if(str1.compare(s1_val) == 0 && str2.compare(s1_val) == 0) {
                        bv[0] = 1;
                        bv[1] = 1;
                    }
                    else {
                        bv[0] = 0;
                        bv[1] = 0;
                    };
                    exe_type.push("VECTOR");
                    bool_vectors.push(bv);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    unsigned int colIndex1 = (a->columnNames).find(s1_val)->second;
                    CudaChar* cc = (CudaChar*)(a->h_columns)[colIndex1];
                    string str1, str2;
                    cc->findMinMax(str1,str2);
                    bool* bv = (bool*)malloc(2*sizeof(bool));
                    if(str1.compare(s2_val) == 0 && str2.compare(s2_val) == 0) {
                        bv[0] = 1;
                        bv[1] = 1;
                    }
                    else {
                        bv[0] = 0;
                        bv[1] = 0;
                    };
                    exe_type.push("VECTOR");
                    bool_vectors.push(bv);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s1_val = exe_value.top();
                    exe_value.pop();
                    //cout << "comparing " << n1 << " and " << s1_val << endl;
                    if (a->type[(a->columnNames)[s1_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    //cout << "comparing " << n1 << " and " << s2_val << endl;
                    if (a->type[(a->columnNames)[s2_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
                    };
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    s1_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[(a->columnNames)[s1_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1_f,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s1_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1_f,cmp_type));
                    };
                }
                else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    if (a->type[(a->columnNames)[s2_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1_f,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        exe_type.push("VECTOR");
                        bool_vectors.push(host_compare(t,n1_f,cmp_type));
                    };
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1 = exe_nums.top();
                    exe_nums.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    n1_f = exe_nums_f.top();
                    exe_nums_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,n1_f,cmp_type));
                    free(s3);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    if (a->type[(a->columnNames)[s2_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        bool_vectors.push(host_compare(s3,t,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    if (a->type[(a->columnNames)[s2_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    if (a->type[(a->columnNames)[s2_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        bool_vectors.push(host_compare(s3,t,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    if (a->type[(a->columnNames)[s2_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    }
                    else {
                        float_type* t = a->get_host_float_by_name(s2_val);
                        bool_vectors.push(host_compare(t,s3,cmp_type));
                    };
                    free(s3);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
                    int_type* s3 = exe_vectors.top();
                    exe_vectors.pop();
                    int_type* s2 = exe_vectors.top();
                    exe_vectors.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s2,s3,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    float_type* s2 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s2,s3,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
                    cmp_type = reverse_op(cmp_type);
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    int_type* s2 = exe_vectors.top();
                    exe_vectors.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,s2,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
                    float_type* s3 = exe_vectors_f.top();
                    exe_vectors_f.pop();
                    int_type* s2 = exe_vectors.top();
                    exe_vectors.pop();
                    exe_type.push("VECTOR");
                    bool_vectors.push(host_compare(s3,s2,cmp_type));
                    free(s3);
                    free(s2);
                }
                else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
                    s1_val = exe_value.top();
                    exe_value.pop();
                    s2_val = exe_value.top();
                    exe_value.pop();
                    exe_type.push("VECTOR");
                    if (a->type[(a->columnNames)[s1_val]] == 0) {
                        int_type* t = a->get_host_int_by_name(s1_val);
                        if (a->type[(a->columnNames)[s2_val]] == 0) {
                            int_type* t1 = a->get_host_int_by_name(s2_val);
                            bool_vectors.push(host_compare(t1,t,cmp_type));
                        }
                        else {
                            float_type* t1 = a->get_host_float_by_name(s2_val);
                            bool_vectors.push(host_compare(t1,t,cmp_type));
                        };
                    }
                    else {
                        cmp_type = reverse_op(cmp_type);
                        float_type* t = a->get_host_float_by_name(s1_val);
                        if (a->type[(a->columnNames)[s2_val]] == 0) {
                            int_type* t1 = a->get_host_int_by_name(s2_val);
                            bool_vectors.push(host_compare(t,t1,cmp_type));
                        }
                        else {
                            float_type* t1 = a->get_host_float_by_name(s2_val);
                            bool_vectors.push(host_compare(t,t1,cmp_type));
                        };
                    }
                }
            }
            else if (ss.compare("AND") == 0) {
                bool* s3 = bool_vectors.top();
                bool_vectors.pop();
                bool* s2 = bool_vectors.top();
                bool_vectors.pop();
                exe_type.push("VECTOR");
                // host_logical_and() combines into s2 and frees s3 itself.
                bool_vectors.push(host_logical_and(s2,s3));
            }
            else if (ss.compare("OR") == 0) {
                bool* s3 = bool_vectors.top();
                bool_vectors.pop();
                bool* s2 = bool_vectors.top();
                bool_vectors.pop();
                exe_type.push("VECTOR");
                // host_logical_or() combines into s2 and frees s3 itself.
                bool_vectors.push(host_logical_or(s2,s3));
            }
            else {
                cout << "found nothing " << endl;
            }
        };
    };
    // A single bool[2] verdict remains: one flag per min/max record.
    bool* sv = bool_vectors.top();
    bool process = sv[0] && sv[1];
    free(sv);
    return process;
}
| 0183953b901d7984ba430df879a5f3dc09198695.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
bool fh_equal_to(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
bool fh_less(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
bool fh_greater(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
bool fh_greater_equal_to(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
bool fh_less_equal_to(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
// Element-wise AND of two bool[2] zone-map verdicts.
// Combines in place into column1 and returns it; column2 is consumed (freed).
bool* host_logical_and(bool* column1, bool* column2)
{
    for (int i = 0; i < 2; ++i)
        column1[i] = column1[i] && column2[i];
    free(column2);
    return column1;
}
// Element-wise OR of two bool[2] zone-map verdicts.
// Combines in place into column1 and returns it; column2 is consumed (freed).
bool* host_logical_or(bool* column1, bool* column2)
{
    for (int i = 0; i < 2; ++i)
        column1[i] = column1[i] || column2[i];
    free(column2);
    return column1;
}
// Compares two integer constants for the zone-map evaluator.
// op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns a malloc'ed bool[2] (caller frees); both entries hold the same
// verdict, mirroring the segment's two min/max records.
// NOTE(review): when the selected op's own condition is false, control falls
// through the else-if chain to the final '!=' test — e.g. op_type == 2 with
// d < s still yields 1 because d != s. For zone maps a spurious 1 only means
// the segment gets processed unnecessarily, but confirm this is intended.
bool* host_compare(int_type s, int_type d, int_type op_type)
{
    bool res = 0;
    if (op_type == 2 && d>s ) // >
        res = 1;
    else if (op_type == 1 && d<s) // <
        res = 1;
    else if (op_type == 6 && d>=s) // >=
        res = 1;
    else if (op_type == 5 && d<=s) // <=
        res = 1;
    else if (op_type == 4 && d==s)// =
        res = 1;
    else // !=
        if(d!=s) res = 1;
    bool* temp = (bool*)malloc(2*sizeof(bool));
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Compares two float constants for the zone-map evaluator using the EPSILON
// tolerance. op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// Returns a malloc'ed bool[2] (caller frees); both entries hold the same verdict.
// Fix: the '>=' and '<=' branches were written as `op_type == N && A || B`,
// which && / || precedence parses as `(op_type == N && A) || B` — so any pair
// of equal values satisfied those branches regardless of op_type. The
// disjunction is now parenthesized so the op_type guard applies to all of it.
// NOTE(review): as in the integer overload, an unmatched op falls through to
// the final '!=' test; kept as-is (harmlessly conservative for zone maps).
bool* host_compare(float_type s, float_type d, int_type op_type)
{
    bool res = 0;
    if (op_type == 2 && (d-s) > EPSILON) // >
        res = 1;
    else if (op_type == 1 && (s-d) > EPSILON) // <
        res = 1;
    else if (op_type == 6 && (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))) // >=
        res = 1;
    else if (op_type == 5 && (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))) // <=
        res = 1;
    else if (op_type == 4 && ((d-s) < EPSILON) && ((d-s) > -EPSILON))// =
        res = 1;
    else // !=
        if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON))) res = 1;
    bool* temp = (bool*)malloc(2*sizeof(bool));
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map pruning check of an int segment record (column1 = {min, max})
// against the constant d for the predicate "column OP d".
// Returns a malloc'ed bool[2] (caller frees); res == 1 means the segment may
// contain matches and must be processed, res == 0 means it can be skipped.
// op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>='.
// NOTE(review): the '=' branch prunes only when min == d AND max == d, i.e.
// exactly when every row equals d — the pattern of the other branches suggests
// it should prune when d lies outside [min, max]; confirm intent.
bool* host_compare(int_type* column1, int_type d, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    //cout << "comparing " << column1[0] << " " << column1[1] << " " << d << " " << op_type << endl;
    if (op_type == 2 && column1[1] <= d) // > : no row exceeds d when max <= d
        res = 0;
    else if (op_type == 1 && column1[0] >= d) // < : no row below d when min >= d
        res = 0;
    else if (op_type == 6 && column1[1] < d) // >= : impossible when max < d
        res = 0;
    else if (op_type == 5 && column1[0] > d) // <= : impossible when min > d
        res = 0;
    else if (op_type == 4 && column1[0] == d && column1[1] == d) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map pruning check of a float segment record (column1 = {min, max})
// against the constant d, using the fh_* EPSILON-tolerant comparators.
// Returns a malloc'ed bool[2] (caller frees); res == 1 means "process the
// segment", res == 0 means "skip". op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>='.
// NOTE(review): as in the integer overload, the '=' branch prunes only when
// min == d AND max == d (whole segment equal to d) rather than when d falls
// outside [min, max]; confirm intent.
bool* host_compare(float_type* column1, float_type d, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && fh_less_equal_to(column1[1],d)) // > : impossible when max <= d
        res = 0;
    else if (op_type == 1 && fh_greater_equal_to(column1[0],d)) // < : impossible when min >= d
        res = 0;
    else if (op_type == 6 && fh_less(column1[1],d)) // >= : impossible when max < d
        res = 0;
    else if (op_type == 5 && fh_greater(column1[0],d)) // <= : impossible when min > d
        res = 0;
    else if (op_type == 4 && fh_equal_to(column1[0],d) && fh_equal_to(column1[1],d)) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map pruning check of two int segment records ({min, max} each).
// Returns a malloc'ed bool[2] (caller frees); res == 1 means "process the
// segment", res == 0 means "skip". op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>='.
// Each branch prunes when the predicate is unsatisfiable for the whole ranges,
// e.g. '>' is impossible when min(column1) > max(column2) never leaves room —
// here: when column1's min already exceeds column2's max the '>' case keeps
// the segment, so res = 0 only when min1 > max2 for the reversed reading.
// NOTE(review): the '=' branch prunes when min1 == max2 AND max1 == min2
// (both ranges collapsed to the same point); the pattern of the other branches
// suggests "ranges disjoint" was intended — confirm.
bool* host_compare(int_type* column1, int_type* column2, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && column1[0] > column2[1]) // >
        res = 0;
    else if (op_type == 1 && column1[1] < column2[0]) // <
        res = 0;
    else if (op_type == 6 && column1[0] >= column2[1]) // >=
        res = 0;
    else if (op_type == 5 && column1[1] <= column2[0]) // <=
        res = 0;
    else if (op_type == 4 && column1[0] == column2[1] && column1[1] == column2[0]) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map pruning check of two float segment records ({min, max} each),
// using the fh_* EPSILON-tolerant comparators.
// Returns a malloc'ed bool[2] (caller frees); res == 1 means "process the
// segment", res == 0 means "skip". op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>='.
// NOTE(review): mirrors the int*/int* overload, including its questionable
// '=' pruning condition (both ranges collapsed to the same point) — confirm.
bool* host_compare(float_type* column1, float_type* column2, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && fh_greater(column1[0],column2[1])) // >
        res = 0;
    else if (op_type == 1 && fh_less(column1[1],column2[0])) // <
        res = 0;
    else if (op_type == 6 && fh_greater_equal_to(column1[0],column2[1])) // >=
        res = 0;
    else if (op_type == 5 && fh_less_equal_to(column1[1],column2[0])) // <=
        res = 0;
    else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Zone-map pruning check of a float segment record against an int segment
// record ({min, max} each); the int values are implicitly promoted to
// float_type at each fh_* call site.
// Returns a malloc'ed bool[2] (caller frees); res == 1 means "process the
// segment", res == 0 means "skip". op codes: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>='.
// NOTE(review): same questionable '=' pruning condition as the other
// range/range overloads — confirm.
bool* host_compare(float_type* column1, int_type* column2, int_type op_type)
{
    bool* temp = (bool*)malloc(2*sizeof(bool));
    bool res = 1;
    if (op_type == 2 && fh_greater(column1[0],column2[1])) // >
        res = 0;
    else if (op_type == 1 && fh_less(column1[1],column2[0])) // <
        res = 0;
    else if (op_type == 6 && fh_greater_equal_to(column1[0],column2[1])) // >=
        res = 0;
    else if (op_type == 5 && fh_less_equal_to(column1[1],column2[0])) // <=
        res = 0;
    else if (op_type == 4 && fh_equal_to(column1[0], column2[1]) && fh_equal_to(column1[1],column2[0])) // =
        res = 0;
    temp[0] = res;
    temp[1] = res;
    return temp;
}
// Applies op_type ("MUL"/"ADD"/"MINUS", anything else = divide) between an int
// segment record (column1, promoted to float) and a float segment record
// (column2), element-wise over the two {min, max} entries.
// Returns a malloc'ed float_type[2]; caller owns and frees it.
// NOTE(review): the meaning of `reverse` here is mirrored relative to the
// int*/int* overload — reverse == 0 computes column2 - column1 (and
// column2 / column1), while the int*/int* overload computes column1 - column2
// for reverse == 0. Presumably this compensates for argument order at the
// call sites in zone_map_check; confirm before relying on it.
float_type* host_op(int_type* column1, float_type* column2, string op_type, int reverse)
{
    // Promote the int record into the result buffer, then combine in place.
    float_type* temp = (float_type*)malloc(2*float_size);
    temp[0] = column1[0];
    temp[1] = column1[1];
    if(reverse == 0) {
        if (op_type.compare("MUL") == 0) {
            temp[0] = temp[0] * column2[0];
            temp[1] = temp[1] * column2[1];
        }
        else if (op_type.compare("ADD") == 0) {
            temp[0] = temp[0] + column2[0];
            temp[1] = temp[1] + column2[1];
        }
        else if (op_type.compare("MINUS") == 0) {
            temp[0] = column2[0] - temp[0];
            temp[1] = column2[1] - temp[1];
        }
        else {
            temp[0] = column2[0] / temp[0];
            temp[1] = column2[1] / temp[1];
        }
    }
    else {
        if (op_type.compare("MUL") == 0) {
            temp[0] = temp[0] * column2[0];
            temp[1] = temp[1] * column2[1];
        }
        else if (op_type.compare("ADD") == 0) {
            temp[0] = temp[0] + column2[0];
            temp[1] = temp[1] + column2[1];
        }
        else if (op_type.compare("MINUS") == 0) {
            temp[0] = temp[0] - column2[0];
            temp[1] = temp[1] - column2[1];
        }
        else {
            temp[0] = temp[0] / column2[0];
            temp[1] = temp[1] / column2[1];
        }
    };
    return temp;
}
// Applies op_type ("MUL"/"ADD"/"MINUS", anything else = divide) element-wise
// over two int segment records ({min, max} each). reverse == 0 treats column1
// as the left operand; reverse != 0 swaps the operands (only observable for
// the non-commutative MINUS and divide cases).
// Returns a malloc'ed int_type[2]; caller owns and frees it.
int_type* host_op(int_type* column1, int_type* column2, string op_type, int reverse)
{
    int_type* out = (int_type*)malloc(2*int_size);
    int_type* lhs = (reverse == 0) ? column1 : column2;
    int_type* rhs = (reverse == 0) ? column2 : column1;
    if (op_type.compare("MUL") == 0) {
        out[0] = lhs[0] * rhs[0];
        out[1] = lhs[1] * rhs[1];
    }
    else if (op_type.compare("ADD") == 0) {
        out[0] = lhs[0] + rhs[0];
        out[1] = lhs[1] + rhs[1];
    }
    else if (op_type.compare("MINUS") == 0) {
        out[0] = lhs[0] - rhs[0];
        out[1] = lhs[1] - rhs[1];
    }
    else {
        out[0] = lhs[0] / rhs[0];
        out[1] = lhs[1] / rhs[1];
    }
    return out;
}
// Applies op_type ("MUL"/"ADD"/"MINUS", anything else = divide) element-wise
// over two float segment records ({min, max} each). reverse == 0 treats
// column1 as the left operand; reverse != 0 swaps the operands (only
// observable for the non-commutative MINUS and divide cases).
// Returns a malloc'ed float_type[2]; caller owns and frees it.
float_type* host_op(float_type* column1, float_type* column2, string op_type, int reverse)
{
    float_type* out = (float_type*)malloc(2*float_size);
    float_type* lhs = (reverse == 0) ? column1 : column2;
    float_type* rhs = (reverse == 0) ? column2 : column1;
    if (op_type.compare("MUL") == 0) {
        out[0] = lhs[0] * rhs[0];
        out[1] = lhs[1] * rhs[1];
    }
    else if (op_type.compare("ADD") == 0) {
        out[0] = lhs[0] + rhs[0];
        out[1] = lhs[1] + rhs[1];
    }
    else if (op_type.compare("MINUS") == 0) {
        out[0] = lhs[0] - rhs[0];
        out[1] = lhs[1] - rhs[1];
    }
    else {
        out[0] = lhs[0] / rhs[0];
        out[1] = lhs[1] / rhs[1];
    }
    return out;
}
// Applies op_type between the two-entry int segment record column1 (min/max)
// and the scalar d. reverse != 0 places d as the left operand, which only
// matters for the non-commutative MINUS and divide ("anything else") cases.
// Returns a malloc'ed int_type[2]; caller owns and frees it.
int_type* host_op(int_type* column1, int_type d, string op_type, int reverse)
{
    int_type* out = (int_type*)malloc(2*int_size);
    if (op_type.compare("MUL") == 0) {
        // Commutative: reverse is irrelevant.
        out[0] = column1[0] * d;
        out[1] = column1[1] * d;
    }
    else if (op_type.compare("ADD") == 0) {
        // Commutative: reverse is irrelevant.
        out[0] = column1[0] + d;
        out[1] = column1[1] + d;
    }
    else if (op_type.compare("MINUS") == 0) {
        if (reverse == 0) {
            out[0] = column1[0] - d;
            out[1] = column1[1] - d;
        }
        else {
            out[0] = d - column1[0];
            out[1] = d - column1[1];
        }
    }
    else {
        if (reverse == 0) {
            out[0] = column1[0] / d;
            out[1] = column1[1] / d;
        }
        else {
            out[0] = d / column1[0];
            out[1] = d / column1[1];
        }
    }
    return out;
}
// Applies op_type ("MUL"/"ADD"/"MINUS", anything else = divide) between the
// two-entry int segment record column1 (min/max) and the scalar d, promoting
// to float. reverse != 0 puts d on the left for the non-commutative ops.
// Returns a malloc'ed float_type[2]; caller owns and frees it.
// (Improvement: the original allocated a temporary float_type[2] just to hold
// the promoted values and freed it immediately; the promotion is now done
// with plain casts and only the result buffer is allocated.)
float_type* host_op(int_type* column1, float_type d, string op_type, int reverse)
{
    float_type* res = (float_type*)malloc(2*float_size);
    const float_type c0 = (float_type)column1[0];
    const float_type c1 = (float_type)column1[1];
    if(reverse == 0) {
        if (op_type.compare("MUL") == 0) {
            res[0] = c0 * d;
            res[1] = c1 * d;
        }
        else if (op_type.compare("ADD") == 0) {
            res[0] = c0 + d;
            res[1] = c1 + d;
        }
        else if (op_type.compare("MINUS") == 0) {
            res[0] = c0 - d;
            res[1] = c1 - d;
        }
        else {
            res[0] = c0 / d;
            res[1] = c1 / d;
        }
    }
    else {
        if (op_type.compare("MUL") == 0) {
            res[0] = c0 * d;
            res[1] = c1 * d;
        }
        else if (op_type.compare("ADD") == 0) {
            res[0] = c0 + d;
            res[1] = c1 + d;
        }
        else if (op_type.compare("MINUS") == 0) {
            res[0] = d - c0;
            res[1] = d - c1;
        }
        else {
            res[0] = d / c0;
            res[1] = d / c1;
        }
    };
    return res;
}
// Applies op_type between the two-entry float segment record column1 (min/max)
// and the scalar d. reverse != 0 places d as the left operand, which only
// matters for the non-commutative MINUS and divide ("anything else") cases.
// Returns a malloc'ed float_type[2]; caller owns and frees it.
float_type* host_op(float_type* column1, float_type d, string op_type,int reverse)
{
    float_type* out = (float_type*)malloc(2*float_size);
    if (op_type.compare("MUL") == 0) {
        // Commutative: reverse is irrelevant.
        out[0] = column1[0] * d;
        out[1] = column1[1] * d;
    }
    else if (op_type.compare("ADD") == 0) {
        // Commutative: reverse is irrelevant.
        out[0] = column1[0] + d;
        out[1] = column1[1] + d;
    }
    else if (op_type.compare("MINUS") == 0) {
        if (reverse == 0) {
            out[0] = column1[0] - d;
            out[1] = column1[1] - d;
        }
        else {
            out[0] = d - column1[0];
            out[1] = d - column1[1];
        }
    }
    else {
        if (reverse == 0) {
            out[0] = column1[0] / d;
            out[1] = column1[1] / d;
        }
        else {
            out[0] = d / column1[0];
            out[1] = d / column1[1];
        }
    }
    return out;
}
//CudaSet a contains two records - with all minimum and maximum values of the segment
//We need to determine if this segment needs to be processed
//The check takes place in host's memory
bool zone_map_check(queue<string> op_type, queue<string> op_value, queue<int_type> op_nums,queue<float_type> op_nums_f, CudaSet* a)
{
stack<string> exe_type;
stack<string> exe_value;
stack<int_type*> exe_vectors;
stack<float_type*> exe_vectors_f;
stack<int_type> exe_nums;
stack<bool*> bool_vectors;
stack<float_type> exe_nums_f;
string s1, s2, s1_val, s2_val;
int_type n1, n2, res;
float_type n1_f, n2_f, res_f;
for(int i=0; !op_type.empty(); ++i, op_type.pop()) {
string ss = op_type.front();
if (ss.compare("NAME") == 0 || ss.compare("NUMBER") == 0 || ss.compare("VECTOR") == 0 || ss.compare("FLOAT") == 0
|| ss.compare("STRING") == 0) {
exe_type.push(ss);
if (ss.compare("NUMBER") == 0) {
exe_nums.push(op_nums.front());
op_nums.pop();
}
else if (ss.compare("NAME") == 0 || ss.compare("STRING") == 0) {
exe_value.push(op_value.front());
op_value.pop();
}
if (ss.compare("FLOAT") == 0) {
exe_nums_f.push(op_nums_f.front());
op_nums_f.pop();
}
}
else {
if (ss.compare("MUL") == 0 || ss.compare("ADD") == 0 || ss.compare("DIV") == 0 || ss.compare("MINUS") == 0) {
// get 2 values from the stack
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
if (ss.compare("ADD") == 0 )
res = n1+n2;
else if (ss.compare("MUL") == 0 )
res = n1*n2;
else if (ss.compare("DIV") == 0 )
res = n1/n2;
else
res = n1-n2;
exe_type.push("NUMBER");
exe_nums.push(res);
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
if (ss.compare("ADD") == 0 )
res_f = n1_f+n2_f;
else if (ss.compare("MUL") == 0 )
res_f = n1_f*n2_f;
else if (ss.compare("DIV") == 0 )
res_f = n1_f/n2_f;
else
res_f = n1_f-n2_f;
exe_type.push("FLOAT");
exe_nums_f.push(res_f);
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR F");
if (a->type[(a->columnNames)[s1_val]] == 1) {
float_type* t = a->get_host_float_by_name(s1_val);
exe_vectors_f.push(host_op(t,n1_f,ss,1));
}
else {
int_type* t = a->get_host_int_by_name(s1_val);
exe_vectors_f.push(host_op(t,n1_f,ss,1));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR F");
if (a->type[(a->columnNames)[s2_val]] == 1) {
float_type* t = a->get_host_float_by_name(s2_val);
exe_vectors_f.push(host_op(t,n1_f,ss,0));
}
else {
int_type* t = a->get_host_int_by_name(s2_val);
exe_vectors_f.push(host_op(t,n1_f,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
s1_val = exe_value.top();
exe_value.pop();
n1 = exe_nums.top();
exe_nums.pop();
if (a->type[(a->columnNames)[s1_val]] == 1) {
float_type* t = a->get_host_float_by_name(s1_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,(float_type)n1,ss,1));
}
else {
int_type* t = a->get_host_int_by_name(s1_val);
exe_type.push("VECTOR");
exe_vectors.push(host_op(t,n1,ss,1));
};
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s2_val]] == 1) {
float_type* t = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,(float_type)n1,ss,0));
}
else {
int_type* t = a->get_host_int_by_name(s2_val);
exe_type.push("VECTOR");
exe_vectors.push(host_op(t,n1,ss,0));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t1 = a->get_host_int_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
exe_type.push("VECTOR");
exe_vectors.push(host_op(t,t1,ss,0));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t1,t,ss,0));
};
}
else {
float_type* t = a->get_host_float_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t1 = a->get_host_int_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t1,t,ss,0));
}
else {
float_type* t1 = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,t1,ss,0));
};
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0 ) && s2.compare("NAME") == 0) {
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(host_op(t,s3,ss,0));
//free s3
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,0));
cudaFree(s3);
}
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,t, ss,0));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,0));
cudaFree(s3);
}
};
}
else if ((s2.compare("VECTOR") == 0 || s2.compare("VECTOR F") == 0 ) && s1.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t = a->get_host_int_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(host_op(t,s3,ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,1));
cudaFree(s3);
}
}
else {
float_type* t = a->get_host_float_by_name(s1_val);
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,t,ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(t,s3,ss,1));
cudaFree(s3);
}
};
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(host_op(s3,n1, ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1, ss,1));
cudaFree(s3);
}
}
else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") || s2.compare("VECTOR F") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(host_op(s3,n1, ss,0));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1, ss,0));
cudaFree(s3);
}
}
else if ((s1.compare("VECTOR") == 0 || s1.compare("VECTOR F") == 0) && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
if (s1.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,1));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,1));
cudaFree(s3);
}
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
n1_f = exe_nums.top();
exe_nums.pop();
if (s2.compare("VECTOR") == 0 ) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,0));
cudaFree(s3);
}
else {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3,n1_f, ss,0));
cudaFree(s3);
}
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s4 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
exe_vectors.push(host_op(s3, s4,ss,1));
cudaFree(s3);
cudaFree(s4);
}
else if(s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3, s4,ss,1));
cudaFree(s3);
cudaFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3, s4,ss,0));
cudaFree(s3);
cudaFree(s4);
}
else if(s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s4 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR F");
exe_vectors_f.push(host_op(s3, s4,ss,1));
cudaFree(s3);
cudaFree(s4);
}
}
else if (ss.compare("CMP") == 0) {
int_type cmp_type = op_nums.front();
op_nums.pop();
s1 = exe_type.top();
exe_type.pop();
s2 = exe_type.top();
exe_type.pop();
if (s1.compare("NUMBER") == 0 && s2.compare("NUMBER") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
n2 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(n1,n2,cmp_type));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(n1_f,n2_f,cmp_type));
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NUMBER") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
}
else if (s1.compare("NUMBER") == 0 && s2.compare("FLOAT") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
n2 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(n1_f,float_type(n2),cmp_type));
}
else if (s1.compare("STRING") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
unsigned int colIndex1 = (a->columnNames).find(s2_val)->second;
CudaChar* cc = (CudaChar*)(a->h_columns)[colIndex1];
string str1, str2;
cc->findMinMax(str1,str2);
bool* bv = (bool*)malloc(2*sizeof(bool));
if(str1.compare(s1_val) == 0 && str2.compare(s1_val) == 0) {
bv[0] = 1;
bv[1] = 1;
}
else {
bv[0] = 0;
bv[1] = 0;
};
exe_type.push("VECTOR");
bool_vectors.push(bv);
}
else if (s1.compare("NAME") == 0 && s2.compare("STRING") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
unsigned int colIndex1 = (a->columnNames).find(s1_val)->second;
CudaChar* cc = (CudaChar*)(a->h_columns)[colIndex1];
string str1, str2;
cc->findMinMax(str1,str2);
bool* bv = (bool*)malloc(2*sizeof(bool));
if(str1.compare(s2_val) == 0 && str2.compare(s2_val) == 0) {
bv[0] = 1;
bv[1] = 1;
}
else {
bv[0] = 0;
bv[1] = 0;
};
exe_type.push("VECTOR");
bool_vectors.push(bv);
}
else if (s1.compare("NUMBER") == 0 && s2.compare("NAME") == 0) {
n1 = exe_nums.top();
exe_nums.pop();
s1_val = exe_value.top();
exe_value.pop();
//cout << "comparing " << n1 << " and " << s1_val << endl;
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t = a->get_host_int_by_name(s1_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s1_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("NUMBER") == 0) {
cmp_type = reverse_op(cmp_type);
n1 = exe_nums.top();
exe_nums.pop();
s2_val = exe_value.top();
exe_value.pop();
//cout << "comparing " << n1 << " and " << s2_val << endl;
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,(float_type)n1,cmp_type));
};
}
else if (s1.compare("FLOAT") == 0 && s2.compare("NAME") == 0) {
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s1_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t = a->get_host_int_by_name(s1_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1_f,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s1_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1_f,cmp_type));
};
}
else if (s1.compare("NAME") == 0 && s2.compare("FLOAT") == 0) {
cmp_type = reverse_op(cmp_type);
n1_f = exe_nums_f.top();
exe_nums_f.pop();
s2_val = exe_value.top();
exe_value.pop();
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1_f,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
exe_type.push("VECTOR");
bool_vectors.push(host_compare(t,n1_f,cmp_type));
};
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("NUMBER") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1,cmp_type));
cudaFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("NUMBER") == 0) {
cmp_type = reverse_op(cmp_type);
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1,cmp_type));
cudaFree(s3);
}
else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1,cmp_type));
cudaFree(s3);
}
else if (s1.compare("NUMBER") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1 = exe_nums.top();
exe_nums.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1,cmp_type));
cudaFree(s3);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("FLOAT") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1_f,cmp_type));
cudaFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("FLOAT") == 0) {
cmp_type = reverse_op(cmp_type);
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1_f,cmp_type));
cudaFree(s3);
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1_f,cmp_type));
cudaFree(s3);
}
else if (s1.compare("FLOAT") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
n1_f = exe_nums_f.top();
exe_nums_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,n1_f,cmp_type));
cudaFree(s3);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("NAME") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(s3,t,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
cudaFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("NAME") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
cudaFree(s3);
}
else if (s1.compare("NAME") == 0 && s2.compare("VECTOR F") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(s3,t,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
cudaFree(s3);
}
else if (s1.compare("NAME") == 0 && s2.compare("VECTOR") == 0) {
cmp_type = reverse_op(cmp_type);
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
}
else {
float_type* t = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,s3,cmp_type));
};
cudaFree(s3);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR") == 0) {
int_type* s3 = exe_vectors.top();
exe_vectors.pop();
int_type* s2 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s2,s3,cmp_type));
cudaFree(s3);
cudaFree(s2);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
float_type* s2 = exe_vectors_f.top();
exe_vectors_f.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s2,s3,cmp_type));
cudaFree(s3);
cudaFree(s2);
}
else if (s1.compare("VECTOR F") == 0 && s2.compare("VECTOR") == 0) {
cmp_type = reverse_op(cmp_type);
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
int_type* s2 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,s2,cmp_type));
cudaFree(s3);
cudaFree(s2);
}
else if (s1.compare("VECTOR") == 0 && s2.compare("VECTOR F") == 0) {
float_type* s3 = exe_vectors_f.top();
exe_vectors_f.pop();
int_type* s2 = exe_vectors.top();
exe_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_compare(s3,s2,cmp_type));
cudaFree(s3);
cudaFree(s2);
}
else if (s1.compare("NAME") == 0 && s2.compare("NAME") == 0) {
s1_val = exe_value.top();
exe_value.pop();
s2_val = exe_value.top();
exe_value.pop();
exe_type.push("VECTOR");
if (a->type[(a->columnNames)[s1_val]] == 0) {
int_type* t = a->get_host_int_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t1 = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(t1,t,cmp_type));
}
else {
float_type* t1 = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t1,t,cmp_type));
};
}
else {
cmp_type = reverse_op(cmp_type);
float_type* t = a->get_host_float_by_name(s1_val);
if (a->type[(a->columnNames)[s2_val]] == 0) {
int_type* t1 = a->get_host_int_by_name(s2_val);
bool_vectors.push(host_compare(t,t1,cmp_type));
}
else {
float_type* t1 = a->get_host_float_by_name(s2_val);
bool_vectors.push(host_compare(t,t1,cmp_type));
};
}
}
}
else if (ss.compare("AND") == 0) {
bool* s3 = bool_vectors.top();
bool_vectors.pop();
bool* s2 = bool_vectors.top();
bool_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_logical_and(s2,s3));
cudaFree(s3);
}
else if (ss.compare("OR") == 0) {
bool* s3 = bool_vectors.top();
bool_vectors.pop();
bool* s2 = bool_vectors.top();
bool_vectors.pop();
exe_type.push("VECTOR");
bool_vectors.push(host_logical_or(s2,s3));
cudaFree(s3);
}
else {
cout << "found nothing " << endl;
}
};
};
bool* sv = bool_vectors.top();
if(sv[0] && sv[1]) {
free(sv);
return 1;
}
else {
free(sv);
return 0;
};
}
|
82db0836adc65ad9f9b064c34adbe984b984dbad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_blur.h"
#include "launch_utils.h"
#include "ImageApron.h"
#ifndef M_PI
// Some trouble with Maths defines with MSVC
#define M_PI 3.14159265358979323846
#endif
namespace roo {
//////////////////////////////////////////////////////
// Small Radius Gaussian Blur
//////////////////////////////////////////////////////
template<typename TO, typename TI>
__global__ void KernBlurX(Image<TO> out, Image<TI> in)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
if(x==0) {
out(x,y) = (2*in(x,y) + in(x+1,y)) / 3.0f;
}else if(x== in.w-1) {
out(x,y) = (2*in(x,y) + in(x-1,y)) / 3.0f;
}else{
out(x,y) = (in(x-1,y) + 2*in(x,y) + in(x+1,y)) / 4.0f;
}
}
template<typename TO, typename TI>
__global__ void KernBlurY(Image<TO> out, Image<TI> in)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
if(y==0) {
out(x,y) = (2*in(x,y) + in(x,y+1)) / 3.0f;
}else if(y== in.h-1) {
out(x,y) = (2*in(x,y) + in(x,y-1)) / 3.0f;
}else{
out(x,y) = (in(x,y-1) + 2*in(x,y) + in(x,y+1)) / 4.0f;
}
}
void Blur(Image<unsigned char> out, Image<unsigned char> in, Image<unsigned char> temp )
{
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, out, 16, 16);
hipLaunchKernelGGL(( KernBlurX<unsigned char,unsigned char>), dim3(gridDim),dim3(blockDim), 0, 0, temp,in);
hipLaunchKernelGGL(( KernBlurY<unsigned char,unsigned char>), dim3(gridDim),dim3(blockDim), 0, 0, out,temp);
}
//////////////////////////////////////////////////////
// Larger radius Gaussian Blur
// http://http.developer.nvidia.com/GPUGems3/gpugems3_ch40.html
//////////////////////////////////////////////////////
template<typename TO, typename TI, unsigned MAXBW, unsigned MAXBH, unsigned RAD>
__global__ void KernGaussianBlurX(Image<TO> out, Image<TI> in, float g0, float g1)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
__shared__ ImageApronRows<TI,MAXBW,MAXBH,0> apron;
apron.CacheImage(in);
__syncthreads();
if(out.InBounds(x,y)) {
float pixsum = 0;
float gsum = 0;
float g2 = g1 * g1;
#pragma unroll
for (int i = 0; i < RAD; i++) {
// g0 is current gaussian coefficient for sample i
gsum += g0;
pixsum += g0 * apron.GetRelThreadClampX(i,0);
pixsum += g0 * apron.GetRelThreadClampX(-i,0);
g0 *= g1;
g1 *= g2;
}
out(x,y) = max(0.0f,min(pixsum / (2*gsum),255.0f));
}
}
template<typename TO, typename TI, unsigned MAXBW, unsigned MAXBH, unsigned RAD>
__global__ void KernGaussianBlurY(Image<TO> out, Image<TI> in, float g0, float g1)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
__shared__ ImageApronRows<TI,MAXBW,MAXBH,0> apron;
apron.CacheImage(in);
__syncthreads();
if(out.InBounds(x,y)) {
float pixsum = 0;
float gsum = 0;
float g2 = g1 * g1;
#pragma unroll
for (int i = 0; i <= RAD; i++) {
// g0 is current gaussian coefficient for sample i
gsum += g0;
pixsum += g0 * apron.GetRelThreadClampY(0, i);
pixsum += g0 * apron.GetRelThreadClampY(0,-i);
g0 *= g1;
g1 *= g2;
}
out(x,y) = max(0.0f,min(pixsum / (2*gsum),255.0f));
}
}
template<typename Tout, typename Tin, unsigned MAXRAD, unsigned MAXIMGDIM>
void GaussianBlur(Image<Tout> out, Image<Tin> in, Image<Tout> temp, float sigma)
{
if(sigma == 0 ) {
out.CopyFrom(in);
}else{
dim3 blockDim, gridDim;
const float delta = 1;
const float g0 = 1.0 / (sqrt(2.0 * M_PI) * sigma);
const float g1 = exp(-0.5 * delta * delta / (sigma * sigma));
InitDimFromOutputImageOver(blockDim,gridDim, out, out.w,1);
hipLaunchKernelGGL(( KernGaussianBlurX<unsigned char, unsigned char, MAXIMGDIM, 1, MAXRAD>), dim3(gridDim),dim3(blockDim), 0, 0, temp,in, g0, g1);
InitDimFromOutputImageOver(blockDim,gridDim, out, 1, out.h);
hipLaunchKernelGGL(( KernGaussianBlurY<unsigned char, unsigned char, 1, MAXIMGDIM, MAXRAD>), dim3(gridDim),dim3(blockDim), 0, 0, out,temp, g0, g1);
}
}
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 5, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 10, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 15, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 20, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
}
| 82db0836adc65ad9f9b064c34adbe984b984dbad.cu | #include "cu_blur.h"
#include "launch_utils.h"
#include "ImageApron.h"
#ifndef M_PI
// Some trouble with Maths defines with MSVC
#define M_PI 3.14159265358979323846
#endif
namespace roo {
//////////////////////////////////////////////////////
// Small Radius Gaussian Blur
//////////////////////////////////////////////////////
template<typename TO, typename TI>
__global__ void KernBlurX(Image<TO> out, Image<TI> in)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
if(x==0) {
out(x,y) = (2*in(x,y) + in(x+1,y)) / 3.0f;
}else if(x== in.w-1) {
out(x,y) = (2*in(x,y) + in(x-1,y)) / 3.0f;
}else{
out(x,y) = (in(x-1,y) + 2*in(x,y) + in(x+1,y)) / 4.0f;
}
}
template<typename TO, typename TI>
__global__ void KernBlurY(Image<TO> out, Image<TI> in)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
if(y==0) {
out(x,y) = (2*in(x,y) + in(x,y+1)) / 3.0f;
}else if(y== in.h-1) {
out(x,y) = (2*in(x,y) + in(x,y-1)) / 3.0f;
}else{
out(x,y) = (in(x,y-1) + 2*in(x,y) + in(x,y+1)) / 4.0f;
}
}
void Blur(Image<unsigned char> out, Image<unsigned char> in, Image<unsigned char> temp )
{
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, out, 16, 16);
KernBlurX<unsigned char,unsigned char><<<gridDim,blockDim>>>(temp,in);
KernBlurY<unsigned char,unsigned char><<<gridDim,blockDim>>>(out,temp);
}
//////////////////////////////////////////////////////
// Larger radius Gaussian Blur
// http://http.developer.nvidia.com/GPUGems3/gpugems3_ch40.html
//////////////////////////////////////////////////////
template<typename TO, typename TI, unsigned MAXBW, unsigned MAXBH, unsigned RAD>
__global__ void KernGaussianBlurX(Image<TO> out, Image<TI> in, float g0, float g1)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
__shared__ ImageApronRows<TI,MAXBW,MAXBH,0> apron;
apron.CacheImage(in);
__syncthreads();
if(out.InBounds(x,y)) {
float pixsum = 0;
float gsum = 0;
float g2 = g1 * g1;
#pragma unroll
for (int i = 0; i < RAD; i++) {
// g0 is current gaussian coefficient for sample i
gsum += g0;
pixsum += g0 * apron.GetRelThreadClampX(i,0);
pixsum += g0 * apron.GetRelThreadClampX(-i,0);
g0 *= g1;
g1 *= g2;
}
out(x,y) = max(0.0f,min(pixsum / (2*gsum),255.0f));
}
}
template<typename TO, typename TI, unsigned MAXBW, unsigned MAXBH, unsigned RAD>
__global__ void KernGaussianBlurY(Image<TO> out, Image<TI> in, float g0, float g1)
{
const unsigned x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y*blockDim.y + threadIdx.y;
__shared__ ImageApronRows<TI,MAXBW,MAXBH,0> apron;
apron.CacheImage(in);
__syncthreads();
if(out.InBounds(x,y)) {
float pixsum = 0;
float gsum = 0;
float g2 = g1 * g1;
#pragma unroll
for (int i = 0; i <= RAD; i++) {
// g0 is current gaussian coefficient for sample i
gsum += g0;
pixsum += g0 * apron.GetRelThreadClampY(0, i);
pixsum += g0 * apron.GetRelThreadClampY(0,-i);
g0 *= g1;
g1 *= g2;
}
out(x,y) = max(0.0f,min(pixsum / (2*gsum),255.0f));
}
}
template<typename Tout, typename Tin, unsigned MAXRAD, unsigned MAXIMGDIM>
void GaussianBlur(Image<Tout> out, Image<Tin> in, Image<Tout> temp, float sigma)
{
if(sigma == 0 ) {
out.CopyFrom(in);
}else{
dim3 blockDim, gridDim;
const float delta = 1;
const float g0 = 1.0 / (sqrt(2.0 * M_PI) * sigma);
const float g1 = exp(-0.5 * delta * delta / (sigma * sigma));
InitDimFromOutputImageOver(blockDim,gridDim, out, out.w,1);
KernGaussianBlurX<unsigned char, unsigned char, MAXIMGDIM, 1, MAXRAD><<<gridDim,blockDim>>>(temp,in, g0, g1);
InitDimFromOutputImageOver(blockDim,gridDim, out, 1, out.h);
KernGaussianBlurY<unsigned char, unsigned char, 1, MAXIMGDIM, MAXRAD><<<gridDim,blockDim>>>(out,temp, g0, g1);
}
}
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 5, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 10, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 15, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
template KANGAROO_EXPORT void GaussianBlur<unsigned char,unsigned char, 20, 1024>(Image<unsigned char>, Image<unsigned char>, Image<unsigned char>, float);
}
|
7b9cbbdbbe72b5db570fb1b120b51ea86b15034e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* cub_impl.cu
* Copyright (C) 2016- CloudBrain <byzhang@>
*/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include "cub_impl.h"
#include "distance.h"
#include <hipcub/hipcub.hpp>
#include <glog/logging.h>
using namespace cub;
using namespace std;
//static const uint32_t num_data_per_block = 1024;
static hipcub::CachingDeviceAllocator allocator_;
kNN_Impl_CUB::~kNN_Impl_CUB() {
}
kNN_Impl_CUB::kNN_Impl_CUB(const std::vector<uint32_t>& data, uint32_t num_data, uint32_t num_dim)
: kNN::impl(data, num_data, num_dim) {
}
// Returns `r` from the enclosing function when the CUDA/CUB status `e` is an error.
#define CubDebugReturn(e, r) if (cub::Debug((e), __FILE__, __LINE__)) { return r; }
// Brute-force top-k nearest-neighbour search: computes the Hamming distance
// between `query` and every stored point on the GPU, radix-sorts the
// (distance, index) pairs with CUB, and returns the indexes of the `top_k`
// closest points. Returns an empty vector on dimensionality mismatch or on
// any device allocation/copy failure.
// NOTE(review): every early CubDebugReturn after the first allocation leaks
// the device buffers allocated so far -- worth confirming and cleaning up.
std::vector<uint32_t> kNN_Impl_CUB::search(const std::vector<uint32_t>& query, uint32_t top_k) {
std::vector<uint32_t> indexes{};
// Reject queries whose dimensionality differs from the indexed data.
if (query.size() != num_dim_) {
LOG_EVERY_N(ERROR, 10000) << "size mismatch:"
<< "query = " << query.size()
<< ", num_dim = " << num_dim_;
return indexes;
}
// TODO: thread local
uint32_t* d_query = nullptr;
auto error = allocator_.DeviceAllocate((void**)&d_query, sizeof(uint32_t) * num_dim_);
if (error != hipSuccess) {
LOG_EVERY_N(ERROR, 1000) << "error " << error << " when aollcating d_query:" << num_dim_;
return indexes;
}
// Copy the query vector to the device.
CubDebugReturn(hipMemcpy(d_query, query.data(), sizeof(uint32_t) * num_dim_, hipMemcpyHostToDevice), indexes);
// TODO: thread local
// Double buffers for CUB radix sort: keys = distances, values = data indexes.
DoubleBuffer<KEY_T> d_keys;
DoubleBuffer<uint32_t> d_values;
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_keys.d_buffers[0], sizeof(KEY_T) * num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(KEY_T) * num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_values.d_buffers[0], sizeof(uint32_t) * num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_values.d_buffers[1], sizeof(uint32_t) * num_data_), indexes);
auto* keys = d_keys.d_buffers[0];
auto* values = d_values.d_buffers[0];
// One block per texture row; dynamic shared memory holds the query vector.
// NOTE(review): the launch result is never checked -- a failed launch would
// only surface through the later CUB calls.
hipLaunchKernelGGL(( hamming_distance), dim3(tex_height_), dim3(num_data_per_block), num_dim_ * sizeof(uint32_t), 0, keys, values, d_query, tex_, tex_height_, num_dim_, num_data_per_block, num_data_);
// Allocate temporary storage
// First SortPairs call (null temp storage) only queries the required size.
size_t temp_storage_bytes = 0;
void *d_temp_storage = nullptr;
CubDebugReturn(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate(&d_temp_storage, temp_storage_bytes), indexes);
// Real sort
d_keys.selector = d_values.selector = 0;
CubDebugReturn(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_data_), indexes);
hipDeviceSynchronize();
// copy to host: only the first min(top_k, num_data_) sorted indexes are needed.
indexes.resize(min(top_k, num_data_));
CubDebugReturn(hipMemcpy(indexes.data(), d_values.Current(), sizeof(uint32_t) * indexes.size(), hipMemcpyDeviceToHost), indexes);
// cleanup
if (d_keys.d_buffers[0]) CubDebugExit(allocator_.DeviceFree(d_keys.d_buffers[0]));
if (d_keys.d_buffers[1]) CubDebugExit(allocator_.DeviceFree(d_keys.d_buffers[1]));
if (d_values.d_buffers[0]) CubDebugExit(allocator_.DeviceFree(d_values.d_buffers[0]));
if (d_values.d_buffers[1]) CubDebugExit(allocator_.DeviceFree(d_values.d_buffers[1]));
if (d_temp_storage) CubDebugExit(allocator_.DeviceFree(d_temp_storage));
if (d_query) CubDebugExit(allocator_.DeviceFree(d_query));
return indexes;
}
| 7b9cbbdbbe72b5db570fb1b120b51ea86b15034e.cu | /*
* cub_impl.cu
* Copyright (C) 2016- CloudBrain <byzhang@>
*/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include "cub_impl.h"
#include "distance.h"
#include <cub/cub.cuh>
#include <glog/logging.h>
using namespace cub;
using namespace std;
//static const uint32_t num_data_per_block = 1024;
static cub::CachingDeviceAllocator allocator_;
// Trivial destructor: device buffers are allocated and freed per-search, so
// there is no persistent GPU state to release here.
kNN_Impl_CUB::~kNN_Impl_CUB() {
}
// Forwards the raw data buffer, point count and dimensionality to the base
// implementation; no CUB-specific setup is done at construction time.
kNN_Impl_CUB::kNN_Impl_CUB(const std::vector<uint32_t>& data, uint32_t num_data, uint32_t num_dim)
: kNN::impl(data, num_data, num_dim) {
}
// Returns `r` from the enclosing function when the CUDA/CUB status `e` is an error.
#define CubDebugReturn(e, r) if (cub::Debug((e), __FILE__, __LINE__)) { return r; }
// Brute-force top-k nearest-neighbour search: computes the Hamming distance
// between `query` and every stored point on the GPU, radix-sorts the
// (distance, index) pairs with CUB, and returns the indexes of the `top_k`
// closest points. Returns an empty vector on dimensionality mismatch or on
// any device allocation/copy failure.
// NOTE(review): every early CubDebugReturn after the first allocation leaks
// the device buffers allocated so far -- worth confirming and cleaning up.
std::vector<uint32_t> kNN_Impl_CUB::search(const std::vector<uint32_t>& query, uint32_t top_k) {
std::vector<uint32_t> indexes{};
// Reject queries whose dimensionality differs from the indexed data.
if (query.size() != num_dim_) {
LOG_EVERY_N(ERROR, 10000) << "size mismatch:"
<< "query = " << query.size()
<< ", num_dim = " << num_dim_;
return indexes;
}
// TODO: thread local
uint32_t* d_query = nullptr;
auto error = allocator_.DeviceAllocate((void**)&d_query, sizeof(uint32_t) * num_dim_);
if (error != cudaSuccess) {
LOG_EVERY_N(ERROR, 1000) << "error " << error << " when aollcating d_query:" << num_dim_;
return indexes;
}
// Copy the query vector to the device.
CubDebugReturn(cudaMemcpy(d_query, query.data(), sizeof(uint32_t) * num_dim_, cudaMemcpyHostToDevice), indexes);
// TODO: thread local
// Double buffers for CUB radix sort: keys = distances, values = data indexes.
DoubleBuffer<KEY_T> d_keys;
DoubleBuffer<uint32_t> d_values;
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_keys.d_buffers[0], sizeof(KEY_T) * num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_keys.d_buffers[1], sizeof(KEY_T) * num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_values.d_buffers[0], sizeof(uint32_t) * num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate((void**)&d_values.d_buffers[1], sizeof(uint32_t) * num_data_), indexes);
auto* keys = d_keys.d_buffers[0];
auto* values = d_values.d_buffers[0];
// One block per texture row; dynamic shared memory holds the query vector.
// NOTE(review): the launch result is never checked -- a failed launch would
// only surface through the later CUB calls.
hamming_distance<<<tex_height_, num_data_per_block, num_dim_ * sizeof(uint32_t)>>>(keys, values, d_query, tex_, tex_height_, num_dim_, num_data_per_block, num_data_);
// Allocate temporary storage
// First SortPairs call (null temp storage) only queries the required size.
size_t temp_storage_bytes = 0;
void *d_temp_storage = nullptr;
CubDebugReturn(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_data_), indexes);
CubDebugReturn(allocator_.DeviceAllocate(&d_temp_storage, temp_storage_bytes), indexes);
// Real sort
d_keys.selector = d_values.selector = 0;
CubDebugReturn(DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_data_), indexes);
cudaDeviceSynchronize();
// copy to host: only the first min(top_k, num_data_) sorted indexes are needed.
indexes.resize(min(top_k, num_data_));
CubDebugReturn(cudaMemcpy(indexes.data(), d_values.Current(), sizeof(uint32_t) * indexes.size(), cudaMemcpyDeviceToHost), indexes);
// cleanup
if (d_keys.d_buffers[0]) CubDebugExit(allocator_.DeviceFree(d_keys.d_buffers[0]));
if (d_keys.d_buffers[1]) CubDebugExit(allocator_.DeviceFree(d_keys.d_buffers[1]));
if (d_values.d_buffers[0]) CubDebugExit(allocator_.DeviceFree(d_values.d_buffers[0]));
if (d_values.d_buffers[1]) CubDebugExit(allocator_.DeviceFree(d_values.d_buffers[1]));
if (d_temp_storage) CubDebugExit(allocator_.DeviceFree(d_temp_storage));
if (d_query) CubDebugExit(allocator_.DeviceFree(d_query));
return indexes;
}
|
ba716bc9ce4e28abbb495930bf0ab9dc555a6ae6.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../common.h"
// Sequential host-side sum of the first n elements of data.
// Serves as the reference result for validating the GPU reductions.
static int reduceCPU(int *data, int n)
{
    int total = 0;
    int *end = data + n;
    for (int *p = data; p != end; ++p) {
        total += *p;
    }
    return total;
}
const int n = (1 << 22); // number of elements to reduce
const int BLOCK_SIZE = 1024;
// Warp-shuffle reduction kernel.
// Each thread loads one element (or 0 if past the end of the array), each warp
// sums its 32 lanes with __shfl_down_sync, and lane 0 of every warp atomically
// adds the warp total into d_odata[blockIdx.x].
// Preconditions: d_odata must be zero-initialised by the caller (this kernel
// only accumulates) and blockDim.x must be a multiple of warpSize.
__global__
void reduceGPUShuffle(int* d_odata, int* d_idata, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds-guard the global load (the original read out of range when n was
    // not a multiple of the block size). Out-of-range threads contribute 0 but
    // still participate in the shuffles, keeping the full-warp mask valid.
    int threadData = (index < n) ? d_idata[index] : 0;
    // Butterfly-style down-shuffle: after log2(warpSize) steps lane 0 holds the
    // warp's sum.
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
    {
        threadData += __shfl_down_sync(0xffffffff, threadData, offset);
    }
    // One atomic per warp keeps contention on d_odata low.
    if (threadIdx.x % warpSize == 0) {
        atomicAdd(&d_odata[blockIdx.x], threadData);
    }
}
// Shared-memory tree reduction kernel.
// Each block stages blockDim.x inputs into shared memory (0 for threads past
// the end of the array), reduces them with interleaved pairwise addition, and
// thread 0 writes the per-block partial sum to d_odata[blockIdx.x].
// Precondition: blockDim.x == BLOCK_SIZE (the shared-memory tile size).
__global__
void reduceGPU(int* d_odata, int* d_idata, int n) {
    __shared__ int blockReduce[BLOCK_SIZE];
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds-guard the global load: the original read out of bounds whenever
    // n was not a multiple of the block size. Padding with 0 keeps the sum
    // correct for a partial final block.
    blockReduce[threadIdx.x] = (index < n) ? d_idata[index] : 0;
    __syncthreads();
    // Interleaved addressing: the stride doubles each step and the surviving
    // threads are those whose index is a multiple of 2*offset.
    for (int offset = 1; offset < BLOCK_SIZE; offset <<= 1)
    {
        if ((threadIdx.x % (offset << 1)) == 0)
        {
            blockReduce[threadIdx.x] += blockReduce[threadIdx.x + offset];
        }
        __syncthreads(); // barrier deliberately outside the divergent branch
    }
    // Thread 0 publishes the block's partial sum (plain store, not atomic).
    if (threadIdx.x == 0) {
        d_odata[blockIdx.x] = blockReduce[0];
    }
}
// Host driver: fills an array with random bytes, reduces it on the GPU with
// the warp-shuffle kernel (per-block partials finished on the host), times the
// GPU phase with events and validates against the CPU reference.
int main()
{
    // int n = (1 << 22); // number of elements to reduce
    unsigned bytes = n * sizeof(int);
    int *h_idata = (int *) malloc(bytes);
    for (int i = 0; i < n; i++)
        h_idata[i] = (int)(rand() & 0xFF);
    // One thread per element, BLOCK_SIZE threads per block.
    int numBlocks = divup(n, BLOCK_SIZE);
    int numThreads = BLOCK_SIZE;
    // allocate device memory and data
    int *d_idata = NULL, *d_odata = NULL;
    CUDA(hipMalloc((void **) &d_idata, bytes));
    CUDA(hipMalloc((void **) &d_odata, numBlocks * sizeof(int)));
    // copy data to device memory
    CUDA(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
    // reduceGPUShuffle accumulates into d_odata with atomicAdd, so it must be
    // zero-initialised first. The original code reduced into uninitialised
    // memory, which made the result nondeterministic.
    CUDA(hipMemset(d_odata, 0, numBlocks * sizeof(int)));
    int gpu_result = 0;
    hipEvent_t start, stop;
    CUDA(hipEventCreate(&start));
    CUDA(hipEventCreate(&stop));
    // Start record
    CUDA(hipEventRecord(start, 0));
    // (1) reduce all elements into one partial sum per block
    //reduceGPU<<<numBlocks, numThreads>>>(d_odata, d_idata, n);
    hipLaunchKernelGGL(( reduceGPUShuffle), dim3(numBlocks), dim3(numThreads), 0, 0, d_odata, d_idata, n);
    // (2) finish the reduction across blocks on the host
    size_t block_bytes = numBlocks * sizeof(int);
    int *h_blocks = (int *)malloc(block_bytes);
    CUDA(hipMemcpy(h_blocks, d_odata, block_bytes, hipMemcpyDeviceToHost));
    for (int i = 0; i < numBlocks; ++i)
        gpu_result += h_blocks[i];
    CUDA(hipEventRecord(stop, 0));
    CUDA(hipEventSynchronize(stop));
    float time_ms;
    // Elapsed time covers kernel + D2H copy + host-side accumulation, in ms.
    CUDA(hipEventElapsedTime(&time_ms, start, stop));
    printf("bandwidth %.2f GB/s elements %u blocks %u threads %u time_in_kernel %.4f ms\n",
        1e-9 * bytes/(time_ms/1e3), n, numBlocks, numThreads, time_ms);
    // check result against CPU
    int cpu_result = reduceCPU(h_idata, n);
    printf("gpu %u cpu %u ", gpu_result, cpu_result);
    printf((gpu_result==cpu_result) ? "pass\n" : "FAIL\n");
    printf("Time to run kernel %.4f ms\n", time_ms);
    // cleanup
    CUDA(hipEventDestroy(start));
    CUDA(hipEventDestroy(stop));
    free(h_blocks);  // was leaked in the original
    free(h_idata);
    CUDA(hipFree(d_idata));
    CUDA(hipFree(d_odata));
    return 0;
}
| ba716bc9ce4e28abbb495930bf0ab9dc555a6ae6.cu | #include <cuda_runtime.h>
#include "../common.h"
// Sequential host-side sum of the first n elements of data.
// Serves as the reference result for validating the GPU reductions.
static int reduceCPU(int *data, int n)
{
    int total = 0;
    int *end = data + n;
    for (int *p = data; p != end; ++p) {
        total += *p;
    }
    return total;
}
const int n = (1 << 22); // number of elements to reduce
const int BLOCK_SIZE = 1024;
// Warp-shuffle reduction kernel.
// Each thread loads one element (or 0 if past the end of the array), each warp
// sums its 32 lanes with __shfl_down_sync, and lane 0 of every warp atomically
// adds the warp total into d_odata[blockIdx.x].
// Preconditions: d_odata must be zero-initialised by the caller (this kernel
// only accumulates) and blockDim.x must be a multiple of warpSize.
__global__
void reduceGPUShuffle(int* d_odata, int* d_idata, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds-guard the global load (the original read out of range when n was
    // not a multiple of the block size). Out-of-range threads contribute 0 but
    // still participate in the shuffles, keeping the full-warp mask valid.
    int threadData = (index < n) ? d_idata[index] : 0;
    // Butterfly-style down-shuffle: after log2(warpSize) steps lane 0 holds the
    // warp's sum.
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
    {
        threadData += __shfl_down_sync(0xffffffff, threadData, offset);
    }
    // One atomic per warp keeps contention on d_odata low.
    if (threadIdx.x % warpSize == 0) {
        atomicAdd(&d_odata[blockIdx.x], threadData);
    }
}
// Shared-memory tree reduction kernel.
// Each block stages blockDim.x inputs into shared memory (0 for threads past
// the end of the array), reduces them with interleaved pairwise addition, and
// thread 0 writes the per-block partial sum to d_odata[blockIdx.x].
// Precondition: blockDim.x == BLOCK_SIZE (the shared-memory tile size).
__global__
void reduceGPU(int* d_odata, int* d_idata, int n) {
    __shared__ int blockReduce[BLOCK_SIZE];
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds-guard the global load: the original read out of bounds whenever
    // n was not a multiple of the block size. Padding with 0 keeps the sum
    // correct for a partial final block.
    blockReduce[threadIdx.x] = (index < n) ? d_idata[index] : 0;
    __syncthreads();
    // Interleaved addressing: the stride doubles each step and the surviving
    // threads are those whose index is a multiple of 2*offset.
    for (int offset = 1; offset < BLOCK_SIZE; offset <<= 1)
    {
        if ((threadIdx.x % (offset << 1)) == 0)
        {
            blockReduce[threadIdx.x] += blockReduce[threadIdx.x + offset];
        }
        __syncthreads(); // barrier deliberately outside the divergent branch
    }
    // Thread 0 publishes the block's partial sum (plain store, not atomic).
    if (threadIdx.x == 0) {
        d_odata[blockIdx.x] = blockReduce[0];
    }
}
// Host driver: fills an array with random bytes, reduces it on the GPU with
// the warp-shuffle kernel (per-block partials finished on the host), times the
// GPU phase with events and validates against the CPU reference.
int main()
{
    // int n = (1 << 22); // number of elements to reduce
    unsigned bytes = n * sizeof(int);
    int *h_idata = (int *) malloc(bytes);
    for (int i = 0; i < n; i++)
        h_idata[i] = (int)(rand() & 0xFF);
    // One thread per element, BLOCK_SIZE threads per block.
    int numBlocks = divup(n, BLOCK_SIZE);
    int numThreads = BLOCK_SIZE;
    // allocate device memory and data
    int *d_idata = NULL, *d_odata = NULL;
    CUDA(cudaMalloc((void **) &d_idata, bytes));
    CUDA(cudaMalloc((void **) &d_odata, numBlocks * sizeof(int)));
    // copy data to device memory
    CUDA(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
    // reduceGPUShuffle accumulates into d_odata with atomicAdd, so it must be
    // zero-initialised first. The original code reduced into uninitialised
    // memory, which made the result nondeterministic.
    CUDA(cudaMemset(d_odata, 0, numBlocks * sizeof(int)));
    int gpu_result = 0;
    cudaEvent_t start, stop;
    CUDA(cudaEventCreate(&start));
    CUDA(cudaEventCreate(&stop));
    // Start record
    CUDA(cudaEventRecord(start, 0));
    // (1) reduce all elements into one partial sum per block
    //reduceGPU<<<numBlocks, numThreads>>>(d_odata, d_idata, n);
    reduceGPUShuffle<<<numBlocks, numThreads>>>(d_odata, d_idata, n);
    // (2) finish the reduction across blocks on the host
    size_t block_bytes = numBlocks * sizeof(int);
    int *h_blocks = (int *)malloc(block_bytes);
    CUDA(cudaMemcpy(h_blocks, d_odata, block_bytes, cudaMemcpyDeviceToHost));
    for (int i = 0; i < numBlocks; ++i)
        gpu_result += h_blocks[i];
    CUDA(cudaEventRecord(stop, 0));
    CUDA(cudaEventSynchronize(stop));
    float time_ms;
    // Elapsed time covers kernel + D2H copy + host-side accumulation, in ms.
    CUDA(cudaEventElapsedTime(&time_ms, start, stop));
    printf("bandwidth %.2f GB/s elements %u blocks %u threads %u time_in_kernel %.4f ms\n",
        1e-9 * bytes/(time_ms/1e3), n, numBlocks, numThreads, time_ms);
    // check result against CPU
    int cpu_result = reduceCPU(h_idata, n);
    printf("gpu %u cpu %u ", gpu_result, cpu_result);
    printf((gpu_result==cpu_result) ? "pass\n" : "FAIL\n");
    printf("Time to run kernel %.4f ms\n", time_ms);
    // cleanup
    CUDA(cudaEventDestroy(start));
    CUDA(cudaEventDestroy(stop));
    free(h_blocks);  // was leaked in the original
    free(h_idata);
    CUDA(cudaFree(d_idata));
    CUDA(cudaFree(d_odata));
    return 0;
}
|
0dbe64f11eba69fcf1b12003931e0fef3e2fe613.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THH.h>
#include <THHGeneral.h>
#include <stdbool.h>
#include <stdio.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
// Forward resampling kernel: for each output element (b, c, y, x), reads the
// flow (dx, dy) from input2 (channel 0 = dx, channel 1 = dy), samples input1
// at the displaced position (x+dx, y+dy) with bilinear interpolation, and sums
// the samples over a kernel_size x kernel_size window.
// One thread per output element; n is the total output element count.
__global__ void kernel_Resample2d_updateOutput(const int n, const float* input1, const long4 input1_size, const long4 input1_stride,
const float* input2, const long4 input2_size, const long4 input2_stride, float* output, const long4 output_size, const long4 output_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float val = 0.0;
// Decompose the flat index into (b, c, y, x) coordinates of the output.
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
// Flow displacement for this pixel.
float dx = DIM3_INDEX(input2, b, 0, y, x);
float dy = DIM3_INDEX(input2, b, 1, y, x);
float xf = float(x) + dx;
float yf = float(y) + dy;
// Fractional parts give the bilinear interpolation weights.
float alpha = xf - floor(xf); // alpha
float beta = yf - floor(yf); // beta
// Left/right and top/bottom integer taps, clamped to the image bounds.
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
// Accumulate the bilinear sample over the kernel window.
// NOTE(review): window offsets are not clamped, so for kernel_size > 1 the
// taps can read past the image edge -- confirm kernel_size is always 1.
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += (1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx);
val += (alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx);
val += (1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx);
val += (alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx);
}
}
output[index] = val;
}
// Backward pass w.r.t. input1: scatters each gradOutput element back to the
// four bilinear corner taps it was sampled from (over the kernel window),
// weighting by the same interpolation coefficients as the forward pass and
// accumulating with atomicAdd (multiple outputs can map to one input pixel).
// Precondition: gradInput must be zero-initialised by the caller.
__global__ void kernel_Resample2d_backward_input1(
const int n, const float* input1, const long4 input1_size, const long4 input1_stride, const float* input2, const long4 input2_size, const long4 input2_stride,
const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
// Decompose the flat index into (b, c, y, x) coordinates of gradOutput.
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
float dx = DIM3_INDEX(input2, b, 0, y, x);
float dy = DIM3_INDEX(input2, b, 1, y, x);
float xf = float(x) + dx;
float yf = float(y) + dy;
// NOTE(review): int() truncates toward zero while the forward pass uses
// floor(); these differ for negative displaced coordinates -- confirm
// whether xf/yf are guaranteed non-negative.
float alpha = xf - int(xf); // alpha
float beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
// Corner taps, clamped to the input1 bounds.
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
// Backward pass w.r.t. the flow field input2. For each flow element (b, c, y, x)
// it sums, over all image channels and the kernel window, the product of
// gradOutput and differences of input1 corner taps: odd channels accumulate
// differences between bottom and top taps (y direction), even channels between
// right and left taps (x direction). One thread per gradInput element.
__global__ void kernel_Resample2d_backward_input2(
const int n, const float* input1, const long4 input1_size, const long4 input1_stride, const float* input2, const long4 input2_size, const long4 input2_stride,
const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
// Decompose the flat index into (b, c, y, x) coordinates of gradInput.
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
float dx = DIM3_INDEX(input2, b, 0, y, x);
float dy = DIM3_INDEX(input2, b, 1, y, x);
float xf = float(x) + dx;
float yf = float(y) + dy;
// Corner taps, clamped to the gradInput spatial bounds.
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
if (c % 2) {
// Odd channel: derivative along y -- bottom taps minus top taps, weighted
// by the x-interpolation coefficient gamma.
float gamma = 1 - (xf - floor(xf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
// Even channel: derivative along x -- right taps minus left taps, weighted
// by the y-interpolation coefficient gamma.
float gamma = 1 - (yf - floor(yf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
// Each thread owns a unique output element, so a plain store suffices.
gradInput[index] = output;
}
// Host launcher for the forward resampling kernel: packs the tensor sizes and
// strides into long4 values and launches one thread per output element on the
// current THC stream. Launch errors are surfaced via THCudaCheck.
void Resample2d_kernel_forward(THCState* state, THCudaTensor* input1, THCudaTensor* input2, THCudaTensor* output, int kernel_size) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 input2_size = make_long4(input2->size[0], input2->size[1], input2->size[2], input2->size[3]);
const long4 input2_stride = make_long4(input2->stride[0], input2->stride[1], input2->stride[2], input2->stride[3]);
const long4 output_size = make_long4(output->size[0], output->size[1], output->size[2], output->size[3]);
const long4 output_stride = make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]);
n = THCudaTensor_nElement(state, output)
;
hipLaunchKernelGGL(( kernel_Resample2d_updateOutput), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride,
THCudaTensor_data(state, output), output_size, output_stride, kernel_size);
THCudaCheck(hipGetLastError());
}
// Host launcher for the backward kernels: first scatters gradients into
// gradInput1 (one thread per gradOutput element), then computes the flow
// gradient gradInput2 (one thread per flow element), both on the current THC
// stream.
// NOTE(review): the input1 kernel accumulates with atomicAdd, so gradInput1 is
// assumed to be zero-initialised by the caller -- confirm upstream.
void Resample2d_kernel_backward(THCState* state, THCudaTensor* input1, THCudaTensor* input2, THCudaTensor* gradOutput, THCudaTensor* gradInput1, THCudaTensor* gradInput2, int kernel_size) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 input2_size = make_long4(input2->size[0], input2->size[1], input2->size[2], input2->size[3]);
const long4 input2_stride = make_long4(input2->stride[0], input2->stride[1], input2->stride[2], input2->stride[3]);
const long4 gradOutput_size = make_long4(gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
const long4 gradOutput_stride = make_long4(gradOutput->stride[0], gradOutput->stride[1], gradOutput->stride[2], gradOutput->stride[3]);
const long4 gradInput1_size = make_long4(gradInput1->size[0], gradInput1->size[1], gradInput1->size[2], gradInput1->size[3]);
const long4 gradInput1_stride = make_long4(gradInput1->stride[0], gradInput1->stride[1], gradInput1->stride[2], gradInput1->stride[3]);
n = THCudaTensor_nElement(state, gradOutput);
hipLaunchKernelGGL(( kernel_Resample2d_backward_input1), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride,
THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput1), gradInput1_size, gradInput1_stride, kernel_size
);
const long4 gradInput2_size = make_long4(gradInput2->size[0], gradInput2->size[1], gradInput2->size[2], gradInput2->size[3]);
const long4 gradInput2_stride = make_long4(gradInput2->stride[0], gradInput2->stride[1], gradInput2->stride[2], gradInput2->stride[3]);
n = THCudaTensor_nElement(state, gradInput2);
hipLaunchKernelGGL(( kernel_Resample2d_backward_input2), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride,
THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput2), gradInput2_size, gradInput2_stride, kernel_size
);
THCudaCheck(hipGetLastError());
}
#ifdef __cplusplus
}
#endif | 0dbe64f11eba69fcf1b12003931e0fef3e2fe613.cu | #include <THC.h>
#include <THCGeneral.h>
#include <stdbool.h>
#include <stdio.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
#ifdef __cplusplus
extern "C" {
#endif
// Forward resampling kernel: for each output element (b, c, y, x), reads the
// flow (dx, dy) from input2 (channel 0 = dx, channel 1 = dy), samples input1
// at the displaced position (x+dx, y+dy) with bilinear interpolation, and sums
// the samples over a kernel_size x kernel_size window.
// One thread per output element; n is the total output element count.
__global__ void kernel_Resample2d_updateOutput(const int n, const float* input1, const long4 input1_size, const long4 input1_stride,
const float* input2, const long4 input2_size, const long4 input2_stride, float* output, const long4 output_size, const long4 output_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float val = 0.0;
// Decompose the flat index into (b, c, y, x) coordinates of the output.
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
// Flow displacement for this pixel.
float dx = DIM3_INDEX(input2, b, 0, y, x);
float dy = DIM3_INDEX(input2, b, 1, y, x);
float xf = float(x) + dx;
float yf = float(y) + dy;
// Fractional parts give the bilinear interpolation weights.
float alpha = xf - floor(xf); // alpha
float beta = yf - floor(yf); // beta
// Left/right and top/bottom integer taps, clamped to the image bounds.
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
// Accumulate the bilinear sample over the kernel window.
// NOTE(review): window offsets are not clamped, so for kernel_size > 1 the
// taps can read past the image edge -- confirm kernel_size is always 1.
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += (1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx);
val += (alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx);
val += (1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx);
val += (alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx);
}
}
output[index] = val;
}
// Backward pass w.r.t. input1: scatters each gradOutput element back to the
// four bilinear corner taps it was sampled from (over the kernel window),
// weighting by the same interpolation coefficients as the forward pass and
// accumulating with atomicAdd (multiple outputs can map to one input pixel).
// Precondition: gradInput must be zero-initialised by the caller.
__global__ void kernel_Resample2d_backward_input1(
const int n, const float* input1, const long4 input1_size, const long4 input1_stride, const float* input2, const long4 input2_size, const long4 input2_stride,
const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
// Decompose the flat index into (b, c, y, x) coordinates of gradOutput.
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
float dx = DIM3_INDEX(input2, b, 0, y, x);
float dy = DIM3_INDEX(input2, b, 1, y, x);
float xf = float(x) + dx;
float yf = float(y) + dy;
// NOTE(review): int() truncates toward zero while the forward pass uses
// floor(); these differ for negative displaced coordinates -- confirm
// whether xf/yf are guaranteed non-negative.
float alpha = xf - int(xf); // alpha
float beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
// Corner taps, clamped to the input1 bounds.
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
// Backward pass w.r.t. the flow field input2. For each flow element (b, c, y, x)
// it sums, over all image channels and the kernel window, the product of
// gradOutput and differences of input1 corner taps: odd channels accumulate
// differences between bottom and top taps (y direction), even channels between
// right and left taps (x direction). One thread per gradInput element.
__global__ void kernel_Resample2d_backward_input2(
const int n, const float* input1, const long4 input1_size, const long4 input1_stride, const float* input2, const long4 input2_size, const long4 input2_stride,
const float* gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, float* gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
float output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
// Decompose the flat index into (b, c, y, x) coordinates of gradInput.
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
float dx = DIM3_INDEX(input2, b, 0, y, x);
float dy = DIM3_INDEX(input2, b, 1, y, x);
float xf = float(x) + dx;
float yf = float(y) + dy;
// Corner taps, clamped to the gradInput spatial bounds.
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
if (c % 2) {
// Odd channel: derivative along y -- bottom taps minus top taps, weighted
// by the x-interpolation coefficient gamma.
float gamma = 1 - (xf - floor(xf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
// Even channel: derivative along x -- right taps minus left taps, weighted
// by the y-interpolation coefficient gamma.
float gamma = 1 - (yf - floor(yf)); // alpha
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
// Each thread owns a unique output element, so a plain store suffices.
gradInput[index] = output;
}
// Host launcher for the forward resampling kernel: packs the tensor sizes and
// strides into long4 values and launches one thread per output element on the
// current THC stream. Launch errors are surfaced via THCudaCheck.
void Resample2d_kernel_forward(THCState* state, THCudaTensor* input1, THCudaTensor* input2, THCudaTensor* output, int kernel_size) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 input2_size = make_long4(input2->size[0], input2->size[1], input2->size[2], input2->size[3]);
const long4 input2_stride = make_long4(input2->stride[0], input2->stride[1], input2->stride[2], input2->stride[3]);
const long4 output_size = make_long4(output->size[0], output->size[1], output->size[2], output->size[3]);
const long4 output_stride = make_long4(output->stride[0], output->stride[1], output->stride[2], output->stride[3]);
n = THCudaTensor_nElement(state, output);
kernel_Resample2d_updateOutput<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride,
THCudaTensor_data(state, output), output_size, output_stride, kernel_size);
THCudaCheck(cudaGetLastError());
}
// Host launcher for the backward kernels: first scatters gradients into
// gradInput1 (one thread per gradOutput element), then computes the flow
// gradient gradInput2 (one thread per flow element), both on the current THC
// stream.
// NOTE(review): the input1 kernel accumulates with atomicAdd, so gradInput1 is
// assumed to be zero-initialised by the caller -- confirm upstream.
void Resample2d_kernel_backward(THCState* state, THCudaTensor* input1, THCudaTensor* input2, THCudaTensor* gradOutput, THCudaTensor* gradInput1, THCudaTensor* gradInput2, int kernel_size) {
int n = 0;
const long4 input1_size = make_long4(input1->size[0], input1->size[1], input1->size[2], input1->size[3]);
const long4 input1_stride = make_long4(input1->stride[0], input1->stride[1], input1->stride[2], input1->stride[3]);
const long4 input2_size = make_long4(input2->size[0], input2->size[1], input2->size[2], input2->size[3]);
const long4 input2_stride = make_long4(input2->stride[0], input2->stride[1], input2->stride[2], input2->stride[3]);
const long4 gradOutput_size = make_long4(gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]);
const long4 gradOutput_stride = make_long4(gradOutput->stride[0], gradOutput->stride[1], gradOutput->stride[2], gradOutput->stride[3]);
const long4 gradInput1_size = make_long4(gradInput1->size[0], gradInput1->size[1], gradInput1->size[2], gradInput1->size[3]);
const long4 gradInput1_stride = make_long4(gradInput1->stride[0], gradInput1->stride[1], gradInput1->stride[2], gradInput1->stride[3]);
n = THCudaTensor_nElement(state, gradOutput);
kernel_Resample2d_backward_input1<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride,
THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput1), gradInput1_size, gradInput1_stride, kernel_size
);
const long4 gradInput2_size = make_long4(gradInput2->size[0], gradInput2->size[1], gradInput2->size[2], gradInput2->size[3]);
const long4 gradInput2_stride = make_long4(gradInput2->stride[0], gradInput2->stride[1], gradInput2->stride[2], gradInput2->stride[3]);
n = THCudaTensor_nElement(state, gradInput2);
kernel_Resample2d_backward_input2<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
n, THCudaTensor_data(state, input1), input1_size, input1_stride, THCudaTensor_data(state, input2), input2_size, input2_stride,
THCudaTensor_data(state, gradOutput), gradOutput_size, gradOutput_stride, THCudaTensor_data(state, gradInput2), gradInput2_size, gradInput2_stride, kernel_size
);
THCudaCheck(cudaGetLastError());
}
#ifdef __cplusplus
}
#endif |
64087b51a2e11db626426482fa7cabfb943f4e41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "dataExtractor.h"
void showHolders(struct container * arr, int size) {
int A;
int B;
int op;
int result;
for (int i = 0; i < size; i++) {
A = arr[i].A;
B = arr[i].B;
op = arr[i].op;
result = arr[i].result;
printf("Holder %d,\nA: %d\nB: %d\nOperator: %d\nResult: %d\n\n", i, A, B, op, result);
}
}
struct container * getCSVData(char* file, int size)
{
FILE* my_file = fopen(file, "r");
if (my_file == NULL) {
return 0;
}
char line[10];
int count = 0;
struct container * holders = (struct container *)malloc(sizeof(struct container)*size);
char* tok;
int A;
int B;
int op;
while (fgets(line, 10, my_file))
{
char* tmp = strdup(line);
//holders[count] = (struct container*)malloc(sizeof(struct container));
tok = strtok(tmp, ",");
holders[count].A = atoi(tok);
tok = strtok(NULL, ",");
holders[count].B = atoi(tok);
tok = strtok(NULL, ",");
holders[count].op = atoi(tok);
holders[count].result = -1;
count++;
if (count > size) {
break;
}
}
//showHolders(holders, size);
return holders;
}
| 64087b51a2e11db626426482fa7cabfb943f4e41.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "dataExtractor.h"
void showHolders(struct container * arr, int size) {
int A;
int B;
int op;
int result;
for (int i = 0; i < size; i++) {
A = arr[i].A;
B = arr[i].B;
op = arr[i].op;
result = arr[i].result;
printf("Holder %d,\nA: %d\nB: %d\nOperator: %d\nResult: %d\n\n", i, A, B, op, result);
}
}
struct container * getCSVData(char* file, int size)
{
FILE* my_file = fopen(file, "r");
if (my_file == NULL) {
return 0;
}
char line[10];
int count = 0;
struct container * holders = (struct container *)malloc(sizeof(struct container)*size);
char* tok;
int A;
int B;
int op;
while (fgets(line, 10, my_file))
{
char* tmp = strdup(line);
//holders[count] = (struct container*)malloc(sizeof(struct container));
tok = strtok(tmp, ",");
holders[count].A = atoi(tok);
tok = strtok(NULL, ",");
holders[count].B = atoi(tok);
tok = strtok(NULL, ",");
holders[count].op = atoi(tok);
holders[count].result = -1;
count++;
if (count > size) {
break;
}
}
//showHolders(holders, size);
return holders;
}
|
b438b0bed7a7303d95f3b64c5499d445ee17af84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <string.h>
#include "config_flags.h"
#include "advance_mu_t_cu.h"
#define I3(i,k,j) ((j) * kdim * idim + (k) * idim + (i))
#define I2(i,j) ((j) * idim + (i))
#define min(a,b) ((a)<(b)?(a):(b))
#define max(a,b) ((a)>(b)?(a):(b))
__global__ void advance_mu_t_kernel( float * __restrict__ ww, float * __restrict__ ww_1, float * __restrict__ u, float * __restrict__ u_1,
float *v, float *v_1,
float * __restrict__ mu, float * __restrict__ mut, float * __restrict__ muave, float * __restrict__ muts,
float * __restrict__ muu,float * __restrict__ muv,
float * __restrict__ mudf, float * __restrict__ t, float * __restrict__ t_1,
float * __restrict__ t_ave, float * __restrict__ ft, float * __restrict__ mu_tend,
float rdx, float rdy, float dts, float epssm,
float * __restrict__ dnw, float * __restrict__ fnm, float * __restrict__ fnp, float * __restrict__ rdnw,
float * __restrict__ msfuy, float * __restrict__ msfvx_inv,
float * __restrict__ msftx, float * __restrict__ msfty,
float * __restrict__ wdtn, float * __restrict__ dvdxi, float * __restrict__ dmdt,
config_flags config,
int ids, int ide, int jds, int jde, int kds, int kde,
int idim, int jdim, int kdim,
int its, int ite, int jts, int jte, int kts, int kte )
{
int i, j, k;
int i_start, i_end, j_start, j_end, k_start, k_end;
//<DESCRIPTION>
//
// advance_mu_t advances the explicit perturbation theta equation and the mass
// conservation equation. In addition, the small timestep omega is updated,
// and some quantities needed in other places are squirrelled away.
//
//</DESCRIPTION>
// now, the real work.
// set the loop bounds taking into account boundary conditions.
i_start = its;
i_end = min(ite,ide-1);
j_start = jts;
j_end = min(jte,jde-1);
k_start = kts;
k_end = kte-1;
if( !config.periodic_x ){
if( config.specified || config.nested ) {
i_start = max(its,ids+1);
i_end = min(ite,ide-2);
}
}
if( config.specified || config.nested ) {
j_start = max(jts,jds+1);
j_end = min(jte,jde-2);
}
// CALCULATION OF WW (dETA/dt)
i = threadIdx.x + blockIdx.x * blockDim.x;
j = blockIdx.y;
if (j >= j_start && j <= j_end){
if (i >= i_start && i <= i_end){
float msftx_r = msftx[I2(i,j)];
float msfty_r = msfty[I2(i,j)];
float muv_r = muv[I2(i,j)];
float muv_r_1 = muv[I2(i,j+1)];
float msfvx_inv_r = msfvx_inv[I2(i,j)];
float msfvx_inv_r_1 = msfvx_inv[I2(i,j+1)];
float muu_r = muu[I2(i,j)];
float muu_r_1 = muu[I2(i+1,j)];
float msfuy_r = msfuy[I2(i,j)];
float msfuy_r_1 = msfuy[I2(i+1,j)];
dmdt[I2(i,j)] = 0.0f;
// NOTE: mu is not coupled with the map scale factor.
// ww (omega) IS coupled with the map scale factor.
// Being coupled with the map scale factor means
// multiplication by (1/msft) in this case.
// Comments on map scale factors
// ADT eqn 47:
// partial drho/dt = -mx*my[partial d/dx(rho u/my) + partial d/dy(rho v/mx)]
// -partial d/dz(rho w)
// with rho -> mu, dividing by my, and with partial d/dnu(rho nu/my [=ww])
// as the final term (because we're looking for d_nu_/dt)
//
// begin by integrating with respect to nu from bottom to top
// BCs are ww=0 at both
// final term gives 0
// first term gives Integral([1/my]partial d mu/dt) over total column = dm/dt
// RHS remaining is Integral(-mx[partial d/dx(mu u/my) +
// partial d/dy(mu v/mx)]) over column
// lines below find RHS terms at each level set dmdt = sum over all levels
//
// [don't divide the below by msfty until find ww, since dmdt is used in
// the meantime]
for(k = k_start; k <= k_end; k++){
dvdxi[I3(i,k,j)] = msftx_r*msfty_r*(
rdy*( (v[I3(i,k,j+1)]+muv_r_1*v_1[I3(i,k,j+1)]*msfvx_inv_r_1)
-(v[I3(i,k,j )]+muv_r *v_1[I3(i,k,j )]*msfvx_inv_r) )
+rdx*( (u[I3(i+1,k,j)]+muu_r_1*u_1[I3(i+1,k,j)]/msfuy_r_1)
-(u[I3(i ,k,j)]+muu_r *u_1[I3(i ,k,j)]/msfuy_r) ));
dmdt[I2(i,j)] = dmdt[I2(i,j)] + dnw[k]*dvdxi[I3(i,k,j)];
}
muave[I2(i,j)] = mu[I2(i,j)];
mu[I2(i,j)] = mu[I2(i,j)]+dts*(dmdt[I2(i,j)]+mu_tend[I2(i,j)]);
mudf[I2(i,j)] = (dmdt[I2(i,j)]+mu_tend[I2(i,j)]); // save tendency for div damp filter
muts[I2(i,j)] = mut[I2(i,j)]+mu[I2(i,j)];
muave[I2(i,j)] = 0.5f*((1.0f+epssm)*mu[I2(i,j)]+(1.0f-epssm)*muave[I2(i,j)]);
for(k = 1; k <= k_end; k++){
ww[I3(i,k,j)] = ww[I3(i,k-1,j)]-dnw[k-1]*(dmdt[I2(i,j)]+dvdxi[I3(i,k-1,j)]
+mu_tend[I2(i,j)])/msfty_r;
}
// NOTE: ww_1 (large timestep ww) is already coupled with the
// map scale factor
for(k = 0; k <= k_end; k++){
ww[I3(i,k,j)] = ww[I3(i,k,j)]-ww_1[I3(i,k,j)];
}
// CALCULATION OF THETA
// NOTE: theta'' is not coupled with the map-scale factor,
// while the theta'' tendency is coupled (i.e., mult by 1/msft)
// Comments on map scale factors
// BUT NOTE THAT both are mass coupled
// in flux form equations (Klemp et al.) Theta = mu*theta
//
// scalar eqn: partial d/dt(rho q/my) = -mx[partial d/dx(q rho u/my) +
// partial d/dy(q rho v/mx)]
// - partial d/dz(q rho w/my)
// with rho -> mu, and with partial d/dnu(q rho nu/my) as the final term
//
// adding previous tendency contribution which was map scale factor coupled
// (had been divided by msfty)
// need to uncouple before updating uncoupled Theta (by adding)
for(k = 0; k <= k_end; k++){
t_ave[I3(i,k,j)] = t[I3(i,k,j)];
t[I3(i,k,j)] = t[I3(i,k,j)] + msfty_r*dts*ft[I3(i,k,j)];
}
wdtn[I3(i,0,j)] = 0.0f;
wdtn[I3(i,kde,j)] = 0.0f;
for(k = 1; k <= k_end; k++){
// for scalar eqn RHS term 3
wdtn[I3(i,k,j)] = ww[I3(i,k,j)]*(fnm[k]*t_1[I3(i,k,j)]+fnp[k]
*t_1[I3(i,k-1,j)]);
}
// scalar eqn, RHS terms 1, 2 and 3
// multiply by msfty to uncouple result for Theta from map scale factor
for(k = 0; k <= k_end; k++){
// multiplication by msfty uncouples result for Theta
t[I3(i,k,j)] = t[I3(i,k,j)] - dts*msfty_r*(
// multiplication by mx needed for RHS terms 1 & 2
msftx_r*(
0.5f*rdy*
( v[I3(i,k,j+1)]*(t_1[I3(i,k,j+1)]+t_1[I3(i,k,j )])
-v[I3(i,k,j )]*(t_1[I3(i,k,j )]+t_1[I3(i,k,j-1)]) )
+ 0.5f*rdx*
( u[I3(i+1,k,j)]*(t_1[I3(i+1,k,j)]+t_1[I3(i ,k,j)])
-u[I3(i ,k,j)]*(t_1[I3(i ,k,j)]+t_1[I3(i-1,k,j)]) ) )
+ rdnw[k]*( wdtn[I3(i,k+1,j)]-wdtn[I3(i,k,j)] ) );
}
} // i
} // j
}
| b438b0bed7a7303d95f3b64c5499d445ee17af84.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <string.h>
#include "config_flags.h"
#include "advance_mu_t_cu.h"
#define I3(i,k,j) ((j) * kdim * idim + (k) * idim + (i))
#define I2(i,j) ((j) * idim + (i))
#define min(a,b) ((a)<(b)?(a):(b))
#define max(a,b) ((a)>(b)?(a):(b))
__global__ void advance_mu_t_kernel( float * __restrict__ ww, float * __restrict__ ww_1, float * __restrict__ u, float * __restrict__ u_1,
float *v, float *v_1,
float * __restrict__ mu, float * __restrict__ mut, float * __restrict__ muave, float * __restrict__ muts,
float * __restrict__ muu,float * __restrict__ muv,
float * __restrict__ mudf, float * __restrict__ t, float * __restrict__ t_1,
float * __restrict__ t_ave, float * __restrict__ ft, float * __restrict__ mu_tend,
float rdx, float rdy, float dts, float epssm,
float * __restrict__ dnw, float * __restrict__ fnm, float * __restrict__ fnp, float * __restrict__ rdnw,
float * __restrict__ msfuy, float * __restrict__ msfvx_inv,
float * __restrict__ msftx, float * __restrict__ msfty,
float * __restrict__ wdtn, float * __restrict__ dvdxi, float * __restrict__ dmdt,
config_flags config,
int ids, int ide, int jds, int jde, int kds, int kde,
int idim, int jdim, int kdim,
int its, int ite, int jts, int jte, int kts, int kte )
{
int i, j, k;
int i_start, i_end, j_start, j_end, k_start, k_end;
//<DESCRIPTION>
//
// advance_mu_t advances the explicit perturbation theta equation and the mass
// conservation equation. In addition, the small timestep omega is updated,
// and some quantities needed in other places are squirrelled away.
//
//</DESCRIPTION>
// now, the real work.
// set the loop bounds taking into account boundary conditions.
i_start = its;
i_end = min(ite,ide-1);
j_start = jts;
j_end = min(jte,jde-1);
k_start = kts;
k_end = kte-1;
if( !config.periodic_x ){
if( config.specified || config.nested ) {
i_start = max(its,ids+1);
i_end = min(ite,ide-2);
}
}
if( config.specified || config.nested ) {
j_start = max(jts,jds+1);
j_end = min(jte,jde-2);
}
// CALCULATION OF WW (dETA/dt)
i = threadIdx.x + blockIdx.x * blockDim.x;
j = blockIdx.y;
if (j >= j_start && j <= j_end){
if (i >= i_start && i <= i_end){
float msftx_r = msftx[I2(i,j)];
float msfty_r = msfty[I2(i,j)];
float muv_r = muv[I2(i,j)];
float muv_r_1 = muv[I2(i,j+1)];
float msfvx_inv_r = msfvx_inv[I2(i,j)];
float msfvx_inv_r_1 = msfvx_inv[I2(i,j+1)];
float muu_r = muu[I2(i,j)];
float muu_r_1 = muu[I2(i+1,j)];
float msfuy_r = msfuy[I2(i,j)];
float msfuy_r_1 = msfuy[I2(i+1,j)];
dmdt[I2(i,j)] = 0.0f;
// NOTE: mu is not coupled with the map scale factor.
// ww (omega) IS coupled with the map scale factor.
// Being coupled with the map scale factor means
// multiplication by (1/msft) in this case.
// Comments on map scale factors
// ADT eqn 47:
// partial drho/dt = -mx*my[partial d/dx(rho u/my) + partial d/dy(rho v/mx)]
// -partial d/dz(rho w)
// with rho -> mu, dividing by my, and with partial d/dnu(rho nu/my [=ww])
// as the final term (because we're looking for d_nu_/dt)
//
// begin by integrating with respect to nu from bottom to top
// BCs are ww=0 at both
// final term gives 0
// first term gives Integral([1/my]partial d mu/dt) over total column = dm/dt
// RHS remaining is Integral(-mx[partial d/dx(mu u/my) +
// partial d/dy(mu v/mx)]) over column
// lines below find RHS terms at each level set dmdt = sum over all levels
//
// [don't divide the below by msfty until find ww, since dmdt is used in
// the meantime]
for(k = k_start; k <= k_end; k++){
dvdxi[I3(i,k,j)] = msftx_r*msfty_r*(
rdy*( (v[I3(i,k,j+1)]+muv_r_1*v_1[I3(i,k,j+1)]*msfvx_inv_r_1)
-(v[I3(i,k,j )]+muv_r *v_1[I3(i,k,j )]*msfvx_inv_r) )
+rdx*( (u[I3(i+1,k,j)]+muu_r_1*u_1[I3(i+1,k,j)]/msfuy_r_1)
-(u[I3(i ,k,j)]+muu_r *u_1[I3(i ,k,j)]/msfuy_r) ));
dmdt[I2(i,j)] = dmdt[I2(i,j)] + dnw[k]*dvdxi[I3(i,k,j)];
}
muave[I2(i,j)] = mu[I2(i,j)];
mu[I2(i,j)] = mu[I2(i,j)]+dts*(dmdt[I2(i,j)]+mu_tend[I2(i,j)]);
mudf[I2(i,j)] = (dmdt[I2(i,j)]+mu_tend[I2(i,j)]); // save tendency for div damp filter
muts[I2(i,j)] = mut[I2(i,j)]+mu[I2(i,j)];
muave[I2(i,j)] = 0.5f*((1.0f+epssm)*mu[I2(i,j)]+(1.0f-epssm)*muave[I2(i,j)]);
for(k = 1; k <= k_end; k++){
ww[I3(i,k,j)] = ww[I3(i,k-1,j)]-dnw[k-1]*(dmdt[I2(i,j)]+dvdxi[I3(i,k-1,j)]
+mu_tend[I2(i,j)])/msfty_r;
}
// NOTE: ww_1 (large timestep ww) is already coupled with the
// map scale factor
for(k = 0; k <= k_end; k++){
ww[I3(i,k,j)] = ww[I3(i,k,j)]-ww_1[I3(i,k,j)];
}
// CALCULATION OF THETA
// NOTE: theta'' is not coupled with the map-scale factor,
// while the theta'' tendency is coupled (i.e., mult by 1/msft)
// Comments on map scale factors
// BUT NOTE THAT both are mass coupled
// in flux form equations (Klemp et al.) Theta = mu*theta
//
// scalar eqn: partial d/dt(rho q/my) = -mx[partial d/dx(q rho u/my) +
// partial d/dy(q rho v/mx)]
// - partial d/dz(q rho w/my)
// with rho -> mu, and with partial d/dnu(q rho nu/my) as the final term
//
// adding previous tendency contribution which was map scale factor coupled
// (had been divided by msfty)
// need to uncouple before updating uncoupled Theta (by adding)
for(k = 0; k <= k_end; k++){
t_ave[I3(i,k,j)] = t[I3(i,k,j)];
t[I3(i,k,j)] = t[I3(i,k,j)] + msfty_r*dts*ft[I3(i,k,j)];
}
wdtn[I3(i,0,j)] = 0.0f;
wdtn[I3(i,kde,j)] = 0.0f;
for(k = 1; k <= k_end; k++){
// for scalar eqn RHS term 3
wdtn[I3(i,k,j)] = ww[I3(i,k,j)]*(fnm[k]*t_1[I3(i,k,j)]+fnp[k]
*t_1[I3(i,k-1,j)]);
}
// scalar eqn, RHS terms 1, 2 and 3
// multiply by msfty to uncouple result for Theta from map scale factor
for(k = 0; k <= k_end; k++){
// multiplication by msfty uncouples result for Theta
t[I3(i,k,j)] = t[I3(i,k,j)] - dts*msfty_r*(
// multiplication by mx needed for RHS terms 1 & 2
msftx_r*(
0.5f*rdy*
( v[I3(i,k,j+1)]*(t_1[I3(i,k,j+1)]+t_1[I3(i,k,j )])
-v[I3(i,k,j )]*(t_1[I3(i,k,j )]+t_1[I3(i,k,j-1)]) )
+ 0.5f*rdx*
( u[I3(i+1,k,j)]*(t_1[I3(i+1,k,j)]+t_1[I3(i ,k,j)])
-u[I3(i ,k,j)]*(t_1[I3(i ,k,j)]+t_1[I3(i-1,k,j)]) ) )
+ rdnw[k]*( wdtn[I3(i,k+1,j)]-wdtn[I3(i,k,j)] ) );
}
} // i
} // j
}
|
8437a581d89257ef2097d24d1341773eed1958e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "lodepng.h"
#include <stdio.h>
//#include <stdint.h> /* for uint64 definition */
//#include <time.h> /* for clock_gettime */
// #define BILLION 1000000000L
__global__ void invert( unsigned char* image_d ){
int id=blockIdx.x*blockDim.x+threadIdx.x;
image_d[id] = ~image_d[id];
}
int main( int argc, char ** argv){
// variabler til tidtaking
// uint64_t diff;
// struct timespec start_hele, start_minne, start_exe, end;
size_t pngsize;
unsigned char *png;
const char * filename = "lenna512x512_inv.png";
// clock_gettime(CLOCK_MONOTONIC, &start_hele); /* mark start time */
/* Read in the image */
lodepng_load_file(&png, &pngsize, filename);
unsigned char *image;
unsigned int width, height;
/* Decode it into a RGB 8-bit per channel vector */
unsigned int error = lodepng_decode24(&image, &width, &height, png, pngsize);
/* Check if read and decode of .png went well */
if(error != 0){
std::cout << "error " << error << ": " << lodepng_error_text(error) << std::endl;
}
// Do work
unsigned char* image_d;
size_t size;
size=height*width*3*sizeof(char);
hipMalloc((void**) &image_d, size);
// clock_gettime(CLOCK_MONOTONIC, &start_minne); /* mark start time */
// clock_gettime(CLOCK_MONOTONIC, &start_exe); /* mark start time */
hipMemcpy(image_d, image, size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
// clock_gettime(CLOCK_MONOTONIC, &end); /* mark the end time */
// diff = BILLION * (end.tv_sec - start_minne.tv_sec) + end.tv_nsec - start_minne.tv_nsec; printf("minne: elapsed time = %llu nanoseconds\n", (long long unsigned int) diff);
int threadBlock=1024;
int gridBlock=3*512*512/threadBlock;
hipLaunchKernelGGL(( invert), dim3(gridBlock), dim3(threadBlock), 0, 0, image_d);
hipMemcpy(image, image_d, size, hipMemcpyDeviceToHost);
// clock_gettime(CLOCK_MONOTONIC, &end); /* mark the end time */
// diff = BILLION * (end.tv_sec - start_exe.tv_sec) + end.tv_nsec - start_exe.tv_nsec; printf("exe: elapsed time = %llu nanoseconds\n", (long long unsigned int) diff);
/* Save the result to a new .png file */
lodepng_encode24_file("lenna512x512_orig.png", image , width,height);
/*clean up */
// clock_gettime(CLOCK_MONOTONIC, &end); /* mark the end time */
// diff = BILLION * (end.tv_sec - start_hele.tv_sec) + end.tv_nsec - start_hele.tv_nsec; printf("hele: elapsed time = %llu nanoseconds\n", (long long unsigned int) diff);
free(image); hipFree(image_d);
return 0;
}
| 8437a581d89257ef2097d24d1341773eed1958e2.cu | #include <iostream>
#include "lodepng.h"
#include <stdio.h>
//#include <stdint.h> /* for uint64 definition */
//#include <time.h> /* for clock_gettime */
// #define BILLION 1000000000L
__global__ void invert( unsigned char* image_d ){
int id=blockIdx.x*blockDim.x+threadIdx.x;
image_d[id] = ~image_d[id];
}
int main( int argc, char ** argv){
// variabler til tidtaking
// uint64_t diff;
// struct timespec start_hele, start_minne, start_exe, end;
size_t pngsize;
unsigned char *png;
const char * filename = "lenna512x512_inv.png";
// clock_gettime(CLOCK_MONOTONIC, &start_hele); /* mark start time */
/* Read in the image */
lodepng_load_file(&png, &pngsize, filename);
unsigned char *image;
unsigned int width, height;
/* Decode it into a RGB 8-bit per channel vector */
unsigned int error = lodepng_decode24(&image, &width, &height, png, pngsize);
/* Check if read and decode of .png went well */
if(error != 0){
std::cout << "error " << error << ": " << lodepng_error_text(error) << std::endl;
}
// Do work
unsigned char* image_d;
size_t size;
size=height*width*3*sizeof(char);
cudaMalloc((void**) &image_d, size);
// clock_gettime(CLOCK_MONOTONIC, &start_minne); /* mark start time */
// clock_gettime(CLOCK_MONOTONIC, &start_exe); /* mark start time */
cudaMemcpy(image_d, image, size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
// clock_gettime(CLOCK_MONOTONIC, &end); /* mark the end time */
// diff = BILLION * (end.tv_sec - start_minne.tv_sec) + end.tv_nsec - start_minne.tv_nsec; printf("minne: elapsed time = %llu nanoseconds\n", (long long unsigned int) diff);
int threadBlock=1024;
int gridBlock=3*512*512/threadBlock;
invert<<<gridBlock, threadBlock>>>(image_d);
cudaMemcpy(image, image_d, size, cudaMemcpyDeviceToHost);
// clock_gettime(CLOCK_MONOTONIC, &end); /* mark the end time */
// diff = BILLION * (end.tv_sec - start_exe.tv_sec) + end.tv_nsec - start_exe.tv_nsec; printf("exe: elapsed time = %llu nanoseconds\n", (long long unsigned int) diff);
/* Save the result to a new .png file */
lodepng_encode24_file("lenna512x512_orig.png", image , width,height);
/*clean up */
// clock_gettime(CLOCK_MONOTONIC, &end); /* mark the end time */
// diff = BILLION * (end.tv_sec - start_hele.tv_sec) + end.tv_nsec - start_hele.tv_nsec; printf("hele: elapsed time = %llu nanoseconds\n", (long long unsigned int) diff);
free(image); cudaFree(image_d);
return 0;
}
|
d5c273ae3559c5f005e4675f1a3c9a52eea1736b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
int main()
{
/*
* Assign values to these variables so that the output string below prints the
* requested properties of the currently active GPU.
*/
int deviceId;
int computeCapabilityMajor;
int computeCapabilityMinor;
int multiProcessorCount;
int warpSize;
/*
* There should be no need to modify the output string below.
*/
printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, multiProcessorCount, computeCapabilityMajor, computeCapabilityMinor, warpSize);
}
| d5c273ae3559c5f005e4675f1a3c9a52eea1736b.cu | #include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
int main()
{
/*
* Assign values to these variables so that the output string below prints the
* requested properties of the currently active GPU.
*/
int deviceId;
int computeCapabilityMajor;
int computeCapabilityMinor;
int multiProcessorCount;
int warpSize;
/*
* There should be no need to modify the output string below.
*/
printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, multiProcessorCount, computeCapabilityMajor, computeCapabilityMinor, warpSize);
}
|
72599292cb54cc64c98650e44de2b9c10417a56d.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <numeric>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_cuda.h"
#include "helper_functions.h"
#include "cuda_rig.h"
#define BLOCK_SIZE 32
struct Memory {
float *sums;
float *a;
};
struct TestMem {
Memory *device;
Memory *host;
size_t size;
TestMem() {
device = new Memory();
host = new Memory();
}
~TestMem() {
delete[] host->sums;
delete[] host->a;
hipFree(device->sums);
hipFree(device->a);
delete device;
delete host;
}
};
__global__ void Kernel(float *sums, float *a, size_t size) {
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber * wgDimension + threadNum;
int shift = gid;
float sum = 0.;
for (int i = 0; i < size; i++) {
sum += a[i] * a[i + shift];
}
sums[shift] = sum;
}
void test_init(void *mem) {
TestMem *data = static_cast<TestMem *>(mem);
FILE *fp = fopen("../signal.txt", "r");
if (fp == NULL) {
fprintf(stderr, "Cannot open file 'signal.txt'\n");
exit(1);
}
fscanf(fp, "%d", &(data->size));
data->host->a = new float[2 * data->size];
data->host->sums = new float[1 * data->size];
for (int i = 0; i < data->size; i++) {
fscanf(fp, "%f", &(data->host->a[i]));
data->host->a[i + data->size] =
data->host->a[i]; // duplicate the array
data->host->sums[i] = 1;
}
fclose(fp);
CudaRig::InitAndCopy(
reinterpret_cast<void **>(&data->device->a),
data->host->a,
2 * data->size * sizeof(float));
CUDA_CHECK_ERROR();
CudaRig::InitAndCopy(
reinterpret_cast<void **>(&data->device->sums),
data->host->sums,
data->size * sizeof(float));
CUDA_CHECK_ERROR();
}
// Main program.
int main(int argc, char *argv[]) {
int dev = findCudaDevice(argc, (const char **)argv);
TestMem *mem = new TestMem();
CudaRig autocorrelation(mem, test_init);
autocorrelation.Init();
// Set up the execution parameters.
dim3 threads(BLOCK_SIZE, 1, 1);
// Set the number of blocks.
int num_blocks = (mem->size / BLOCK_SIZE);
dim3 grid(num_blocks, 1, 1);
std::cout << "Threads: " << BLOCK_SIZE << " | " << "Grid: " << num_blocks << std::endl;
CudaTimer t;
CudaRig::StartCudaTimer(&t);
// Execute the kernel.
Kernel<< <grid, threads>>>
(mem->device->sums, mem->device->a, mem->size);
CudaRig::StopCudaTimer(&t);
CUDA_CHECK_ERROR();
float msec = 0.0f;
hipEventElapsedTime(&msec, t.start, t.stop);
CUDA_CHECK_ERROR();
std::cout << "Took " << msec / 1000.0F << " seconds."
<< std::endl;
// Copy result from the device to the host.
hipMemcpy(mem->host->sums,
mem->device->sums,
mem->size * sizeof(int),
hipMemcpyDeviceToHost);
CUDA_CHECK_ERROR();
hipDeviceSynchronize();
CUDA_CHECK_ERROR();
bool last_positive = mem->host->sums[1] >= 0;
std::vector<float> cross_zero;
for (int i = 1; i < mem->size; i++) {
// std::cout << mem->host->sums[i] << std::endl;
if ( mem->host->sums[i] >= 0 && !last_positive ) {
//std::cout << i << std::endl;
cross_zero.push_back(i);
} else if (mem->host->sums[i] < 0 && last_positive) {
//std::cout << i << std::endl;
cross_zero.push_back(i);
}
last_positive = mem->host->sums[i] >= 0;
}
float last_float = cross_zero[0];
float total_diff = 0;
int count = 0;
for (auto v : cross_zero) {
if (v - last_float > 10) {
total_diff += v - last_float;
count++;
}
last_float = v;
}
float average = total_diff / (float) count;
std::cout << average * 2.0 << std::endl;
std::ofstream outfile;
outfile.open("autocorrelation.csv");
for (int i = 0; i < 512; i++) {
outfile << i << "," << mem->host->sums[i] << std::endl;
}
outfile.close();
return 0;
}
| 72599292cb54cc64c98650e44de2b9c10417a56d.cu | // System includes
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <numeric>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_cuda.h"
#include "helper_functions.h"
#include "cuda_rig.h"
#define BLOCK_SIZE 32
struct Memory {
float *sums;
float *a;
};
struct TestMem {
Memory *device;
Memory *host;
size_t size;
TestMem() {
device = new Memory();
host = new Memory();
}
~TestMem() {
delete[] host->sums;
delete[] host->a;
cudaFree(device->sums);
cudaFree(device->a);
delete device;
delete host;
}
};
__global__ void Kernel(float *sums, float *a, size_t size) {
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber * wgDimension + threadNum;
int shift = gid;
float sum = 0.;
for (int i = 0; i < size; i++) {
sum += a[i] * a[i + shift];
}
sums[shift] = sum;
}
void test_init(void *mem) {
TestMem *data = static_cast<TestMem *>(mem);
FILE *fp = fopen("../signal.txt", "r");
if (fp == NULL) {
fprintf(stderr, "Cannot open file 'signal.txt'\n");
exit(1);
}
fscanf(fp, "%d", &(data->size));
data->host->a = new float[2 * data->size];
data->host->sums = new float[1 * data->size];
for (int i = 0; i < data->size; i++) {
fscanf(fp, "%f", &(data->host->a[i]));
data->host->a[i + data->size] =
data->host->a[i]; // duplicate the array
data->host->sums[i] = 1;
}
fclose(fp);
CudaRig::InitAndCopy(
reinterpret_cast<void **>(&data->device->a),
data->host->a,
2 * data->size * sizeof(float));
CUDA_CHECK_ERROR();
CudaRig::InitAndCopy(
reinterpret_cast<void **>(&data->device->sums),
data->host->sums,
data->size * sizeof(float));
CUDA_CHECK_ERROR();
}
// Main program.
int main(int argc, char *argv[]) {
int dev = findCudaDevice(argc, (const char **)argv);
TestMem *mem = new TestMem();
CudaRig autocorrelation(mem, test_init);
autocorrelation.Init();
// Set up the execution parameters.
dim3 threads(BLOCK_SIZE, 1, 1);
// Set the number of blocks.
int num_blocks = (mem->size / BLOCK_SIZE);
dim3 grid(num_blocks, 1, 1);
std::cout << "Threads: " << BLOCK_SIZE << " | " << "Grid: " << num_blocks << std::endl;
CudaTimer t;
CudaRig::StartCudaTimer(&t);
// Execute the kernel.
Kernel<< <grid, threads>>>
(mem->device->sums, mem->device->a, mem->size);
CudaRig::StopCudaTimer(&t);
CUDA_CHECK_ERROR();
float msec = 0.0f;
cudaEventElapsedTime(&msec, t.start, t.stop);
CUDA_CHECK_ERROR();
std::cout << "Took " << msec / 1000.0F << " seconds."
<< std::endl;
// Copy result from the device to the host.
cudaMemcpy(mem->host->sums,
mem->device->sums,
mem->size * sizeof(int),
cudaMemcpyDeviceToHost);
CUDA_CHECK_ERROR();
cudaDeviceSynchronize();
CUDA_CHECK_ERROR();
bool last_positive = mem->host->sums[1] >= 0;
std::vector<float> cross_zero;
for (int i = 1; i < mem->size; i++) {
// std::cout << mem->host->sums[i] << std::endl;
if ( mem->host->sums[i] >= 0 && !last_positive ) {
//std::cout << i << std::endl;
cross_zero.push_back(i);
} else if (mem->host->sums[i] < 0 && last_positive) {
//std::cout << i << std::endl;
cross_zero.push_back(i);
}
last_positive = mem->host->sums[i] >= 0;
}
float last_float = cross_zero[0];
float total_diff = 0;
int count = 0;
for (auto v : cross_zero) {
if (v - last_float > 10) {
total_diff += v - last_float;
count++;
}
last_float = v;
}
float average = total_diff / (float) count;
std::cout << average * 2.0 << std::endl;
std::ofstream outfile;
outfile.open("autocorrelation.csv");
for (int i = 0; i < 512; i++) {
outfile << i << "," << mem->host->sums[i] << std::endl;
}
outfile.close();
return 0;
}
|
bfa428ba4346ea19d01553081b06ed2ae978145f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpupol_cuda.h"
#include "gpupol.h"
#include "gpupol_cuda_def.h"
#include "ringpol_cu.h"
int GPULibInit(SimProperties* sp, GPUDeviceState* devStates, GPULibContext* cudaContext){
gpuErrchk( hipGetDeviceCount(&(sp->nDevices)) );
double* perf = (double*)malloc(sizeof(double)*sp->nDevices);
hipDeviceProp_t prop;
double maxPerf=0;
for(int iDev=0; iDev<sp->nDevices; iDev++){
gpuErrchk( hipGetDeviceProperties(&prop, iDev) );
perf[iDev] = prop.clockRate*prop.multiProcessorCount;
if(perf[iDev]>maxPerf)
maxPerf=perf[iDev];
}
int nDev=0;
for(int iDev=0; iDev<sp->nDevices; iDev++){
if(perf[iDev]/maxPerf > 0.5)
cudaContext->devIds[nDev++]=iDev;
}
sp->nDevices = nDev;
free(perf);
return 0;
}
int GPULibLoadBuffers(SimProperties* sp, SimState* ss, GPUDeviceState * devStates, GPULibContext* cudaContext){
uint globalWs;
GPUDeviceState* curDev;
globalWs = sp->nwg*sp->ws;
CreateGPULattice(ss, sp);
for(int iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates + iDev;
gpuErrchk( hipSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( hipMalloc(&(curDev->seedBuf), sizeof(uint)*globalWs*2*sp->R) );
gpuErrchk( hipMalloc(&(curDev->latBuf), sizeof(uint)*sp->gpuLatSize) );
gpuErrchk( hipMalloc(&(curDev->transBuf), sizeof(uint)*4) );
gpuErrchk( hipMemcpy(curDev->seedBuf, ss[iDev].seeds, sizeof(uint)*globalWs*2*sp->R, hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(curDev->latBuf, ss[iDev].gpuLattice, sizeof(uint)*sp->gpuLatSize, hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(curDev->transBuf, sp->trans, sizeof(uint)*4, hipMemcpyHostToDevice) );
}
return 0;
}
int GPULibRelease(SimProperties* sp, GPUDeviceState* devStates, GPULibContext* cudaContext){
GPUDeviceState* curDev;
for(int iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates+iDev;
gpuErrchk( hipSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( hipFree(curDev->seedBuf) );
gpuErrchk( hipFree(curDev->latBuf) );
gpuErrchk( hipFree(curDev->transBuf) );
}
return 0;
}
int GPULatticeToCPU(SimProperties* sp, SimState* ss, GPUDeviceState* devStates, GPULibContext* cudaContext){
gpuErrchk( hipPeekAtLastError() );
for(int iDev=0; iDev<sp->nDevices; iDev++){
GPUDeviceState* curDev = devStates+iDev;
gpuErrchk( hipSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( hipMemcpy(ss[iDev].gpuLattice, curDev->latBuf, sizeof(uint)*sp->gpuLatSize, hipMemcpyDeviceToHost) );
}
for(int iDev=0; iDev<sp->nDevices; iDev++){
gpuErrchk( hipSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( hipDeviceSynchronize() );
}
CopyGPUToCPULattice();
GetAllRingPolymers();
for(int iDev=0; iDev<sp->nDevices; iDev++)
UpdatePolymerWithLabels(ss+iDev);
return 0;
}
int GPULibRun(SimProperties* sp, SimState* ss, GPUDeviceState* devStates, GPULibContext* cudaContext, int nTime){
int iDev;
int* tuvOff;
uint NWT, NWU, NWV;
int dt, du, dv;
GPUDeviceState* curDev;
tuvOff = (int*) malloc(sizeof(int)*sp->nDevices);
NWT=sp->nwt; NWU=sp->nwu; NWV=sp->nwv;
for(int i=0; i<nTime; i++){
for(iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates+iDev;
hipSetDevice(cudaContext->devIds[iDev]);
dt = WLT*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
du = WLU*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
dv = WLV*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
tuvOff[iDev] = dt + du*LCELL*WST + dv*LCELL*WSU*LCELL*WST;
hipLaunchKernelGGL(( polmove) , dim3(sp->nwg), dim3(sp->ws), 0 , 0, sp->nSteps, curDev->seedBuf, curDev->latBuf, curDev->transBuf, tuvOff[iDev], NWT, NWU, NWV);
}
}
GPULatticeToCPU(sp, ss, devStates, cudaContext);
return 0;
}
/*
int GPULibRun(SimProperties* sp, SimState* ss, GPUDeviceState* devStates, GPULibContext* cudaContext, int nTime){
int iDev;
uint tOff, uOff, vOff, tuvOff;
uint NWT, NWU, NWV;
GPUDeviceState* curDev;
NWT=sp->nwt; NWU=sp->nwu; NWV=sp->nwv;
for(int i=0; i<nTime; i++){
for(iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates+iDev;
hipSetDevice(cudaContext->devIds[iDev]);
tOff = (LCELL*WST)*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
uOff = (LCELL*WSU)*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
vOff = (LCELL*WSV)*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
tuvOff = tOff+uOff*LCELL*WST+vOff*LCELL*LCELL*WST*WSU;
// printf("nStep=%i\n", sp->nSteps);
hipLaunchKernelGGL(( polmove) , dim3(sp->nwg), dim3(sp->ws), 0 , 0, sp->nSteps, curDev->seedBuf, curDev->latBuf, curDev->transBuf, tuvOff, NWT, NWU, NWV);
}
}
GPULatticeToCPU(sp, ss, devStates, cudaContext);
return 0;
}*/
| bfa428ba4346ea19d01553081b06ed2ae978145f.cu | #include "gpupol_cuda.h"
#include "gpupol.h"
#include "gpupol_cuda_def.h"
#include "ringpol_cu.h"
int GPULibInit(SimProperties* sp, GPUDeviceState* devStates, GPULibContext* cudaContext){
gpuErrchk( cudaGetDeviceCount(&(sp->nDevices)) );
double* perf = (double*)malloc(sizeof(double)*sp->nDevices);
cudaDeviceProp prop;
double maxPerf=0;
for(int iDev=0; iDev<sp->nDevices; iDev++){
gpuErrchk( cudaGetDeviceProperties(&prop, iDev) );
perf[iDev] = prop.clockRate*prop.multiProcessorCount;
if(perf[iDev]>maxPerf)
maxPerf=perf[iDev];
}
int nDev=0;
for(int iDev=0; iDev<sp->nDevices; iDev++){
if(perf[iDev]/maxPerf > 0.5)
cudaContext->devIds[nDev++]=iDev;
}
sp->nDevices = nDev;
free(perf);
return 0;
}
int GPULibLoadBuffers(SimProperties* sp, SimState* ss, GPUDeviceState * devStates, GPULibContext* cudaContext){
uint globalWs;
GPUDeviceState* curDev;
globalWs = sp->nwg*sp->ws;
CreateGPULattice(ss, sp);
for(int iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates + iDev;
gpuErrchk( cudaSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( cudaMalloc(&(curDev->seedBuf), sizeof(uint)*globalWs*2*sp->R) );
gpuErrchk( cudaMalloc(&(curDev->latBuf), sizeof(uint)*sp->gpuLatSize) );
gpuErrchk( cudaMalloc(&(curDev->transBuf), sizeof(uint)*4) );
gpuErrchk( cudaMemcpy(curDev->seedBuf, ss[iDev].seeds, sizeof(uint)*globalWs*2*sp->R, cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(curDev->latBuf, ss[iDev].gpuLattice, sizeof(uint)*sp->gpuLatSize, cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(curDev->transBuf, sp->trans, sizeof(uint)*4, cudaMemcpyHostToDevice) );
}
return 0;
}
int GPULibRelease(SimProperties* sp, GPUDeviceState* devStates, GPULibContext* cudaContext){
GPUDeviceState* curDev;
for(int iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates+iDev;
gpuErrchk( cudaSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( cudaFree(curDev->seedBuf) );
gpuErrchk( cudaFree(curDev->latBuf) );
gpuErrchk( cudaFree(curDev->transBuf) );
}
return 0;
}
int GPULatticeToCPU(SimProperties* sp, SimState* ss, GPUDeviceState* devStates, GPULibContext* cudaContext){
gpuErrchk( cudaPeekAtLastError() );
for(int iDev=0; iDev<sp->nDevices; iDev++){
GPUDeviceState* curDev = devStates+iDev;
gpuErrchk( cudaSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( cudaMemcpy(ss[iDev].gpuLattice, curDev->latBuf, sizeof(uint)*sp->gpuLatSize, cudaMemcpyDeviceToHost) );
}
for(int iDev=0; iDev<sp->nDevices; iDev++){
gpuErrchk( cudaSetDevice(cudaContext->devIds[iDev]) );
gpuErrchk( cudaDeviceSynchronize() );
}
CopyGPUToCPULattice();
GetAllRingPolymers();
for(int iDev=0; iDev<sp->nDevices; iDev++)
UpdatePolymerWithLabels(ss+iDev);
return 0;
}
int GPULibRun(SimProperties* sp, SimState* ss, GPUDeviceState* devStates, GPULibContext* cudaContext, int nTime){
int iDev;
int* tuvOff;
uint NWT, NWU, NWV;
int dt, du, dv;
GPUDeviceState* curDev;
tuvOff = (int*) malloc(sizeof(int)*sp->nDevices);
NWT=sp->nwt; NWU=sp->nwu; NWV=sp->nwv;
for(int i=0; i<nTime; i++){
for(iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates+iDev;
cudaSetDevice(cudaContext->devIds[iDev]);
dt = WLT*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
du = WLU*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
dv = WLV*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
tuvOff[iDev] = dt + du*LCELL*WST + dv*LCELL*WSU*LCELL*WST;
polmove <<< sp->nwg, sp->ws, 0 >>> (sp->nSteps, curDev->seedBuf, curDev->latBuf, curDev->transBuf, tuvOff[iDev], NWT, NWU, NWV);
}
}
GPULatticeToCPU(sp, ss, devStates, cudaContext);
return 0;
}
/*
int GPULibRun(SimProperties* sp, SimState* ss, GPUDeviceState* devStates, GPULibContext* cudaContext, int nTime){
int iDev;
uint tOff, uOff, vOff, tuvOff;
uint NWT, NWU, NWV;
GPUDeviceState* curDev;
NWT=sp->nwt; NWU=sp->nwu; NWV=sp->nwv;
for(int i=0; i<nTime; i++){
for(iDev=0; iDev<sp->nDevices; iDev++){
curDev = devStates+iDev;
cudaSetDevice(cudaContext->devIds[iDev]);
tOff = (LCELL*WST)*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
uOff = (LCELL*WSU)*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
vOff = (LCELL*WSV)*(double)RNG_FAC*Rng4(&ss[iDev].rngState);
tuvOff = tOff+uOff*LCELL*WST+vOff*LCELL*LCELL*WST*WSU;
// printf("nStep=%i\n", sp->nSteps);
polmove <<< sp->nwg, sp->ws, 0 >>> (sp->nSteps, curDev->seedBuf, curDev->latBuf, curDev->transBuf, tuvOff, NWT, NWU, NWV);
}
}
GPULatticeToCPU(sp, ss, devStates, cudaContext);
return 0;
}*/
|
7a052c93c911efe6dfaabbf8e64c2399c79f5e58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//scan.cu
//#include "kernel.hip"
#include "comm.h"
#include "wtime.h"
#include "iostream"
#define max_thd 256
#define max_block 256
#define thread_limit 256
#define block_limit 1024
#define GPU_COWORKER 1
graph * mygraph;
__global__ void block_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]+=val;
// count[blockIdx.x]=val;
}
}
__global__ void warp_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
tid += GPU_COWORKER* blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]=val;
}
__syncthreads();
}
//----------------------------------------------------------------------------------------
__global__ void classify_kernel //step 1: classify the edge list into different arrays
( vertex_t* adj_list,
vertex_t* head_list,
index_t* begin,
index_t N, //inputs
index_t* small_num,
index_t* mid_num,
index_t* large_num
//outputs: small/large head, adjacent, and number by thread
)
{
int tid = threadIdx.x +blockIdx.x*blockDim.x;
index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1;
index_t thd_base = tid*bin_size; //start point of threads space
index_t small_offset=0;
index_t mid_offset=0;
index_t large_offset=0;
//temp variables
vertex_t head;
vertex_t adj;
index_t m;
index_t n;
for(index_t i=0; i<bin_size; i++){
index_t id = thd_base + i;
if(id<N){
head = head_list[id];
adj = adj_list[id];
m = begin[head+1]-begin[head];//degree[head];
n = begin[adj+1]-begin[adj];//degree[adj];
if(m<n){
n=m;
}
if(n<thread_limit){
small_offset++;
}
// else if(n<block_limit){ //could be more then 2 catigories
else{
mid_offset++;
}
/*
else { //could be more then 2 catigories
large_offset++;
}
*/
}
}
small_num[tid] = small_offset;
mid_num[tid] = mid_offset;
large_num[tid] = large_offset;
}
__global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256
(
index_t* data,
index_t* block_offset
)
{
//step 1: each block do prefix sum inside
int tid = threadIdx.x +blockIdx.x*blockDim.x;
__shared__ index_t temp_in[256];
temp_in[threadIdx.x] = data[tid];
__syncthreads();
index_t val=0;
for(int i=0; i<=threadIdx.x; i++){
val += temp_in[i];
}
__syncthreads();
if(threadIdx.x==255){
block_offset[blockIdx.x] = val;
}
data[tid] = val;
__syncthreads();
}
__global__ void prefix_kernel_2
(
index_t* block_offset
)
{
//step 2: collect each block's offset and do prefix for this set
__shared__ index_t temp_in[256];
temp_in[threadIdx.x] = block_offset[threadIdx.x];
__syncthreads();
index_t val=0;
for(int i=0; i<threadIdx.x; i++){
val += temp_in[i];
}
// val = temp_in[threadIdx.x];
block_offset[threadIdx.x] = val;
__syncthreads();
}
__global__ void prefix_kernel_3
(
index_t* data,
index_t* block_offset
)
{
//step 3: update by adding block offset
int tid = threadIdx.x + blockIdx.x*blockDim.x;
index_t val = data[tid];
index_t offset = block_offset[blockIdx.x];
val += offset;
data[tid] = val;
__syncthreads();
}
__global__ void collect_kernel
( vertex_t* adj_list,
vertex_t* head_list,
index_t* begin,
index_t N,
index_t* small_num,
index_t* mid_num,
index_t* large_num,
index_t N1,
index_t N2,
vertex_t* dest_head,
vertex_t* dest_adj
)
{
int tid = threadIdx.x +blockIdx.x*blockDim.x;
index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1;
index_t thd_base = tid*bin_size; //start point of threads space
index_t thd_base_small = 0;
index_t thd_base_mid = N1;
index_t thd_base_large = N1+N2;
if(tid!=0){
thd_base_small = small_num[tid-1];
thd_base_mid = N1 + mid_num[tid-1];
thd_base_large = N1 + N2 + large_num[tid-1];
}
//temp variables
vertex_t head;
vertex_t adj;
index_t m;
index_t n;
index_t small_offset = thd_base_small;
index_t mid_offset = thd_base_mid;
index_t large_offset = thd_base_large;
for(index_t i=0; i<bin_size; i++){
index_t id = thd_base + i;
if(id<N){
head = head_list[id];
adj = adj_list[id];
m = begin[head+1]-begin[head];//degree[head];
n = begin[adj+1]-begin[adj];//degree[adj];
if(m<n){
n=m;
}
if(n<thread_limit){
dest_head[small_offset] = head;
dest_adj [small_offset] = adj;
small_offset++;
}
// else if(n<block_limit){ //could be more then 2 catigories
else{
dest_head[mid_offset] = head;
dest_adj [mid_offset] = adj;
mid_offset++;
}
/* else { //could be more then 2 catigories
dest_head[large_offset] = head;
dest_adj [large_offset] = adj;
large_offset++;
}
*/
}
}
}
__global__ void reduce_kernel2(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void* part_scan(void * data){
index_t thd_count=0;
int GPU_id = *(int*)data;
int i = GPU_id;
// cout<<"GPU id = "<<GPU_id<<"\n";
hipSetDevice(GPU_id);
H_ERR(hipDeviceSynchronize() );
vertex_t* dev_adj;
vertex_t* dev_head;
index_t* dev_begin;
index_t* dev_count;
index_t partEdgeCount = mygraph->partEdgeCount[i];
vertex_t vert_count = mygraph->vert_count;
vertex_t* partAdj = mygraph->partAdj[i];
vertex_t* partHead= mygraph->partHead[i];
// index_t* partDegree = mygraph->partDegree[i];
index_t* partBegin = mygraph->partBegin[i];
index_t* count = mygraph->count;
H_ERR(hipMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) );
// H_ERR(hipMalloc(&dev_degree, vert_count*sizeof(index_t)) );
H_ERR(hipMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) );
index_t* block_offset;
H_ERR(hipMalloc(&block_offset, max_block*sizeof(index_t)) );
H_ERR(hipMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
// H_ERR(hipMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), hipMemcpyHostToDevice) );
double time2=wtime();
for(int j=0; j<PART_NUM; j++){
index_t totalEdgeCount = mygraph->partEdgeCount[j];
vertex_t* head = mygraph->partHead[j];
vertex_t* adj = mygraph->partAdj[j];
// index_t* degree = mygraph->partDegree[j];
vertex_t* classified_head;
vertex_t* classified_adj;
index_t* small_num;
index_t* mid_num;
index_t* large_num;
vertex_t* src_head;
vertex_t* src_adj;
// index_t* src_degree;
H_ERR(hipMalloc(&small_num, max_thd*max_block*sizeof(index_t)) );
H_ERR(hipMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) );
H_ERR(hipMalloc(&large_num, max_thd*max_block*sizeof(index_t)) );
H_ERR(hipMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) );
// H_ERR(hipMalloc(&src_degree, vert_count*sizeof(index_t)) );
H_ERR(hipMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
H_ERR(hipMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) );
// H_ERR(hipMemcpy(src_degree, degree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) );
H_ERR(hipMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(hipMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) );
//
double time1=wtime();
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( classify_kernel) , dim3(max_block),dim3(max_thd), 0, 0,
src_adj,
src_head,
dev_begin,
totalEdgeCount,
small_num,
mid_num,
large_num
);
H_ERR(hipDeviceSynchronize() );
//test for prefix sum
hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset);
H_ERR(hipDeviceSynchronize() );
index_t N1,N2,N3;
H_ERR(hipMemcpy(&N1 , &small_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) );
H_ERR(hipMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) );
H_ERR(hipMemcpy(&N3 , &large_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) );
H_ERR(hipDeviceSynchronize() );
// cout<<"N1 = "<<N1<<"\n";
// cout<<"N2 = "<<N2<<"\n";
// cout<<"N3 = "<<N3<<"\n";
hipLaunchKernelGGL(( collect_kernel) , dim3(max_block),dim3(max_thd), 0, 0,
src_adj,
src_head,
dev_begin,
totalEdgeCount,
small_num,
mid_num,
large_num,
N1,
N2,
classified_head,
classified_adj
);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0,
classified_head,
classified_adj,
dev_adj,
// dev_degree,
dev_begin,
0,
N1,
dev_count
);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( block_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0,
classified_head,
classified_adj,
//dev_head,
//dev_adj,
dev_adj,
// dev_degree,
dev_begin,
N1,
// 0 + GPU_id*256,
totalEdgeCount,
dev_count
);
H_ERR(hipDeviceSynchronize() );
hipLaunchKernelGGL(( reduce_kernel2) , dim3(1),dim3(1), 0, 0, dev_count);
H_ERR(hipDeviceSynchronize() );
H_ERR(hipMemcpy(&count[i], dev_count, sizeof(index_t), hipMemcpyDeviceToHost));
thd_count += count[i];
H_ERR(hipFree(small_num) );
H_ERR(hipFree(large_num) );
H_ERR(hipFree(classified_head) );
H_ERR(hipFree(classified_adj) );
H_ERR(hipFree(src_head) );
H_ERR(hipFree(src_adj) );
// H_ERR(hipFree(src_begin) );
cout<<"GPU "<<i<<" part "<<j<<"\n";
}
double time4 = wtime();
count[i] = thd_count;
cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n";
cout<<"time = "<<time4-time2<<" seconds"<<endl;
H_ERR(hipFree(dev_adj) );
H_ERR(hipFree(dev_head) );
// H_ERR(hipFree(dev_degree) );
H_ERR(hipFree(dev_begin) );
H_ERR(hipFree(block_offset) );
H_ERR(hipFree(dev_count) );
return NULL;
}
| 7a052c93c911efe6dfaabbf8e64c2399c79f5e58.cu | //scan.cu
//#include "kernel.cu"
#include "comm.h"
#include "wtime.h"
#include "iostream"
#define max_thd 256
#define max_block 256
#define thread_limit 256
#define block_limit 1024
#define GPU_COWORKER 1
graph * mygraph;
__global__ void block_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ max_thd;
int i = threadIdx.x% max_thd;
index_t mycount=0;
// __shared__ vertex_t cache[256];
__shared__ index_t local[max_thd];
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[i]=a[i*m/max_thd];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = max_thd;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[r];
if(X==Y){
mycount++;
bot = top + max_thd;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/max_thd;
top = top*m/max_thd -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += max_thd;
}
tid += GPU_COWORKER * gridDim.x*blockDim.x/ max_thd;
__syncthreads();
}
//reduce
__syncthreads();
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]+=val;
// count[blockIdx.x]=val;
}
}
__global__ void warp_binary_kernel
( vertex_t* head,
vertex_t* adj,
vertex_t* adj_list,
index_t* begin,
index_t Ns,
index_t Ne,
index_t* count
)
{
//phase 1, partition
index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns;
index_t mycount=0;
__shared__ index_t local[max_thd];
int i = threadIdx.x%32;
int p = threadIdx.x/32;
while(tid<Ne){
vertex_t A = head[tid];
vertex_t B = adj[tid];
index_t m = begin[A+1]-begin[A];//degree[A];
index_t n = begin[B+1]-begin[B];//degree[B];
index_t temp;
if(m<n){
temp = A;
A = B;
B = temp;
temp = m;
m = n;
n = temp;
}
vertex_t* a = &(adj_list[begin[A]]);
vertex_t* b = &(adj_list[begin[B]]);
//initial cache
local[p*32+i]=a[i*m/32];
__syncthreads();
//search
int j=i;
while(j<n){
vertex_t X = b[j];
vertex_t Y;
//phase 1: cache
int bot = 0;
int top = 32;
int r;
while(top>bot+1){
r = (top+bot)/2;
Y = local[p*32+r];
if(X==Y){
mycount++;
bot = top + 32;
}
if(X<Y){
top = r;
}
if(X>Y){
bot = r;
}
}
//phase 2
bot = bot*m/32;
top = top*m/32 -1;
while(top>=bot){
r = (top+bot)/2;
Y = a[r];
if(X==Y){
mycount++;
}
if(X<=Y){
top = r-1;
}
if(X>=Y){
bot = r+1;
}
}
j += 32;
}
tid += GPU_COWORKER* blockDim.x*gridDim.x/32;
__syncthreads();
}
__syncthreads();
//reduce
local[threadIdx.x] = mycount;
__syncthreads();
if(threadIdx.x==0){
index_t val=0;
for(int i=0; i<blockDim.x; i++){
val+= local[i];
}
count[blockIdx.x]=val;
}
__syncthreads();
}
//----------------------------------------------------------------------------------------
__global__ void classify_kernel //step 1: classify the edge list into different arrays
( vertex_t* adj_list,
vertex_t* head_list,
index_t* begin,
index_t N, //inputs
index_t* small_num,
index_t* mid_num,
index_t* large_num
//outputs: small/large head, adjacent, and number by thread
)
{
int tid = threadIdx.x +blockIdx.x*blockDim.x;
index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1;
index_t thd_base = tid*bin_size; //start point of threads space
index_t small_offset=0;
index_t mid_offset=0;
index_t large_offset=0;
//temp variables
vertex_t head;
vertex_t adj;
index_t m;
index_t n;
for(index_t i=0; i<bin_size; i++){
index_t id = thd_base + i;
if(id<N){
head = head_list[id];
adj = adj_list[id];
m = begin[head+1]-begin[head];//degree[head];
n = begin[adj+1]-begin[adj];//degree[adj];
if(m<n){
n=m;
}
if(n<thread_limit){
small_offset++;
}
// else if(n<block_limit){ //could be more then 2 catigories
else{
mid_offset++;
}
/*
else { //could be more then 2 catigories
large_offset++;
}
*/
}
}
small_num[tid] = small_offset;
mid_num[tid] = mid_offset;
large_num[tid] = large_offset;
}
__global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256
(
index_t* data,
index_t* block_offset
)
{
//step 1: each block do prefix sum inside
int tid = threadIdx.x +blockIdx.x*blockDim.x;
__shared__ index_t temp_in[256];
temp_in[threadIdx.x] = data[tid];
__syncthreads();
index_t val=0;
for(int i=0; i<=threadIdx.x; i++){
val += temp_in[i];
}
__syncthreads();
if(threadIdx.x==255){
block_offset[blockIdx.x] = val;
}
data[tid] = val;
__syncthreads();
}
__global__ void prefix_kernel_2
(
index_t* block_offset
)
{
//step 2: collect each block's offset and do prefix for this set
__shared__ index_t temp_in[256];
temp_in[threadIdx.x] = block_offset[threadIdx.x];
__syncthreads();
index_t val=0;
for(int i=0; i<threadIdx.x; i++){
val += temp_in[i];
}
// val = temp_in[threadIdx.x];
block_offset[threadIdx.x] = val;
__syncthreads();
}
__global__ void prefix_kernel_3
(
index_t* data,
index_t* block_offset
)
{
//step 3: update by adding block offset
int tid = threadIdx.x + blockIdx.x*blockDim.x;
index_t val = data[tid];
index_t offset = block_offset[blockIdx.x];
val += offset;
data[tid] = val;
__syncthreads();
}
__global__ void collect_kernel
( vertex_t* adj_list,
vertex_t* head_list,
index_t* begin,
index_t N,
index_t* small_num,
index_t* mid_num,
index_t* large_num,
index_t N1,
index_t N2,
vertex_t* dest_head,
vertex_t* dest_adj
)
{
int tid = threadIdx.x +blockIdx.x*blockDim.x;
index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1;
index_t thd_base = tid*bin_size; //start point of threads space
index_t thd_base_small = 0;
index_t thd_base_mid = N1;
index_t thd_base_large = N1+N2;
if(tid!=0){
thd_base_small = small_num[tid-1];
thd_base_mid = N1 + mid_num[tid-1];
thd_base_large = N1 + N2 + large_num[tid-1];
}
//temp variables
vertex_t head;
vertex_t adj;
index_t m;
index_t n;
index_t small_offset = thd_base_small;
index_t mid_offset = thd_base_mid;
index_t large_offset = thd_base_large;
for(index_t i=0; i<bin_size; i++){
index_t id = thd_base + i;
if(id<N){
head = head_list[id];
adj = adj_list[id];
m = begin[head+1]-begin[head];//degree[head];
n = begin[adj+1]-begin[adj];//degree[adj];
if(m<n){
n=m;
}
if(n<thread_limit){
dest_head[small_offset] = head;
dest_adj [small_offset] = adj;
small_offset++;
}
// else if(n<block_limit){ //could be more then 2 catigories
else{
dest_head[mid_offset] = head;
dest_adj [mid_offset] = adj;
mid_offset++;
}
/* else { //could be more then 2 catigories
dest_head[large_offset] = head;
dest_adj [large_offset] = adj;
large_offset++;
}
*/
}
}
}
__global__ void reduce_kernel2(index_t* count)
{
index_t val = 0;
for(int i=0; i<max_block; i++){
val += count[i];
}
count[0] = val;
}
//---------------------------------------- cpu function--------------------
//------------------------------------------------------------------
void* part_scan(void * data){
index_t thd_count=0;
int GPU_id = *(int*)data;
int i = GPU_id;
// cout<<"GPU id = "<<GPU_id<<"\n";
cudaSetDevice(GPU_id);
H_ERR(cudaDeviceSynchronize() );
vertex_t* dev_adj;
vertex_t* dev_head;
index_t* dev_begin;
index_t* dev_count;
index_t partEdgeCount = mygraph->partEdgeCount[i];
vertex_t vert_count = mygraph->vert_count;
vertex_t* partAdj = mygraph->partAdj[i];
vertex_t* partHead= mygraph->partHead[i];
// index_t* partDegree = mygraph->partDegree[i];
index_t* partBegin = mygraph->partBegin[i];
index_t* count = mygraph->count;
H_ERR(cudaMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) );
// H_ERR(cudaMalloc(&dev_degree, vert_count*sizeof(index_t)) );
H_ERR(cudaMalloc(&dev_begin, (vert_count+1)*sizeof(index_t)) );
H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) );
index_t* block_offset;
H_ERR(cudaMalloc(&block_offset, max_block*sizeof(index_t)) );
H_ERR(cudaMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
// H_ERR(cudaMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(dev_begin, partBegin, (vert_count+1)*sizeof(index_t), cudaMemcpyHostToDevice) );
double time2=wtime();
for(int j=0; j<PART_NUM; j++){
index_t totalEdgeCount = mygraph->partEdgeCount[j];
vertex_t* head = mygraph->partHead[j];
vertex_t* adj = mygraph->partAdj[j];
// index_t* degree = mygraph->partDegree[j];
vertex_t* classified_head;
vertex_t* classified_adj;
index_t* small_num;
index_t* mid_num;
index_t* large_num;
vertex_t* src_head;
vertex_t* src_adj;
// index_t* src_degree;
H_ERR(cudaMalloc(&small_num, max_thd*max_block*sizeof(index_t)) );
H_ERR(cudaMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) );
H_ERR(cudaMalloc(&large_num, max_thd*max_block*sizeof(index_t)) );
H_ERR(cudaMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) );
// H_ERR(cudaMalloc(&src_degree, vert_count*sizeof(index_t)) );
H_ERR(cudaMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) );
// H_ERR(cudaMemcpy(src_degree, degree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) );
H_ERR(cudaMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) );
H_ERR(cudaMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) );
//
double time1=wtime();
H_ERR(cudaDeviceSynchronize() );
classify_kernel <<<max_block,max_thd>>>(
src_adj,
src_head,
dev_begin,
totalEdgeCount,
small_num,
mid_num,
large_num
);
H_ERR(cudaDeviceSynchronize() );
//test for prefix sum
prefix_kernel_1 <<<max_block,max_thd>>>(small_num, block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_2 <<<1,max_thd>>>(block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_3 <<<max_block,max_thd>>>(small_num, block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_1 <<<max_block,max_thd>>>(mid_num, block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_2 <<<1,max_thd>>>(block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_3 <<<max_block,max_thd>>>(mid_num, block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_1 <<<max_block,max_thd>>>(large_num, block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_2 <<<1,max_thd>>>(block_offset);
H_ERR(cudaDeviceSynchronize() );
prefix_kernel_3 <<<max_block,max_thd>>>(large_num, block_offset);
H_ERR(cudaDeviceSynchronize() );
index_t N1,N2,N3;
H_ERR(cudaMemcpy(&N1 , &small_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) );
H_ERR(cudaMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) );
H_ERR(cudaMemcpy(&N3 , &large_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) );
H_ERR(cudaDeviceSynchronize() );
// cout<<"N1 = "<<N1<<"\n";
// cout<<"N2 = "<<N2<<"\n";
// cout<<"N3 = "<<N3<<"\n";
collect_kernel <<<max_block,max_thd>>>(
src_adj,
src_head,
dev_begin,
totalEdgeCount,
small_num,
mid_num,
large_num,
N1,
N2,
classified_head,
classified_adj
);
H_ERR(cudaDeviceSynchronize() );
warp_binary_kernel<<<max_block,max_thd>>>
( classified_head,
classified_adj,
dev_adj,
// dev_degree,
dev_begin,
0,
N1,
dev_count
);
H_ERR(cudaDeviceSynchronize() );
block_binary_kernel<<<max_block,max_thd>>>
( classified_head,
classified_adj,
//dev_head,
//dev_adj,
dev_adj,
// dev_degree,
dev_begin,
N1,
// 0 + GPU_id*256,
totalEdgeCount,
dev_count
);
H_ERR(cudaDeviceSynchronize() );
reduce_kernel2 <<<1,1>>>(dev_count);
H_ERR(cudaDeviceSynchronize() );
H_ERR(cudaMemcpy(&count[i], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost));
thd_count += count[i];
H_ERR(cudaFree(small_num) );
H_ERR(cudaFree(large_num) );
H_ERR(cudaFree(classified_head) );
H_ERR(cudaFree(classified_adj) );
H_ERR(cudaFree(src_head) );
H_ERR(cudaFree(src_adj) );
// H_ERR(cudaFree(src_begin) );
cout<<"GPU "<<i<<" part "<<j<<"\n";
}
double time4 = wtime();
count[i] = thd_count;
cout<<"gpu "<<i<<" binary count="<<count[i]<<"\n";
cout<<"time = "<<time4-time2<<" seconds"<<endl;
H_ERR(cudaFree(dev_adj) );
H_ERR(cudaFree(dev_head) );
// H_ERR(cudaFree(dev_degree) );
H_ERR(cudaFree(dev_begin) );
H_ERR(cudaFree(block_offset) );
H_ERR(cudaFree(dev_count) );
return NULL;
}
|
a26a2585c8398547adf2409679b8be3e3be3e38a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cstdio>
#include <iostream>
#include <vector>
#include <cstdlib>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>
#include <string>
#include <conio.h>
#include <stddef.h>
#include <fstream>
#include <sstream>
#include <tchar.h>
#include <gmp_util.h>
typedef long big_int;
using mpfr::mpreal;
__shared__ int dimension, ciclo, x, cram;
mpfr::mpreal temp, aux, temp1, det00;
std::vector < std::vector <mpreal> > matrixChi;
std::vector < std::vector <mpreal> > matrixChi2;
void input(mpreal array[][3], mpreal array1[][1]);
mpreal determinent(mpreal array[][3]);
mpreal calculate(mpreal array[][3], int a, int b, int c);
mpreal copy(mpreal array[][3], mpreal array1[][1], int a);
void comp_copy(mpreal array[][3], mpreal array1[][3]);
mpreal determinent4(mpreal array[][4]);
std::vector<std::vector<mpreal> > newdet(mpreal array[][4], int col);
void input4(mpreal array[][4], mpreal array1[][1]);
mpreal copy4(mpreal array[][4], mpreal array1[][1], int a);
void comp_copy4(mpreal array[][4], mpreal array1[][4]);
__host__ __device__ void f() {
#ifdef __CUDA_ARCH__
printf("Hebra en CUDA: %d\n", threadIdx.x);
#else
printf("CUDA Funcionando!\n");
#endif
}
__global__ void kernel() {
f();
}
// Console driver: reads an augmented matrix from matrix.txt, condenses it
// down to `cram` unknowns via pivotal (Chio-style) condensation, then solves
// the remaining 3x3 or 4x4 system with Cramer's rule and reports the
// relative error of each equation.
// NOTE(review): if cram is neither 3 nor 4 the while(sahi) loop never
// terminates; also `cram` is read even when matrix.txt failed to open —
// confirm both are acceptable.
int main()
{
const int digits = 200;
mpreal::set_default_prec(mpfr::digits2bits(digits));
mpreal overflow = std::numeric_limits<mpreal>::max();
ciclo = 1;
// device smoke test: one-thread kernel launch, then the host-side f()
kernel << <1, 1 >> >();
if (hipDeviceSynchronize() != hipSuccess) {
fprintf(stderr, "CUDA Fallo\n");
}
f();
std::cout << "Corriendo variables con " << digits << " bits de largo\n\n";
std::string s;
x = 0;
std::ifstream myReadFile;
myReadFile.open("matrix.txt");
// first token of matrix.txt is the system dimension
if (myReadFile.is_open()) {
myReadFile >> s;
std::stringstream geek(s);
dimension = 0;
geek >> dimension;
std::cout << "Dimensiones del problema: " << dimension << "\n";
}
// second token is the target size for the Cramer stage (3 or 4)
myReadFile >> s;
std::stringstream geek(s);
cram = 0;
geek >> cram;
// allocate CHI (the working augmented matrix, dimension x dimension+1)
matrixChi.resize(dimension);
for (int i = 0; i < dimension; i++)
{
matrixChi[i].resize(dimension + 1);
}
matrixChi2.resize(dimension - 1);
for (int i = 0; i < dimension - 1; i++)
{
matrixChi2[i].resize(dimension);
}
// read the remaining numbers from matrix.txt
if (myReadFile.is_open()) {
for (int i = 0; i < dimension; i++) {
for (int j = 0; j < dimension + 1; j++) {
myReadFile >> s;
std::stringstream geek(s);
x = 0;
geek >> x;
matrixChi[i][j] = x;
}
}
}
myReadFile.close();
// echo the augmented matrix (last column shown after " | ")
for (int i = 0; i < dimension; i++) {
for (int j = 0; j < dimension + 1; j++)
if (j == dimension) std::cout << " | " << (mpreal)(matrixChi[i][j]) << " ";
else std::cout << matrixChi[i][j] << " ";
std::cout << "\n";
}
// condensation loop: each pass reduces the dimension by one using
// matrixChi[0][0] as the pivot, until only `cram` unknowns remain
while (dimension > cram) {
std::cout << "Generando Matrix de nivel " << dimension << "\n";
// take the pivot
std::cout << "Pivote es " << matrixChi[0][0] << "\n";
for (int i = 0; i < dimension - 1; i++)
for (int j = 0; j < dimension; j++)
matrixChi2[i][j] = (mpreal)(matrixChi[0][0] * matrixChi[i + 1][j + 1] - matrixChi[i + 1][0] * matrixChi[0][j + 1]);
std::cout << "Condensacion a ciclo " << ciclo << "\n";
ciclo++;
for (int i = 0; i < dimension - 1; i++) {
for (int j = 0; j < dimension; j++)
if (j == dimension - 1) std::cout << " | " << (mpreal)(matrixChi2[i][j]) << " ";
else std::cout << (mpreal)(matrixChi2[i][j]) << " ";
std::cout << "\n";
}
// shrink CHI to the new dimension
dimension--;
matrixChi.resize(dimension);
for (int i = 0; i < dimension; i++)
{
matrixChi[i].resize(dimension + 1);
}
// move chi2's data into chi
for (int i = 0; i < dimension; i++)
for (int j = 0; j < dimension + 1; j++)
matrixChi[i][j] = (mpreal)matrixChi2[i][j];
// re-size the scratch matrix for the next condensation pass
matrixChi2.resize(dimension - 1);
for (int i = 0; i < dimension - 1; i++)
{
matrixChi2[i].resize(dimension);
}
}
// Cramer's rule starts here
std::cout << "\nDeterminantes de Cramer\n\n";
bool sahi = true;
while (sahi)
{
if (cram == 3 && cram != 4) {
// (long double would allow values up to ~1.8e308; mpreal is used instead)
mpreal matrix[3][3];
mpreal matrix1[3][1];
mpreal reserve[3][3];
mpreal detr[3];
int sp1 = 0, teen = 1;
int cont = 0;
char in;
// copy matrixChi into the coefficient matrix and the RHS column
input(matrix, matrix1);
// back up the matrix (copy() overwrites a column per step)
comp_copy(reserve, matrix);
// general (coefficient) determinant
det00 = determinent(matrix);
// one column substitution + determinant per unknown, restoring after each
while (sp1<3)
{
detr[cont] = copy(matrix, matrix1, sp1);
comp_copy(matrix, reserve);
cont++;
sp1++;
}
cont = 0;
while (cont<3)
{
std::cout << "x" << teen << " = " << (mpreal)detr[cont] << " /" << (mpreal)det00 << " [" << (mpreal)detr[cont] / (mpreal)det00 << "]" << std::endl;
cont++;
teen++;
}
mpreal x1 = ((mpreal)detr[0] / (mpreal)det00);
mpreal x2 = ((mpreal)detr[1] / (mpreal)det00);
mpreal x3 = ((mpreal)detr[2] / (mpreal)det00);
mpreal err_total = 0;
cont = 0;
while (cont<3)
{
// error computation: plug the solution back into each equation
mpreal resultado = matrix[cont][0] * x1 + matrix[cont][1] * x2 + matrix[cont][2] * x3;
mpreal error = 100 - ((matrix1[cont][0] / resultado) * 100);
std::cout << "[ESPERADO / OBTENIDO] para Ecuacion " << cont + 1 << "\n";
std::cout << "[" << (mpreal)matrix1[cont][0] << " / " << (mpreal)resultado << "] - ";
std::cout << "Error Relativo del " << abs((mpreal)error) << "%\n\n";
err_total += abs((mpreal)error);
cont++;
}
std::cout << "Error Promedio del " << (mpreal)err_total / 3 << "%\n";
std::cout << "Finalizado, presione X para terminar\n" << overflow;
std::cin >> in;
if (in == 'x' || in == 'X')
return 1;
}
else if (cram == 4) {
mpreal matrix[4][4];
mpreal matrix1[4][1];
mpreal reserve[4][4];
mpreal detr[4];
int sp1 = 0, teen = 1;
int cont = 0;
char in;
// copy matrixChi into the coefficient matrix and the RHS column
input4(matrix, matrix1);
// back up the matrix (copy4() overwrites a column per step)
comp_copy4(reserve, matrix);
// general (coefficient) determinant
det00 = determinent4(matrix);
while (sp1<4)
{
detr[cont] = copy4(matrix, matrix1, sp1);
comp_copy4(matrix, reserve);
cont++;
sp1++;
}
cont = 0;
while (cont<4)
{
std::cout << "x" << teen << " = " << (mpreal)detr[cont] << " /" << (mpreal)det00 << " [" << (mpreal)detr[cont] / (mpreal)det00 << "]" << std::endl;
cont++;
teen++;
}
mpreal x1 = ((mpreal)detr[0] / (mpreal)det00);
mpreal x2 = ((mpreal)detr[1] / (mpreal)det00);
mpreal x3 = ((mpreal)detr[2] / (mpreal)det00);
mpreal x4 = ((mpreal)detr[3] / (mpreal)det00);
mpreal err_total = 0;
cont = 0;
/*for (int k = 0; k<4; k++) {
for (int l = 0; l<4; l++) {
std::cout << matrix[k][l] << " ";
}
std::cout << "\n";
}
for (int l = 0; l<4; l++) {
std::cout << matrix1[0][l] << "\n";
}*/
while (cont<4)
{
// error computation: plug the solution back into each equation
/*std::cout << "test x1 " << matrix[cont][0] * x1 << "\n";
std::cout << "test x2 " << matrix[cont][1] * x2 << "\n";
std::cout << "test x3 " << matrix[cont][2] * x3 << "\n";
std::cout << "test x4 " << matrix[cont][3] * x4 << "\n";
std::cout << "test sum" << matrix[cont][0] * x1 + matrix[cont][1] * x2 + matrix[cont][2] * x3 + matrix[cont][3] * x4 << "\n";*/
mpreal resultado = matrix[cont][0] * x1 + matrix[cont][1] * x2 + matrix[cont][2] * x3 + matrix[cont][3] * x4;
/*std::cout << "Resultado " << resultado << "\n";*/
mpreal error = 100 - ((matrix1[cont][0] / resultado) * 100);
std::cout << "\n[ESPERADO / OBTENIDO] para Ecuacion " << cont + 1 << "\n";
std::cout << "[" << (mpreal)matrix1[cont][0] << " / " << (mpreal)resultado << "] - ";
std::cout << "Error Relativo del " << abs((mpreal)error) << "%\n\n";
err_total += abs((mpreal)error);
cont++;
}
std::cout << "Error Promedio del " << (float)err_total / 4 << "%\n";
std::cout << "Finalizado, presione X para terminar\n";
std::cin >> in;
if (in == 'x' || in == 'X')
return 1;
}
}
std::cout.flush();
return 0;
}
// Loads the 3x3 coefficient block and the right-hand-side column (column 3)
// of the global matrixChi into the caller-supplied arrays.
void input(mpreal array[][3], mpreal array1[][1])
{
    for (int r = 0; r < 3; ++r) {
        for (int c = 0; c < 3; ++c)
            array[r][c] = (mpreal)matrixChi[r][c];
        // column 3 holds the RHS of equation r
        array1[r][0] = (mpreal)matrixChi[r][3];
    }
}
// 3x3 determinant by cofactor expansion along the first row.
// `cont` carries the alternating sign (+, -, +); `col`/`z` steer calculate()
// to the correct 2x2 minor for each column of row 0. Writes the global
// `temp` and returns it; prints the result when the global `cram` selects
// the 3x3 solver (suppressed when called from determinent4).
mpreal determinent(mpreal array[][3])
{
int rows = 1, col = 1;
int z = 0;
temp = 0;
int cont = 1;
int x = 0;
while (x<3)
{
// accumulate sign * a[0][x] * (2x2 minor for column x)
temp = temp + cont*(array[0][x] * calculate(array, rows, col, z));
col = col * 0; // after the first pass the minor always starts at column 0
z = z + cont;  // z toggles 0 -> 1 -> 0, choosing the minor's second column
cont = cont*-1; // alternate the cofactor sign
x++;
}
if(cram == 3)
std::cout << "\nDeterminante de la matrix 3x3 es " << temp << "\n\n";
return temp;
}
// 2x2 determinant of the submatrix formed by rows a, a+1 and columns
// b, b+1+c of `array`. Writes the global `temp1` and returns it.
mpreal calculate(mpreal array[][3], int a, int b, int c)
{
temp1 = (array[a][b] * array[a + 1][b + 1 + c]) - (array[a + 1][b] * array[a][b + 1 + c]);
return temp1;
}
// Cramer step: overwrites column `a` of `array` with the RHS column
// `array1`, echoes the substituted matrix, and returns its determinant
// (via the global `temp`). The caller restores `array` afterwards with
// comp_copy().
mpreal copy(mpreal array[][3], mpreal array1[][1], int a)
{
int col = 0;
temp = 0;
while (col<3)
{
array[col][a] = array1[col][0];
col++;
}
// print the substituted matrix
int i = 0, j = 0;
while (i<3)
{
j = 0;
while (j<3)
{
std::cout << array[i][j] << " ";
j++;
}
std::cout << std::endl;
i++;
}
temp = determinent(array);
return temp;
}
// Copies the full 3x3 contents of `array1` into `array`
// (backup/restore helper for the Cramer column substitutions).
void comp_copy(mpreal array[][3], mpreal array1[][3])
{
    for (int r = 0; r < 3; ++r)
        for (int c = 0; c < 3; ++c)
            array[r][c] = array1[r][c];
}
// 4x4 determinant by cofactor expansion along row 0: each 3x3 minor is
// built by newdet(), copied into a plain C array, and evaluated with
// determinent(). Writes the global `aux` and returns it.
mpreal determinent4(mpreal array[][4]) {
int i, j, k;
aux = 0;
std::vector<std::vector<mpreal> > matrix(3, std::vector<mpreal>(3));
mpreal matrixaux[3][3];
for (i = 0; i<4; i++) {
matrix = newdet(array, i);
for (j = 0; j<3; j++) {
for (k = 0; k<3; k++) {
matrixaux[j][k] = matrix[j][k];
}
}
// pow(-1, i) supplies the alternating cofactor sign
aux = aux + pow(-1.0, (mpreal)i)*array[0][i] * (determinent(matrixaux));
}
std::cout << "\nDeterminante de la matrix 4x4 es " << aux << "\n\n";
return aux;
}
// Builds the 3x3 minor of `array` obtained by deleting row 0 and column `col`.
std::vector<std::vector<mpreal> > newdet(mpreal array[][4], int col) {
    std::vector<std::vector<mpreal> > matrix(3, std::vector<mpreal>(3));
    for (int r = 1; r < 4; ++r) {
        int dst = 0; // destination column within the minor
        for (int c = 0; c < 4; ++c) {
            if (c == col)
                continue; // skip the deleted column
            matrix[r - 1][dst++] = array[r][c];
        }
    }
    return matrix;
}
// Copies the full 4x4 contents of `array1` into `array`
// (backup/restore helper for the Cramer column substitutions).
void comp_copy4(mpreal array[][4], mpreal array1[][4])
{
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            array[r][c] = array1[r][c];
}
// Cramer step (4x4): overwrites column `a` of `array` with the RHS column
// `array1`, echoes the substituted matrix, and returns its determinant
// (via the global `temp`). The caller restores `array` with comp_copy4().
mpreal copy4(mpreal array[][4], mpreal array1[][1], int a)
{
int col = 0;
temp = 0;
while (col<4)
{
array[col][a] = array1[col][0];
col++;
}
// print the substituted matrix
int i = 0, j = 0;
while (i<4)
{
j = 0;
while (j<4)
{
std::cout << array[i][j] << " ";
j++;
}
std::cout << std::endl;
i++;
}
std::cout << "\n";
temp = determinent4(array);
return temp;
}
// Loads the 4x4 coefficient block and the right-hand-side column (column 4)
// of the global matrixChi into the caller-supplied arrays.
void input4(mpreal array[][4], mpreal array1[][1])
{
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
array[i][j] = (mpreal)matrixChi[i][j];
}
}
// column 4 holds the RHS of each equation
for (int i = 0; i < 4; i++) {
array1[i][0] = (mpreal)matrixChi[i][4];
}
} | a26a2585c8398547adf2409679b8be3e3be3e38a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cstdio>
#include <iostream>
#include <vector>
#include <cstdlib>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>
#include <string>
#include <conio.h>
#include <stddef.h>
#include <fstream>
#include <sstream>
#include <tchar.h>
#include <gmp_util.h>
typedef long big_int;
using mpfr::mpreal;
// Host-side program state: problem size, condensation cycle counter, input
// parsing scratch, and the Cramer target size. All four are read and written
// from host code (main and the input helpers). The original declaration was
// `__shared__ int ...` — `__shared__` is only meaningful inside device code
// and is invalid/meaningless at file scope, so they are plain globals.
int dimension, ciclo, x, cram;
mpfr::mpreal temp, aux, temp1, det00;
std::vector < std::vector <mpreal> > matrixChi;
std::vector < std::vector <mpreal> > matrixChi2;
void input(mpreal array[][3], mpreal array1[][1]);
mpreal determinent(mpreal array[][3]);
mpreal calculate(mpreal array[][3], int a, int b, int c);
mpreal copy(mpreal array[][3], mpreal array1[][1], int a);
void comp_copy(mpreal array[][3], mpreal array1[][3]);
mpreal determinent4(mpreal array[][4]);
std::vector<std::vector<mpreal> > newdet(mpreal array[][4], int col);
void input4(mpreal array[][4], mpreal array1[][1]);
mpreal copy4(mpreal array[][4], mpreal array1[][1], int a);
void comp_copy4(mpreal array[][4], mpreal array1[][4]);
// Smoke-test helper compiled for both host and device: on the GPU it prints
// the calling thread's index, on the CPU a plain status line.
__host__ __device__ void f() {
#ifdef __CUDA_ARCH__
printf("Hebra en CUDA: %d\n", threadIdx.x);
#else
printf("CUDA Funcionando!\n");
#endif
}
// Trivial kernel wrapper so f() can be exercised on the device.
__global__ void kernel() {
f();
}
// Console driver: reads an augmented matrix from matrix.txt, condenses it
// down to `cram` unknowns via pivotal (Chio-style) condensation, then solves
// the remaining 3x3 or 4x4 system with Cramer's rule and reports the
// relative error of each equation.
// NOTE(review): if cram is neither 3 nor 4 the while(sahi) loop never
// terminates; also `cram` is read even when matrix.txt failed to open —
// confirm both are acceptable.
int main()
{
const int digits = 200;
mpreal::set_default_prec(mpfr::digits2bits(digits));
mpreal overflow = std::numeric_limits<mpreal>::max();
ciclo = 1;
// device smoke test: one-thread kernel launch, then the host-side f()
kernel << <1, 1 >> >();
if (cudaDeviceSynchronize() != cudaSuccess) {
fprintf(stderr, "CUDA Fallo\n");
}
f();
std::cout << "Corriendo variables con " << digits << " bits de largo\n\n";
std::string s;
x = 0;
std::ifstream myReadFile;
myReadFile.open("matrix.txt");
// first token of matrix.txt is the system dimension
if (myReadFile.is_open()) {
myReadFile >> s;
std::stringstream geek(s);
dimension = 0;
geek >> dimension;
std::cout << "Dimensiones del problema: " << dimension << "\n";
}
// second token is the target size for the Cramer stage (3 or 4)
myReadFile >> s;
std::stringstream geek(s);
cram = 0;
geek >> cram;
// allocate CHI (the working augmented matrix, dimension x dimension+1)
matrixChi.resize(dimension);
for (int i = 0; i < dimension; i++)
{
matrixChi[i].resize(dimension + 1);
}
matrixChi2.resize(dimension - 1);
for (int i = 0; i < dimension - 1; i++)
{
matrixChi2[i].resize(dimension);
}
// read the remaining numbers from matrix.txt
if (myReadFile.is_open()) {
for (int i = 0; i < dimension; i++) {
for (int j = 0; j < dimension + 1; j++) {
myReadFile >> s;
std::stringstream geek(s);
x = 0;
geek >> x;
matrixChi[i][j] = x;
}
}
}
myReadFile.close();
// echo the augmented matrix (last column shown after " | ")
for (int i = 0; i < dimension; i++) {
for (int j = 0; j < dimension + 1; j++)
if (j == dimension) std::cout << " | " << (mpreal)(matrixChi[i][j]) << " ";
else std::cout << matrixChi[i][j] << " ";
std::cout << "\n";
}
// condensation loop: each pass reduces the dimension by one using
// matrixChi[0][0] as the pivot, until only `cram` unknowns remain
while (dimension > cram) {
std::cout << "Generando Matrix de nivel " << dimension << "\n";
// take the pivot
std::cout << "Pivote es " << matrixChi[0][0] << "\n";
for (int i = 0; i < dimension - 1; i++)
for (int j = 0; j < dimension; j++)
matrixChi2[i][j] = (mpreal)(matrixChi[0][0] * matrixChi[i + 1][j + 1] - matrixChi[i + 1][0] * matrixChi[0][j + 1]);
std::cout << "Condensacion a ciclo " << ciclo << "\n";
ciclo++;
for (int i = 0; i < dimension - 1; i++) {
for (int j = 0; j < dimension; j++)
if (j == dimension - 1) std::cout << " | " << (mpreal)(matrixChi2[i][j]) << " ";
else std::cout << (mpreal)(matrixChi2[i][j]) << " ";
std::cout << "\n";
}
// shrink CHI to the new dimension
dimension--;
matrixChi.resize(dimension);
for (int i = 0; i < dimension; i++)
{
matrixChi[i].resize(dimension + 1);
}
// move chi2's data into chi
for (int i = 0; i < dimension; i++)
for (int j = 0; j < dimension + 1; j++)
matrixChi[i][j] = (mpreal)matrixChi2[i][j];
// re-size the scratch matrix for the next condensation pass
matrixChi2.resize(dimension - 1);
for (int i = 0; i < dimension - 1; i++)
{
matrixChi2[i].resize(dimension);
}
}
// Cramer's rule starts here
std::cout << "\nDeterminantes de Cramer\n\n";
bool sahi = true;
while (sahi)
{
if (cram == 3 && cram != 4) {
// (long double would allow values up to ~1.8e308; mpreal is used instead)
mpreal matrix[3][3];
mpreal matrix1[3][1];
mpreal reserve[3][3];
mpreal detr[3];
int sp1 = 0, teen = 1;
int cont = 0;
char in;
// copy matrixChi into the coefficient matrix and the RHS column
input(matrix, matrix1);
// back up the matrix (copy() overwrites a column per step)
comp_copy(reserve, matrix);
// general (coefficient) determinant
det00 = determinent(matrix);
// one column substitution + determinant per unknown, restoring after each
while (sp1<3)
{
detr[cont] = copy(matrix, matrix1, sp1);
comp_copy(matrix, reserve);
cont++;
sp1++;
}
cont = 0;
while (cont<3)
{
std::cout << "x" << teen << " = " << (mpreal)detr[cont] << " /" << (mpreal)det00 << " [" << (mpreal)detr[cont] / (mpreal)det00 << "]" << std::endl;
cont++;
teen++;
}
mpreal x1 = ((mpreal)detr[0] / (mpreal)det00);
mpreal x2 = ((mpreal)detr[1] / (mpreal)det00);
mpreal x3 = ((mpreal)detr[2] / (mpreal)det00);
mpreal err_total = 0;
cont = 0;
while (cont<3)
{
// error computation: plug the solution back into each equation
mpreal resultado = matrix[cont][0] * x1 + matrix[cont][1] * x2 + matrix[cont][2] * x3;
mpreal error = 100 - ((matrix1[cont][0] / resultado) * 100);
std::cout << "[ESPERADO / OBTENIDO] para Ecuacion " << cont + 1 << "\n";
std::cout << "[" << (mpreal)matrix1[cont][0] << " / " << (mpreal)resultado << "] - ";
std::cout << "Error Relativo del " << abs((mpreal)error) << "%\n\n";
err_total += abs((mpreal)error);
cont++;
}
std::cout << "Error Promedio del " << (mpreal)err_total / 3 << "%\n";
std::cout << "Finalizado, presione X para terminar\n" << overflow;
std::cin >> in;
if (in == 'x' || in == 'X')
return 1;
}
else if (cram == 4) {
mpreal matrix[4][4];
mpreal matrix1[4][1];
mpreal reserve[4][4];
mpreal detr[4];
int sp1 = 0, teen = 1;
int cont = 0;
char in;
// copy matrixChi into the coefficient matrix and the RHS column
input4(matrix, matrix1);
// back up the matrix (copy4() overwrites a column per step)
comp_copy4(reserve, matrix);
// general (coefficient) determinant
det00 = determinent4(matrix);
while (sp1<4)
{
detr[cont] = copy4(matrix, matrix1, sp1);
comp_copy4(matrix, reserve);
cont++;
sp1++;
}
cont = 0;
while (cont<4)
{
std::cout << "x" << teen << " = " << (mpreal)detr[cont] << " /" << (mpreal)det00 << " [" << (mpreal)detr[cont] / (mpreal)det00 << "]" << std::endl;
cont++;
teen++;
}
mpreal x1 = ((mpreal)detr[0] / (mpreal)det00);
mpreal x2 = ((mpreal)detr[1] / (mpreal)det00);
mpreal x3 = ((mpreal)detr[2] / (mpreal)det00);
mpreal x4 = ((mpreal)detr[3] / (mpreal)det00);
mpreal err_total = 0;
cont = 0;
/*for (int k = 0; k<4; k++) {
for (int l = 0; l<4; l++) {
std::cout << matrix[k][l] << " ";
}
std::cout << "\n";
}
for (int l = 0; l<4; l++) {
std::cout << matrix1[0][l] << "\n";
}*/
while (cont<4)
{
// error computation: plug the solution back into each equation
/*std::cout << "test x1 " << matrix[cont][0] * x1 << "\n";
std::cout << "test x2 " << matrix[cont][1] * x2 << "\n";
std::cout << "test x3 " << matrix[cont][2] * x3 << "\n";
std::cout << "test x4 " << matrix[cont][3] * x4 << "\n";
std::cout << "test sum" << matrix[cont][0] * x1 + matrix[cont][1] * x2 + matrix[cont][2] * x3 + matrix[cont][3] * x4 << "\n";*/
mpreal resultado = matrix[cont][0] * x1 + matrix[cont][1] * x2 + matrix[cont][2] * x3 + matrix[cont][3] * x4;
/*std::cout << "Resultado " << resultado << "\n";*/
mpreal error = 100 - ((matrix1[cont][0] / resultado) * 100);
std::cout << "\n[ESPERADO / OBTENIDO] para Ecuacion " << cont + 1 << "\n";
std::cout << "[" << (mpreal)matrix1[cont][0] << " / " << (mpreal)resultado << "] - ";
std::cout << "Error Relativo del " << abs((mpreal)error) << "%\n\n";
err_total += abs((mpreal)error);
cont++;
}
std::cout << "Error Promedio del " << (float)err_total / 4 << "%\n";
std::cout << "Finalizado, presione X para terminar\n";
std::cin >> in;
if (in == 'x' || in == 'X')
return 1;
}
}
std::cout.flush();
return 0;
}
// Loads the 3x3 coefficient block and the right-hand-side column (column 3)
// of the global matrixChi into the caller-supplied arrays.
void input(mpreal array[][3], mpreal array1[][1])
{
    for (int r = 0; r < 3; ++r) {
        for (int c = 0; c < 3; ++c)
            array[r][c] = (mpreal)matrixChi[r][c];
        // column 3 holds the RHS of equation r
        array1[r][0] = (mpreal)matrixChi[r][3];
    }
}
// 3x3 determinant by cofactor expansion along the first row.
// `cont` carries the alternating sign (+, -, +); `col`/`z` steer calculate()
// to the correct 2x2 minor for each column of row 0. Writes the global
// `temp` and returns it; prints the result when the global `cram` selects
// the 3x3 solver (suppressed when called from determinent4).
mpreal determinent(mpreal array[][3])
{
int rows = 1, col = 1;
int z = 0;
temp = 0;
int cont = 1;
int x = 0;
while (x<3)
{
// accumulate sign * a[0][x] * (2x2 minor for column x)
temp = temp + cont*(array[0][x] * calculate(array, rows, col, z));
col = col * 0; // after the first pass the minor always starts at column 0
z = z + cont;  // z toggles 0 -> 1 -> 0, choosing the minor's second column
cont = cont*-1; // alternate the cofactor sign
x++;
}
if(cram == 3)
std::cout << "\nDeterminante de la matrix 3x3 es " << temp << "\n\n";
return temp;
}
// 2x2 determinant of the submatrix formed by rows a, a+1 and columns
// b, b+1+c of `array`. Writes the global `temp1` and returns it.
mpreal calculate(mpreal array[][3], int a, int b, int c)
{
temp1 = (array[a][b] * array[a + 1][b + 1 + c]) - (array[a + 1][b] * array[a][b + 1 + c]);
return temp1;
}
// Cramer step: overwrites column `a` of `array` with the RHS column
// `array1`, echoes the substituted matrix, and returns its determinant
// (via the global `temp`). The caller restores `array` afterwards with
// comp_copy().
mpreal copy(mpreal array[][3], mpreal array1[][1], int a)
{
int col = 0;
temp = 0;
while (col<3)
{
array[col][a] = array1[col][0];
col++;
}
// print the substituted matrix
int i = 0, j = 0;
while (i<3)
{
j = 0;
while (j<3)
{
std::cout << array[i][j] << " ";
j++;
}
std::cout << std::endl;
i++;
}
temp = determinent(array);
return temp;
}
// Copies the full 3x3 contents of `array1` into `array`
// (backup/restore helper for the Cramer column substitutions).
void comp_copy(mpreal array[][3], mpreal array1[][3])
{
    for (int r = 0; r < 3; ++r)
        for (int c = 0; c < 3; ++c)
            array[r][c] = array1[r][c];
}
// 4x4 determinant by cofactor expansion along row 0: each 3x3 minor is
// built by newdet(), copied into a plain C array, and evaluated with
// determinent(). Writes the global `aux` and returns it.
mpreal determinent4(mpreal array[][4]) {
int i, j, k;
aux = 0;
std::vector<std::vector<mpreal> > matrix(3, std::vector<mpreal>(3));
mpreal matrixaux[3][3];
for (i = 0; i<4; i++) {
matrix = newdet(array, i);
for (j = 0; j<3; j++) {
for (k = 0; k<3; k++) {
matrixaux[j][k] = matrix[j][k];
}
}
// pow(-1, i) supplies the alternating cofactor sign
aux = aux + pow(-1.0, (mpreal)i)*array[0][i] * (determinent(matrixaux));
}
std::cout << "\nDeterminante de la matrix 4x4 es " << aux << "\n\n";
return aux;
}
// Builds the 3x3 minor of `array` obtained by deleting row 0 and column `col`.
std::vector<std::vector<mpreal> > newdet(mpreal array[][4], int col) {
    std::vector<std::vector<mpreal> > matrix(3, std::vector<mpreal>(3));
    for (int r = 1; r < 4; ++r) {
        int dst = 0; // destination column within the minor
        for (int c = 0; c < 4; ++c) {
            if (c == col)
                continue; // skip the deleted column
            matrix[r - 1][dst++] = array[r][c];
        }
    }
    return matrix;
}
// Copies the full 4x4 contents of `array1` into `array`
// (backup/restore helper for the Cramer column substitutions).
void comp_copy4(mpreal array[][4], mpreal array1[][4])
{
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            array[r][c] = array1[r][c];
}
// Cramer step (4x4): overwrites column `a` of `array` with the RHS column
// `array1`, echoes the substituted matrix, and returns its determinant
// (via the global `temp`). The caller restores `array` with comp_copy4().
mpreal copy4(mpreal array[][4], mpreal array1[][1], int a)
{
int col = 0;
temp = 0;
while (col<4)
{
array[col][a] = array1[col][0];
col++;
}
// print the substituted matrix
int i = 0, j = 0;
while (i<4)
{
j = 0;
while (j<4)
{
std::cout << array[i][j] << " ";
j++;
}
std::cout << std::endl;
i++;
}
std::cout << "\n";
temp = determinent4(array);
return temp;
}
// Loads the 4x4 coefficient block and the right-hand-side column (column 4)
// of the global matrixChi into the caller-supplied arrays.
void input4(mpreal array[][4], mpreal array1[][1])
{
    for (int r = 0; r < 4; ++r) {
        for (int c = 0; c < 4; ++c)
            array[r][c] = (mpreal)matrixChi[r][c];
        // column 4 holds the RHS of equation r
        array1[r][0] = (mpreal)matrixChi[r][4];
    }
}
d1d649449e184bd6fb2d141c3c18494d2b84cd17.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "genmatrix.h"
#include <hip/hip_runtime.h>
// Tiled matrix multiply C = A * B (all width x width, row-major) with
// software prefetch: each thread is meant to produce a
// threadElemsPerDim x threadElemsPerDim patch of C, staging tiles of A and B
// through shared memory while prefetching the next tiles into registers.
// NOTE(review): the per-element offsets use `threadIdx.y + row` /
// `threadIdx.x + col`, so neighbouring threads overlap the same elements
// instead of striding by blockDim — likely the bug behind the author's TODO.
template <int BLOCK_SIZE, int threadElemsPerDim> __global__ void matrixMulSharedMemPrefetchMultipleElements(float *C, float *A, float *B, int width) // TODO: check why this doesn't work (original author's note)
{
int a_start = width * BLOCK_SIZE * threadElemsPerDim * blockIdx.y, a_offset, // start of the row band of A this block walks across
b_start = BLOCK_SIZE * threadElemsPerDim * blockIdx.x, b_offset; // start of the column band of B this block walks down
__shared__ float A_shared[BLOCK_SIZE*threadElemsPerDim*BLOCK_SIZE*threadElemsPerDim];
__shared__ float B_shared[BLOCK_SIZE*threadElemsPerDim*BLOCK_SIZE*threadElemsPerDim];
//float C_local = 0.0f;
float C_local[threadElemsPerDim*threadElemsPerDim]; // per-thread accumulators
float a_prefetched[threadElemsPerDim*threadElemsPerDim],
b_prefetched[threadElemsPerDim*threadElemsPerDim]; // registers holding the next tile
int row, col;
// prefetch the first A/B tiles into registers and zero the accumulators
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
a_prefetched[row*threadElemsPerDim+col] = A[a_start + (threadIdx.y + row) * width + threadIdx.x + col];
b_prefetched[row*threadElemsPerDim+col] = B[b_start + (threadIdx.y + row) * width + threadIdx.x + col];
C_local[row*threadElemsPerDim+col] = 0.0f;
}
}
// up: presumption of correctness (original author's note)
for(int index = 1; index <= gridDim.x; index++) // could just as well be gridDim.y, they are equal
{
//++index;
a_offset = index * BLOCK_SIZE * threadElemsPerDim;
b_offset = index * BLOCK_SIZE * threadElemsPerDim * width;
// <ok>
// stage the prefetched registers into shared memory
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
A_shared[(threadIdx.y + row) * blockDim.x * threadElemsPerDim + threadIdx.x + col] = a_prefetched[row*threadElemsPerDim+col];
B_shared[(threadIdx.y + row) * blockDim.x * threadElemsPerDim + threadIdx.x + col] = b_prefetched[row*threadElemsPerDim+col];
}
}
// </ok>
__syncthreads(); // barrier: wait until every thread in the block has filled shared memory
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
if(index < gridDim.x)
{
// prefetch the next tiles while computing on the current ones
a_prefetched[row*threadElemsPerDim+col] = A[a_start + a_offset + (threadIdx.y + row) * width + threadIdx.x + col];
b_prefetched[row*threadElemsPerDim+col] = B[b_start + b_offset + (threadIdx.y + row) * width + threadIdx.x + col];
}
for(int k = 0; k < BLOCK_SIZE*threadElemsPerDim; k++)
{
C_local[row*threadElemsPerDim+col] += A_shared[(threadIdx.y + row) * BLOCK_SIZE * threadElemsPerDim + k] * B_shared[k * BLOCK_SIZE * threadElemsPerDim + threadIdx.x + col];
}
}
}
__syncthreads(); // barrier: wait until every thread has finished its partial result
if(index * BLOCK_SIZE * threadElemsPerDim >= width) // condition is uniform across the block, so breaking after the barrier is safe
break;
}
// write the accumulated patch back to C
int c_start = blockIdx.y * width * BLOCK_SIZE * threadElemsPerDim,
c_offset = blockIdx.x * BLOCK_SIZE * threadElemsPerDim;
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
C[c_start + c_offset + width * (threadIdx.y + row) + threadIdx.x + col] = C_local[row*threadElemsPerDim+col];
}
}
}
static float totalTime = 0.0f;        // elapsed GPU time in ms, filled by hipEventElapsedTime below
#define THREAD_ELEMENTS_PER_DIM 2     // each thread computes a 2x2 patch of C
// Benchmarks the multi-element shared-memory kernel for one (block size,
// matrix width) configuration: allocates and fills host/device matrices,
// times TEST_COUNT launches with a HIP event pair, and prints ms/GFLOPS.
// Returns 0 on success, -1 on any HIP API error.
// NOTE(review): the early `return -1` paths leak the host mallocs and any
// device buffers allocated so far; there is also no error check after the
// kernel launches themselves — confirm this is acceptable for a benchmark.
int performImprovedSharedMemMultipleElemsTest(dim3 block_size, int width)
{
hipError_t error;
// host-side inputs filled with test data (generateTestMatrix from genmatrix.h)
float *A = (float*)malloc(width*width*sizeof(float));
float *B = (float*)malloc(width*width*sizeof(float));
generateTestMatrix(A, width);
generateTestMatrix(B, width);
float *C = (float*)malloc(width*width*sizeof(float));
memset(C, 0, width*width*sizeof(float));
// device-side copies
float *A_d, *B_d, *C_d;
error = hipMalloc((void**)&A_d, width*width*sizeof(float));
if(error != hipSuccess)
{
fprintf(stderr, "Could not allocate memory on the device for matrix A: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
error = hipMalloc((void**)&B_d, width*width*sizeof(float));
if(error != hipSuccess)
{
fprintf(stderr, "Could not allocate memory on the device for matrix B: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
error = hipMalloc((void**)&C_d, width*width*sizeof(float));
if(error != hipSuccess)
{
fprintf(stderr, "Could not allocate memory on the device for matrix C: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
error = hipMemcpy(A_d, A, width*width*sizeof(float), hipMemcpyHostToDevice);
if(error != hipSuccess)
{
fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
error = hipMemcpy(B_d, B, width*width*sizeof(float), hipMemcpyHostToDevice);
if(error != hipSuccess)
{
fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
// event pair used to time the whole batch of launches
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
error = hipEventRecord(start, NULL);
if(error != hipSuccess)
{
fprintf(stderr, "Could not record start event: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
// ceil-div grid so the whole matrix is covered; each thread owns a 2x2 patch
int grid_side = (int)ceil((float)width/(float)block_size.x/(float)THREAD_ELEMENTS_PER_DIM);
for(int current_test = 0; current_test < TEST_COUNT; current_test++)
{
// BLOCK_SIZE is a template parameter, so dispatch on the runtime block width
switch(block_size.x)
{
case 8:
hipLaunchKernelGGL(( matrixMulSharedMemPrefetchMultipleElements<8, THREAD_ELEMENTS_PER_DIM>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width);
break;
case 16:
hipLaunchKernelGGL(( matrixMulSharedMemPrefetchMultipleElements<16, THREAD_ELEMENTS_PER_DIM>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width);
break;
case 22:
hipLaunchKernelGGL(( matrixMulSharedMemPrefetchMultipleElements<22, THREAD_ELEMENTS_PER_DIM>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width);
break;
case 32:
hipLaunchKernelGGL(( matrixMulSharedMemPrefetchMultipleElements<32, THREAD_ELEMENTS_PER_DIM>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width);
break;
}
hipDeviceSynchronize();
}
error = hipEventRecord(stop, NULL);
if(error != hipSuccess)
{
fprintf(stderr, "Could not record stop event: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
error = hipEventSynchronize(stop);
if(error != hipSuccess)
{
fprintf(stderr, "Could not synchronize with stop event: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
totalTime = 0.0f;
error = hipEventElapsedTime(&totalTime, start, stop);
if(error != hipSuccess)
{
fprintf(stderr, "Could not calculate elapsed time: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
// report average time and throughput (2*n^3 flops per multiply)
float msecPerMatrixMul = totalTime / (float)TEST_COUNT;
double flopsPerMatrixMul = 2.0 * (double)width * (double)width * (double)width;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("%dx%d\t%dx%d\t%dx%d\t%dx%d\t%.3f\t%.2f\n", width, width, block_size.x * THREAD_ELEMENTS_PER_DIM, block_size.y * THREAD_ELEMENTS_PER_DIM, block_size.x, block_size.y, grid_side, grid_side, msecPerMatrixMul, gigaFlops);
// copy the result back (value unused beyond validating the transfer)
error = hipMemcpy(C, C_d, width*width*sizeof(float), hipMemcpyDeviceToHost);
if(error != hipSuccess)
{
fprintf(stderr, "Could not copy data from device to host: %s (line: %d)\n", hipGetErrorString(error), __LINE__);
return -1;
}
// cleanup (success path only)
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(C_d);
hipFree(B_d);
hipFree(A_d);
free(C);
free(B);
free(A);
hipDeviceReset();
return 0;
}
// Benchmark driver: times the multi-element shared-memory kernel for every
// (matrix size, block size) combination; each block dimension is listed
// twice so every configuration is timed in two separate runs.
void performImprovedSharedMemMultipleElemsTests(void)
{
    srand((unsigned int)time(NULL)); // seed the generator used by generateTestMatrix
    // logical tile edges; the launch block edge is tile / THREAD_ELEMENTS_PER_DIM
    const int tileDims[] = { 8, 8, 16, 16, 22, 22, 32, 32 };
    const size_t blockCount = sizeof(tileDims) / sizeof(int);
    dim3 blockSizes[blockCount];
    for (size_t k = 0; k < blockCount; ++k)
        blockSizes[k] = dim3(tileDims[k] / THREAD_ELEMENTS_PER_DIM, tileDims[k] / THREAD_ELEMENTS_PER_DIM);
    const int matrixSizes[] = { 32, 64, 128 };
    for (size_t m = 0; m < sizeof(matrixSizes) / sizeof(int); ++m)
        for (size_t b = 0; b < blockCount; ++b)
            performImprovedSharedMemMultipleElemsTest(blockSizes[b], matrixSizes[m]);
    hipDeviceReset();
}
| d1d649449e184bd6fb2d141c3c18494d2b84cd17.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "genmatrix.h"
#include <cuda_runtime.h>
// Tiled matrix multiply C = A * B (all width x width, row-major) with
// software prefetch: each thread is meant to produce a
// threadElemsPerDim x threadElemsPerDim patch of C, staging tiles of A and B
// through shared memory while prefetching the next tiles into registers.
// NOTE(review): the per-element offsets use `threadIdx.y + row` /
// `threadIdx.x + col`, so neighbouring threads overlap the same elements
// instead of striding by blockDim — likely the bug behind the author's TODO.
template <int BLOCK_SIZE, int threadElemsPerDim> __global__ void matrixMulSharedMemPrefetchMultipleElements(float *C, float *A, float *B, int width) // TODO: check why this doesn't work (original author's note)
{
int a_start = width * BLOCK_SIZE * threadElemsPerDim * blockIdx.y, a_offset, // start of the row band of A this block walks across
b_start = BLOCK_SIZE * threadElemsPerDim * blockIdx.x, b_offset; // start of the column band of B this block walks down
__shared__ float A_shared[BLOCK_SIZE*threadElemsPerDim*BLOCK_SIZE*threadElemsPerDim];
__shared__ float B_shared[BLOCK_SIZE*threadElemsPerDim*BLOCK_SIZE*threadElemsPerDim];
//float C_local = 0.0f;
float C_local[threadElemsPerDim*threadElemsPerDim]; // per-thread accumulators
float a_prefetched[threadElemsPerDim*threadElemsPerDim],
b_prefetched[threadElemsPerDim*threadElemsPerDim]; // registers holding the next tile
int row, col;
// prefetch the first A/B tiles into registers and zero the accumulators
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
a_prefetched[row*threadElemsPerDim+col] = A[a_start + (threadIdx.y + row) * width + threadIdx.x + col];
b_prefetched[row*threadElemsPerDim+col] = B[b_start + (threadIdx.y + row) * width + threadIdx.x + col];
C_local[row*threadElemsPerDim+col] = 0.0f;
}
}
// up: presumption of correctness (original author's note)
for(int index = 1; index <= gridDim.x; index++) // could just as well be gridDim.y, they are equal
{
//++index;
a_offset = index * BLOCK_SIZE * threadElemsPerDim;
b_offset = index * BLOCK_SIZE * threadElemsPerDim * width;
// <ok>
// stage the prefetched registers into shared memory
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
A_shared[(threadIdx.y + row) * blockDim.x * threadElemsPerDim + threadIdx.x + col] = a_prefetched[row*threadElemsPerDim+col];
B_shared[(threadIdx.y + row) * blockDim.x * threadElemsPerDim + threadIdx.x + col] = b_prefetched[row*threadElemsPerDim+col];
}
}
// </ok>
__syncthreads(); // barrier: wait until every thread in the block has filled shared memory
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
if(index < gridDim.x)
{
// prefetch the next tiles while computing on the current ones
a_prefetched[row*threadElemsPerDim+col] = A[a_start + a_offset + (threadIdx.y + row) * width + threadIdx.x + col];
b_prefetched[row*threadElemsPerDim+col] = B[b_start + b_offset + (threadIdx.y + row) * width + threadIdx.x + col];
}
for(int k = 0; k < BLOCK_SIZE*threadElemsPerDim; k++)
{
C_local[row*threadElemsPerDim+col] += A_shared[(threadIdx.y + row) * BLOCK_SIZE * threadElemsPerDim + k] * B_shared[k * BLOCK_SIZE * threadElemsPerDim + threadIdx.x + col];
}
}
}
__syncthreads(); // barrier: wait until every thread has finished its partial result
if(index * BLOCK_SIZE * threadElemsPerDim >= width) // condition is uniform across the block, so breaking after the barrier is safe
break;
}
// write the accumulated patch back to C
int c_start = blockIdx.y * width * BLOCK_SIZE * threadElemsPerDim,
c_offset = blockIdx.x * BLOCK_SIZE * threadElemsPerDim;
for(row = 0; row < threadElemsPerDim; row++)
{
for(col = 0; col < threadElemsPerDim; col++)
{
C[c_start + c_offset + width * (threadIdx.y + row) + threadIdx.x + col] = C_local[row*threadElemsPerDim+col];
}
}
}
// Accumulated GPU time in milliseconds over all TEST_COUNT launches of one test run.
static float totalTime = 0.0f;
// Thread-coarsening factor: each thread computes this many output elements per dimension.
#define THREAD_ELEMENTS_PER_DIM 2
// Runs the tiled shared-memory matrix-multiply kernel (prefetching variant,
// THREAD_ELEMENTS_PER_DIM x THREAD_ELEMENTS_PER_DIM outputs per thread) on two
// random width x width matrices, TEST_COUNT times, and prints the average
// kernel time and GFLOP/s for the given block size.
//
// block_size.x selects the kernel template instantiation (8, 16, 22 or 32);
// any other value launches nothing and times an empty loop.
// Returns 0 on success, -1 on any allocation/CUDA failure.
// Fix over the original: every error path now releases all resources acquired
// so far (host buffers, device buffers, events), and kernel launches are
// checked via cudaGetLastError()/cudaDeviceSynchronize().
int performImprovedSharedMemMultipleElemsTest(dim3 block_size, int width)
{
cudaError_t error;
int ret = -1; // pessimistic default; set to 0 only after the full run succeeds
float *A = NULL, *B = NULL, *C = NULL; // host matrices
float *A_d = NULL, *B_d = NULL, *C_d = NULL; // device matrices
cudaEvent_t start = NULL, stop = NULL;
int grid_side;
float msecPerMatrixMul;
double flopsPerMatrixMul, gigaFlops;
A = (float*)malloc(width*width*sizeof(float));
B = (float*)malloc(width*width*sizeof(float));
C = (float*)malloc(width*width*sizeof(float));
if(A == NULL || B == NULL || C == NULL)
{
fprintf(stderr, "Could not allocate host matrices (line: %d)\n", __LINE__);
goto cleanup;
}
generateTestMatrix(A, width);
generateTestMatrix(B, width);
memset(C, 0, width*width*sizeof(float));
error = cudaMalloc((void**)&A_d, width*width*sizeof(float));
if(error != cudaSuccess)
{
fprintf(stderr, "Could not allocate memory on the device for matrix A: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
error = cudaMalloc((void**)&B_d, width*width*sizeof(float));
if(error != cudaSuccess)
{
fprintf(stderr, "Could not allocate memory on the device for matrix B: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
error = cudaMalloc((void**)&C_d, width*width*sizeof(float));
if(error != cudaSuccess)
{
fprintf(stderr, "Could not allocate memory on the device for matrix C: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
error = cudaMemcpy(A_d, A, width*width*sizeof(float), cudaMemcpyHostToDevice);
if(error != cudaSuccess)
{
fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
error = cudaMemcpy(B_d, B, width*width*sizeof(float), cudaMemcpyHostToDevice);
if(error != cudaSuccess)
{
fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
error = cudaEventCreate(&start);
if(error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
start = NULL; // never destroy an event that was not created
goto cleanup;
}
error = cudaEventCreate(&stop);
if(error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
stop = NULL;
goto cleanup;
}
error = cudaEventRecord(start, NULL);
if(error != cudaSuccess)
{
fprintf(stderr, "Could not record start event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
// ceil(width / (block_size.x * THREAD_ELEMENTS_PER_DIM)) blocks per grid side
grid_side = (int)ceil((float)width/(float)block_size.x/(float)THREAD_ELEMENTS_PER_DIM);
for(int current_test = 0; current_test < TEST_COUNT; current_test++)
{
switch(block_size.x)
{
case 8:
matrixMulSharedMemPrefetchMultipleElements<8, THREAD_ELEMENTS_PER_DIM><<<dim3(grid_side, grid_side), block_size>>>(C_d, A_d, B_d, width);
break;
case 16:
matrixMulSharedMemPrefetchMultipleElements<16, THREAD_ELEMENTS_PER_DIM><<<dim3(grid_side, grid_side), block_size>>>(C_d, A_d, B_d, width);
break;
case 22:
matrixMulSharedMemPrefetchMultipleElements<22, THREAD_ELEMENTS_PER_DIM><<<dim3(grid_side, grid_side), block_size>>>(C_d, A_d, B_d, width);
break;
case 32:
matrixMulSharedMemPrefetchMultipleElements<32, THREAD_ELEMENTS_PER_DIM><<<dim3(grid_side, grid_side), block_size>>>(C_d, A_d, B_d, width);
break;
}
// kernel launches do not return errors directly; surface launch-config errors here
error = cudaGetLastError();
if(error != cudaSuccess)
{
fprintf(stderr, "Kernel launch failed: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
// ...and asynchronous execution errors here
error = cudaDeviceSynchronize();
if(error != cudaSuccess)
{
fprintf(stderr, "Kernel execution failed: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
}
error = cudaEventRecord(stop, NULL);
if(error != cudaSuccess)
{
fprintf(stderr, "Could not record stop event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
error = cudaEventSynchronize(stop);
if(error != cudaSuccess)
{
fprintf(stderr, "Could not synchronize with stop event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
totalTime = 0.0f;
error = cudaEventElapsedTime(&totalTime, start, stop);
if(error != cudaSuccess)
{
fprintf(stderr, "Could not calculate elapsed time: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
msecPerMatrixMul = totalTime / (float)TEST_COUNT;
flopsPerMatrixMul = 2.0 * (double)width * (double)width * (double)width;
gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf("%dx%d\t%dx%d\t%dx%d\t%dx%d\t%.3f\t%.2f\n", width, width, block_size.x * THREAD_ELEMENTS_PER_DIM, block_size.y * THREAD_ELEMENTS_PER_DIM, block_size.x, block_size.y, grid_side, grid_side, msecPerMatrixMul, gigaFlops);
error = cudaMemcpy(C, C_d, width*width*sizeof(float), cudaMemcpyDeviceToHost);
if(error != cudaSuccess)
{
fprintf(stderr, "Could not copy data from device to host: %s (line: %d)\n", cudaGetErrorString(error), __LINE__);
goto cleanup;
}
ret = 0;
cleanup:
if(stop) cudaEventDestroy(stop);
if(start) cudaEventDestroy(start);
cudaFree(C_d); // cudaFree(NULL) is a documented no-op
cudaFree(B_d);
cudaFree(A_d);
free(C);
free(B);
free(A);
if(ret == 0)
cudaDeviceReset(); // kept from the original: full context teardown after a successful run
return ret;
}
// Sweeps the improved shared-memory multi-element matrix-multiply benchmark
// over a fixed set of matrix sizes (outer loop) and thread-block shapes
// (inner loop), timing each combination.
void performImprovedSharedMemMultipleElemsTests(void)
{
srand((unsigned int)time(NULL));
dim3 blockSizes[] = { dim3(8/THREAD_ELEMENTS_PER_DIM,8/THREAD_ELEMENTS_PER_DIM), dim3(8/THREAD_ELEMENTS_PER_DIM,8/THREAD_ELEMENTS_PER_DIM), dim3(16/THREAD_ELEMENTS_PER_DIM,16/THREAD_ELEMENTS_PER_DIM), dim3(16/THREAD_ELEMENTS_PER_DIM,16/THREAD_ELEMENTS_PER_DIM), dim3(22/THREAD_ELEMENTS_PER_DIM,22/THREAD_ELEMENTS_PER_DIM), dim3(22/THREAD_ELEMENTS_PER_DIM,22/THREAD_ELEMENTS_PER_DIM), dim3(32/THREAD_ELEMENTS_PER_DIM,32/THREAD_ELEMENTS_PER_DIM), dim3(32/THREAD_ELEMENTS_PER_DIM,32/THREAD_ELEMENTS_PER_DIM) };
int matrixSizes[] = { 32, 64, 128 };
const int numMatrixSizes = (int)(sizeof(matrixSizes)/sizeof(matrixSizes[0]));
const int numBlockSizes = (int)(sizeof(blockSizes)/sizeof(blockSizes[0]));
for(int m = 0; m < numMatrixSizes; m++)
{
for(int b = 0; b < numBlockSizes; b++)
{
performImprovedSharedMemMultipleElemsTest(blockSizes[b], matrixSizes[m]);
}
}
cudaDeviceReset();
}
|
a4288de697dc655aea3bc8ee8088e9525d21075d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*What this code does:
-Generates 245,760 random numbers between 0 and 100 inclusive, using 72 for the random seed, stores them into an array.
-Uses 3 streams to add 8,192 at a time each.
-Copies the sum of each chunk of 8,192 numbers to the CPU.
-Sum each chunk's sum on the CPU.
To compile: nvcc StreamsExample.cu -run
*/
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#define rnd( x ) (x*(float)rand())/((float)RAND_MAX)
#define BIGN 245760
#define N 8192
// Kernel launch configuration; dimBlock.x/dimGrid.x are filled in by Innitialize().
dim3 dimBlock;
int blocks = (N-1)/(1024) + 1; // ceil(N / 1024): enough blocks to cover one N-element chunk
dim3 dimGrid;
// Host buffers: all BIGN random inputs and the 30 per-chunk partial sums.
float *rand_num_CPU, *rand_sum_CPU;
// Device buffers: a single N-element staging chunk and the 30 partial sums.
float *rand_num_GPU, *rand_sum_GPU;
// Allocates the host buffers with plain (pageable) malloc -- the commented-out
// calls show the pinned-memory variant that hipMemcpyAsync would need for truly
// asynchronous copies -- plus the two device buffers.
// NOTE(review): rand_num_GPU holds only one N-element chunk, yet main() feeds it
// from three different streams; presumably those copies/kernels can race --
// verify, or give each stream its own staging buffer.
// NOTE(review): hipMalloc return values are not checked.
void AllocateMemory()
{
rand_num_CPU = (float*)malloc(sizeof(float)*BIGN);//hipHostMalloc((void**)&rand_num_CPU,sizeof(int)*BIGN,hipHostMallocDefault);
rand_sum_CPU = (float*)malloc(sizeof(float)*30);//hipHostMalloc((void**)&rand_sum_CPU,sizeof(int)*30,hipHostMallocDefault);
hipMalloc(&rand_sum_GPU, 30*sizeof(float));
hipMalloc(&rand_num_GPU, sizeof(float)*N);
}
// Sets the launch configuration, seeds the RNG with the fixed seed 72, fills the
// host input array with BIGN random floats in [0, 100], and zeroes the 30
// per-chunk partial sums.  Always returns 1.
int Innitialize()
{
dimBlock.x = 1024;
dimGrid.x = blocks;
srand(72);
for(int k = 0; k < BIGN; k++)
{
rand_num_CPU[k] = (float)(rnd(100));
}
for(int k = 0; k < 30; k++)
{
rand_sum_CPU[k] = 0;
}
return(1);
}
// One thread per input element: every in-range thread atomically folds its
// element of rand_num_GPU into the accumulator slot owned by this
// (loop_number, stream_number) pair: rand_sum_GPU[3*loop_number + stream_number - 1].
__global__ void Add_em_up(float *rand_num_GPU, float *rand_sum_GPU,int stream_number, int loop_number, int n)
{
const int gid = threadIdx.x + blockIdx.x*blockDim.x;
if(gid >= n)
return; // guard: the grid may over-cover the chunk
atomicAdd(&rand_sum_GPU[3*loop_number + stream_number-1], rand_num_GPU[gid]);
}
// Releases the paired host (malloc/free) and device (hipMalloc/hipFree) buffers.
void Cleanup(float *rand_num_CPU, float *rand_sum_CPU, float *rand_num_GPU, float *rand_sum_GPU)
{
free(rand_num_CPU);
free(rand_sum_CPU);
hipFree(rand_num_GPU);
hipFree(rand_sum_GPU);
}
// Collapses the first n partial sums into rand_sum_CPU[0], accumulating in the
// same left-to-right order as the original (identical float rounding).
void finalSum(float *rand_sum_CPU,int n)
{
float total = rand_sum_CPU[0];
for(int k = 1; k < n; k++)
{
total += rand_sum_CPU[k];
}
rand_sum_CPU[0] = total;
}
// Sums BIGN random numbers in 30 chunks of N elements, three chunks per loop
// iteration, one chunk per stream.  Fixes over the original:
//  * each stream now has its own device staging buffer (the single shared
//    rand_num_GPU made the three streams' copies and kernels race);
//  * iteration i now processes chunks 3*i, 3*i+1 and 3*i+2, covering all 30
//    chunks exactly once (previously chunks i, i+1, i+2, which overlapped and
//    left chunks 12..29 unsummed);
//  * the device is synchronized before the results are read on the host;
//  * streams and the extra buffers are released and main returns 0.
int main()
{
timeval start, end;
int i;
gettimeofday(&start, NULL);
AllocateMemory();
Innitialize();
// private staging buffers for the second and third stream
// (stream_one keeps the globally allocated rand_num_GPU)
float *chunk_two_GPU = NULL, *chunk_three_GPU = NULL;
hipMalloc(&chunk_two_GPU, N*sizeof(float));
hipMalloc(&chunk_three_GPU, N*sizeof(float));
//creating the three different streams
hipStream_t stream_one, stream_two, stream_three;
hipStreamCreate(&stream_one);
hipStreamCreate(&stream_two);
hipStreamCreate(&stream_three);
//makes sure that streams will be useful
hipDeviceProp_t prop;
int whichDevice;
hipGetDevice(&whichDevice);
hipGetDeviceProperties(&prop, whichDevice);
if(!prop.deviceOverlap)
{
printf("Device will not handle overlaps, so no speed up from streams");
return(1);
}
// zero the 30 device-side accumulators (blocking copy: host buffer is pageable anyway)
hipMemcpy(rand_sum_GPU, rand_sum_CPU, 30*sizeof(float), hipMemcpyHostToDevice);
//each stream sums one 8192-element chunk per iteration; within a stream the
//copy for iteration i+1 waits for iteration i's kernel (stream ordering)
for(i=0; i<10; i++)
{
hipMemcpyAsync(rand_num_GPU, rand_num_CPU + (3*i)*N, N*sizeof(float), hipMemcpyHostToDevice,stream_one);
hipLaunchKernelGGL(( Add_em_up), dim3(dimGrid),dim3(dimBlock), 0,stream_one, rand_num_GPU, rand_sum_GPU,1, i,N);
hipMemcpyAsync(chunk_two_GPU, rand_num_CPU + (3*i+1)*N, N*sizeof(float), hipMemcpyHostToDevice,stream_two);
hipLaunchKernelGGL(( Add_em_up), dim3(dimGrid),dim3(dimBlock), 0,stream_two, chunk_two_GPU, rand_sum_GPU, 2,i,N);
hipMemcpyAsync(chunk_three_GPU, rand_num_CPU + (3*i+2)*N, N*sizeof(float), hipMemcpyHostToDevice,stream_three);
hipLaunchKernelGGL(( Add_em_up), dim3(dimGrid),dim3(dimBlock), 0,stream_three, chunk_three_GPU, rand_sum_GPU, 3,i,N);
}
// wait for all three streams before reading the partial sums back
hipDeviceSynchronize();
hipMemcpy(rand_sum_CPU, rand_sum_GPU, 30*sizeof(float), hipMemcpyDeviceToHost);
for(i=0; i<30; i++)
{
printf("final sum number %d is %f\n", i, rand_sum_CPU[i]);
}
finalSum(rand_sum_CPU, 30);
printf("The final sum is %f\n", rand_sum_CPU[0]);
gettimeofday(&end, NULL);
float time = ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec));
printf("time in milliseconds: %.15f\n", time/1000);
hipStreamDestroy(stream_one);
hipStreamDestroy(stream_two);
hipStreamDestroy(stream_three);
hipFree(chunk_two_GPU);
hipFree(chunk_three_GPU);
Cleanup(rand_num_CPU, rand_sum_CPU, rand_num_GPU, rand_sum_GPU);
return 0;
}
/*
final sum number 0 is 411865.031250
final sum number 1 is 412250.656250
final sum number 2 is 406504.093750
final sum number 3 is 412250.156250
final sum number 4 is 406504.187500
final sum number 5 is 411834.062500
final sum number 6 is 406504.312500
final sum number 7 is 411833.906250
final sum number 8 is 409671.375000
final sum number 9 is 411833.406250
final sum number 10 is 409672.156250
final sum number 11 is 408217.968750
final sum number 12 is 409671.718750
final sum number 13 is 408217.875000
final sum number 14 is 407320.781250
final sum number 15 is 408217.875000
final sum number 16 is 407321.718750
final sum number 17 is 407714.343750
final sum number 18 is 407322.125000
final sum number 19 is 403816.593750
final sum number 20 is 409465.093750
final sum number 21 is 403815.781250
final sum number 22 is 410225.875000
final sum number 23 is 408284.968750
final sum number 24 is 410226.156250
final sum number 25 is 407310.875000
final sum number 26 is 406670.156250
final sum number 27 is 407311.406250
final sum number 28 is 406670.531250
final sum number 29 is 414002.281250
The final sum is 12262527.000000
time in milliseconds: 239.712997436523438
*/
| a4288de697dc655aea3bc8ee8088e9525d21075d.cu | /*What this code does:
-Generates 245,760 random numbers between 0 and 100 inclusive, using 72 for the random seed, stores them into an array.
-Uses 3 streams to add 8,192 at a time each.
-Copies the sum of each chunk of 8,192 numbers to the CPU.
-Sum each chunk's sum on the CPU.
To compile: nvcc StreamsExample.cu -run
*/
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#define rnd( x ) (x*(float)rand())/((float)RAND_MAX)
#define BIGN 245760
#define N 8192
// Kernel launch configuration; dimBlock.x/dimGrid.x are filled in by Innitialize().
dim3 dimBlock;
int blocks = (N-1)/(1024) + 1; // ceil(N / 1024): enough blocks to cover one N-element chunk
dim3 dimGrid;
// Host buffers: all BIGN random inputs and the 30 per-chunk partial sums.
float *rand_num_CPU, *rand_sum_CPU;
// Device buffers: a single N-element staging chunk and the 30 partial sums.
float *rand_num_GPU, *rand_sum_GPU;
// Allocates the host buffers with plain (pageable) malloc -- the commented-out
// calls show the pinned-memory variant that cudaMemcpyAsync would need for truly
// asynchronous copies -- plus the two device buffers.
// NOTE(review): rand_num_GPU holds only one N-element chunk, yet main() feeds it
// from three different streams; presumably those copies/kernels can race --
// verify, or give each stream its own staging buffer.
// NOTE(review): cudaMalloc return values are not checked.
void AllocateMemory()
{
rand_num_CPU = (float*)malloc(sizeof(float)*BIGN);//cudaHostAlloc((void**)&rand_num_CPU,sizeof(int)*BIGN,cudaHostAllocDefault);
rand_sum_CPU = (float*)malloc(sizeof(float)*30);//cudaHostAlloc((void**)&rand_sum_CPU,sizeof(int)*30,cudaHostAllocDefault);
cudaMalloc(&rand_sum_GPU, 30*sizeof(float));
cudaMalloc(&rand_num_GPU, sizeof(float)*N);
}
// Sets the launch configuration, seeds the RNG with the fixed seed 72, fills the
// host input array with BIGN random floats in [0, 100], and zeroes the 30
// per-chunk partial sums.  Always returns 1.
int Innitialize()
{
dimBlock.x = 1024;
dimGrid.x = blocks;
srand(72);
for(int k = 0; k < BIGN; k++)
{
rand_num_CPU[k] = (float)(rnd(100));
}
for(int k = 0; k < 30; k++)
{
rand_sum_CPU[k] = 0;
}
return(1);
}
// One thread per input element: every in-range thread atomically folds its
// element of rand_num_GPU into the accumulator slot owned by this
// (loop_number, stream_number) pair: rand_sum_GPU[3*loop_number + stream_number - 1].
__global__ void Add_em_up(float *rand_num_GPU, float *rand_sum_GPU,int stream_number, int loop_number, int n)
{
const int gid = threadIdx.x + blockIdx.x*blockDim.x;
if(gid >= n)
return; // guard: the grid may over-cover the chunk
atomicAdd(&rand_sum_GPU[3*loop_number + stream_number-1], rand_num_GPU[gid]);
}
// Releases the paired host (malloc/free) and device (cudaMalloc/cudaFree) buffers.
void Cleanup(float *rand_num_CPU, float *rand_sum_CPU, float *rand_num_GPU, float *rand_sum_GPU)
{
free(rand_num_CPU);
free(rand_sum_CPU);
cudaFree(rand_num_GPU);
cudaFree(rand_sum_GPU);
}
// Collapses the first n partial sums into rand_sum_CPU[0], accumulating in the
// same left-to-right order as the original (identical float rounding).
void finalSum(float *rand_sum_CPU,int n)
{
float total = rand_sum_CPU[0];
for(int k = 1; k < n; k++)
{
total += rand_sum_CPU[k];
}
rand_sum_CPU[0] = total;
}
// Sums BIGN random numbers in 30 chunks of N elements, three chunks per loop
// iteration, one chunk per stream.  Fixes over the original:
//  * each stream now has its own device staging buffer (the single shared
//    rand_num_GPU made the three streams' copies and kernels race);
//  * iteration i now processes chunks 3*i, 3*i+1 and 3*i+2, covering all 30
//    chunks exactly once (previously chunks i, i+1, i+2, which overlapped and
//    left chunks 12..29 unsummed);
//  * the device is synchronized before the results are read on the host;
//  * streams and the extra buffers are released and main returns 0.
int main()
{
timeval start, end;
int i;
gettimeofday(&start, NULL);
AllocateMemory();
Innitialize();
// private staging buffers for the second and third stream
// (stream_one keeps the globally allocated rand_num_GPU)
float *chunk_two_GPU = NULL, *chunk_three_GPU = NULL;
cudaMalloc(&chunk_two_GPU, N*sizeof(float));
cudaMalloc(&chunk_three_GPU, N*sizeof(float));
//creating the three different streams
cudaStream_t stream_one, stream_two, stream_three;
cudaStreamCreate(&stream_one);
cudaStreamCreate(&stream_two);
cudaStreamCreate(&stream_three);
//makes sure that streams will be useful
cudaDeviceProp prop;
int whichDevice;
cudaGetDevice(&whichDevice);
cudaGetDeviceProperties(&prop, whichDevice);
if(!prop.deviceOverlap)
{
printf("Device will not handle overlaps, so no speed up from streams");
return(1);
}
// zero the 30 device-side accumulators (blocking copy: host buffer is pageable anyway)
cudaMemcpy(rand_sum_GPU, rand_sum_CPU, 30*sizeof(float), cudaMemcpyHostToDevice);
//each stream sums one 8192-element chunk per iteration; within a stream the
//copy for iteration i+1 waits for iteration i's kernel (stream ordering)
for(i=0; i<10; i++)
{
cudaMemcpyAsync(rand_num_GPU, rand_num_CPU + (3*i)*N, N*sizeof(float), cudaMemcpyHostToDevice,stream_one);
Add_em_up<<<dimGrid,dimBlock, 0,stream_one>>>(rand_num_GPU, rand_sum_GPU,1, i,N);
cudaMemcpyAsync(chunk_two_GPU, rand_num_CPU + (3*i+1)*N, N*sizeof(float), cudaMemcpyHostToDevice,stream_two);
Add_em_up<<<dimGrid,dimBlock, 0,stream_two>>>(chunk_two_GPU, rand_sum_GPU, 2,i,N);
cudaMemcpyAsync(chunk_three_GPU, rand_num_CPU + (3*i+2)*N, N*sizeof(float), cudaMemcpyHostToDevice,stream_three);
Add_em_up<<<dimGrid,dimBlock, 0,stream_three>>>(chunk_three_GPU, rand_sum_GPU, 3,i,N);
}
// wait for all three streams before reading the partial sums back
cudaDeviceSynchronize();
cudaMemcpy(rand_sum_CPU, rand_sum_GPU, 30*sizeof(float), cudaMemcpyDeviceToHost);
for(i=0; i<30; i++)
{
printf("final sum number %d is %f\n", i, rand_sum_CPU[i]);
}
finalSum(rand_sum_CPU, 30);
printf("The final sum is %f\n", rand_sum_CPU[0]);
gettimeofday(&end, NULL);
float time = ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec));
printf("time in milliseconds: %.15f\n", time/1000);
cudaStreamDestroy(stream_one);
cudaStreamDestroy(stream_two);
cudaStreamDestroy(stream_three);
cudaFree(chunk_two_GPU);
cudaFree(chunk_three_GPU);
Cleanup(rand_num_CPU, rand_sum_CPU, rand_num_GPU, rand_sum_GPU);
return 0;
}
/*
final sum number 0 is 411865.031250
final sum number 1 is 412250.656250
final sum number 2 is 406504.093750
final sum number 3 is 412250.156250
final sum number 4 is 406504.187500
final sum number 5 is 411834.062500
final sum number 6 is 406504.312500
final sum number 7 is 411833.906250
final sum number 8 is 409671.375000
final sum number 9 is 411833.406250
final sum number 10 is 409672.156250
final sum number 11 is 408217.968750
final sum number 12 is 409671.718750
final sum number 13 is 408217.875000
final sum number 14 is 407320.781250
final sum number 15 is 408217.875000
final sum number 16 is 407321.718750
final sum number 17 is 407714.343750
final sum number 18 is 407322.125000
final sum number 19 is 403816.593750
final sum number 20 is 409465.093750
final sum number 21 is 403815.781250
final sum number 22 is 410225.875000
final sum number 23 is 408284.968750
final sum number 24 is 410226.156250
final sum number 25 is 407310.875000
final sum number 26 is 406670.156250
final sum number 27 is 407311.406250
final sum number 28 is 406670.531250
final sum number 29 is 414002.281250
The final sum is 12262527.000000
time in milliseconds: 239.712997436523438
*/
|
ba87f7901a621a5a417d8aff3ddbd7cda8be6709.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace arithm
{
// Launch-configuration traits consumed by the gpu transform framework, keyed
// on the byte sizes of the source and destination element types.
// NOTE(review): smart_shift presumably controls how many elements each thread
// processes in the vectorized ("smart") transform path -- confirm against the
// transform framework's detail header before relying on this.
template <size_t src_size, size_t dst_size> struct ArithmFuncTraits
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 1 }; // conservative fallback (used e.g. for 8-byte types)
};
// The nine specializations below cover all combinations of 1/2/4-byte operand
// sizes; they differ from the primary template only in smart_shift = 4.
template <> struct ArithmFuncTraits<1, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}
//////////////////////////////////////////////////////////////////////////
// addMat
namespace arithm
{
// SIMD-in-register functors for saturated addition, used by the addMat/vadd
// wrappers below.  Each functor adds the packed sub-words of two 32-bit
// registers with one PTX "video" instruction (SM30+: fused vadd4/vadd2) or a
// per-lane sequence of vadd instructions (SM20+).
// In every asm statement the trailing %3 operand is the instruction's third
// source (the addend c); it is fed with the zero-initialized res, so the
// result is sat(a + b).
// NOTE(review): on __CUDA_ARCH__ < 200 the body compiles to "return 0" --
// presumably host-side dispatch never selects these functors on sm_1x;
// confirm in the calling code.
//
// VAdd4<T, D>: four saturated byte-wise adds per register; T selects the
// signedness of the sources, D the signedness of the saturated destination.
template <typename T, typename D> struct VAdd4;
// unsigned sources, unsigned saturation
template <> struct VAdd4<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
// empty default/copy constructors keep the functor constructible in device code
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<uint, uint>& other) {}
};
// signed sources, unsigned saturation
template <> struct VAdd4<int, uint> : binary_function<int, int, uint>
{
__device__ __forceinline__ uint operator ()(int a, int b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.u32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<int, uint>& other) {}
};
// unsigned sources, signed saturation
template <> struct VAdd4<uint, int> : binary_function<uint, uint, int>
{
__device__ __forceinline__ int operator ()(uint a, uint b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.s32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<uint, int>& other) {}
};
// signed sources, signed saturation
template <> struct VAdd4<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.s32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<int, int>& other) {}
};
////////////////////////////////////
// VAdd2<T, D>: two saturated halfword-wise adds per register (.h0/.h1 lanes);
// same signedness conventions as VAdd4.
template <typename T, typename D> struct VAdd2;
template <> struct VAdd2<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<uint, uint>& other) {}
};
template <> struct VAdd2<uint, int> : binary_function<uint, uint, int>
{
__device__ __forceinline__ int operator ()(uint a, uint b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd2.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.s32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<uint, int>& other) {}
};
template <> struct VAdd2<int, uint> : binary_function<int, int, uint>
{
__device__ __forceinline__ uint operator ()(int a, int b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd2.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.u32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<int, uint>& other) {}
};
template <> struct VAdd2<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd2.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.s32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<int, int>& other) {}
};
////////////////////////////////////
// AddMat<T, D>: plain per-element add with saturating cast to the destination
// type, used for all type pairs not handled by the packed functors above.
template <typename T, typename D> struct AddMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a + b);
}
__device__ __forceinline__ AddMat() {}
__device__ __forceinline__ AddMat(const AddMat& other) {}
};
}
// Route the transform framework's launch tuning for the addition functors to
// ArithmFuncTraits, keyed on the byte sizes of the operand types.
namespace cv { namespace gpu { namespace device
{
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAdd4<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAdd2<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::AddMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T, typename D>
void vadd4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, VAdd4<T, D>(), WithOutMask(), stream);
}
template void vadd4<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void vadd4<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void vadd4<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void vadd4<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template <typename T, typename D>
void vadd2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, VAdd2<T, D>(), WithOutMask(), stream);
}
template void vadd2<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void vadd2<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void vadd2<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void vadd2<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template <typename T, typename D>
void addMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), WithOutMask(), stream);
}
template void addMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
//template void addMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
template void addMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addScalar
namespace arithm
{
    // Per-element functor: y = saturate_cast<D>(x + val).
    //   T - source element type
    //   S - working type the scalar addend is held in (float/double in the
    //       instantiations below), so the sum is computed at S precision
    //   D - destination element type (sum saturates into D's range)
    template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
    {
        S val;  // scalar addend, pre-converted to the working type S by the caller

        explicit AddScalar(S val_) : val(val_) {}

        __device__ __forceinline__ D operator ()(T a) const
        {
            // a promotes to S for the addition, then saturates into D
            return saturate_cast<D>(a + val);
        }
    };
}
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for AddScalar: block shape and vector width
    // are taken from the shared ArithmFuncTraits table, keyed only on the
    // source and destination element sizes.
    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // dst(y,x) = saturate_cast<D>(src1(y,x) + val), over the whole image or
    // only where mask(y,x) != 0 when a mask is supplied.
    //   T - source type, S - working type for the scalar, D - destination type.
    // Runs asynchronously on the given stream.
    template <typename T, typename S, typename D>
    void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        AddScalar<T, S, D> functor(static_cast<S>(val));

        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, functor, WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, functor, mask, stream);
    }

    template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subMat
namespace arithm
{
    // ---- packed 4 x 8-bit saturated subtraction ----------------------------
    // One 32-bit register holds four byte lanes; the PTX vsub4/vsub video
    // instructions subtract lane-wise with saturation. The template arguments
    // select operand/result signedness: VSub4<T, D> maps to
    // vsub4.<D sign>.<T sign>.<T sign>.sat.
    //
    // NOTE(review): __CUDA_ARCH__ is an NVCC-only macro. In this hipified
    // file an AMD device compilation takes neither preprocessor branch, so
    // these operators return 0 -- confirm the vsub paths are only dispatched
    // on NVIDIA targets (or add a portable fallback).
    template <typename T, typename D> struct VSub4;
    template <> struct VSub4<uint, uint> : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            // Kepler+: one SIMD instruction for all four lanes
            asm("vsub4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            // Fermi: one scalar vsub per byte lane (.b0 .. .b3)
            asm("vsub.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        // explicit empty ctors: kept so the functor is trivially copyable
        // into device code under older compilers
        __device__ __forceinline__ VSub4() {}
        __device__ __forceinline__ VSub4(const VSub4<uint, uint>& other) {}
    };
    template <> struct VSub4<int, uint> : binary_function<int, int, uint>
    {
        __device__ __forceinline__ uint operator ()(int a, int b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vsub4.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vsub.u32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        __device__ __forceinline__ VSub4() {}
        __device__ __forceinline__ VSub4(const VSub4<int, uint>& other) {}
    };
    template <> struct VSub4<uint, int> : binary_function<uint, uint, int>
    {
        __device__ __forceinline__ int operator ()(uint a, uint b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vsub4.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vsub.s32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        __device__ __forceinline__ VSub4() {}
        __device__ __forceinline__ VSub4(const VSub4<uint, int>& other) {}
    };
    template <> struct VSub4<int, int> : binary_function<int, int, int>
    {
        __device__ __forceinline__ int operator ()(int a, int b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vsub4.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vsub.s32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        __device__ __forceinline__ VSub4() {}
        __device__ __forceinline__ VSub4(const VSub4<int, int>& other) {}
    };

    ////////////////////////////////////

    // ---- packed 2 x 16-bit saturated subtraction ---------------------------
    // Same scheme as VSub4 but with two halfword lanes (.h0/.h1) per register.
    // The same __CUDA_ARCH__ caveat from above applies here.
    template <typename T, typename D> struct VSub2;
    template <> struct VSub2<uint, uint> : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vsub2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vsub.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        __device__ __forceinline__ VSub2() {}
        __device__ __forceinline__ VSub2(const VSub2<uint, uint>& other) {}
    };
    template <> struct VSub2<uint, int> : binary_function<uint, uint, int>
    {
        __device__ __forceinline__ int operator ()(uint a, uint b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vsub2.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vsub.s32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        __device__ __forceinline__ VSub2() {}
        __device__ __forceinline__ VSub2(const VSub2<uint, int>& other) {}
    };
    template <> struct VSub2<int, uint> : binary_function<int, int, uint>
    {
        __device__ __forceinline__ uint operator ()(int a, int b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vsub2.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vsub.u32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.u32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        __device__ __forceinline__ VSub2() {}
        __device__ __forceinline__ VSub2(const VSub2<int, uint>& other) {}
    };
    template <> struct VSub2<int, int> : binary_function<int, int, int>
    {
        __device__ __forceinline__ int operator ()(int a, int b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vsub2.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vsub.s32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vsub.s32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }

        __device__ __forceinline__ VSub2() {}
        __device__ __forceinline__ VSub2(const VSub2<int, int>& other) {}
    };

    ////////////////////////////////////

    // Scalar per-element subtraction: d = saturate_cast<D>(a - b).
    // General fallback used by subMat() for every type combination.
    template <typename T, typename D> struct SubMat : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a - b);
        }

        __device__ __forceinline__ SubMat() {}
        __device__ __forceinline__ SubMat(const SubMat& other) {}
    };
}
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the subtraction functors: all three
    // take their block/vector settings from the shared ArithmFuncTraits
    // table, keyed on source and destination element sizes.
    template <typename T, typename D> struct TransformFunctorTraits< arithm::VSub4<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };

    ////////////////////////////////////

    template <typename T, typename D> struct TransformFunctorTraits< arithm::VSub2<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };

    ////////////////////////////////////

    template <typename T, typename D> struct TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
    // Packed 4x8-bit saturated subtraction (four byte lanes per 32-bit word).
    // No mask variant; runs asynchronously on the given stream.
    template <typename T, typename D>
    void vsub4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        VSub4<T, D> functor;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, functor, WithOutMask(), stream);
    }

    template void vsub4<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vsub4<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vsub4<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vsub4<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    // Packed 2x16-bit saturated subtraction (two halfword lanes per word).
    template <typename T, typename D>
    void vsub2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        VSub2<T, D> functor;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, functor, WithOutMask(), stream);
    }

    template void vsub2<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vsub2<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vsub2<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vsub2<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    // dst(y,x) = saturate_cast<D>(src1(y,x) - src2(y,x)), over the whole
    // image or only where mask(y,x) != 0 when a mask is supplied.
    template <typename T, typename D>
    void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        SubMat<T, D> functor;

        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, functor, WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, functor, mask, stream);
    }

    template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subScalar
namespace arithm
{
    // dst(y,x) = saturate_cast<D>(src1(y,x) - val), implemented by reusing
    // AddScalar with the negated scalar (exact, since S is float/double in
    // every instantiation below). Masked when mask.data is non-null.
    template <typename T, typename S, typename D>
    void subScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        AddScalar<T, S, D> functor(-static_cast<S>(val));

        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, functor, WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, functor, mask, stream);
    }

    template void subScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    template void subScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);

    //template void subScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    //template void subScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void subScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulMat
namespace arithm
{
    // Multiplies each of the four bytes packed in a uint by a float weight,
    // saturating every product back to 8 bits before repacking it at its lane.
    struct Mul_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;

            // unpack byte lane, scale by b, saturate to uchar, repack in place
            res |= (saturate_cast<uchar>((0xffu & (a      )) * b)      );
            res |= (saturate_cast<uchar>((0xffu & (a >>  8)) * b) <<  8);
            res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
            res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);

            return res;
        }

        // explicit empty ctors: kept so the functor is trivially copyable
        // into device code under older compilers
        __device__ __forceinline__ Mul_8uc4_32f() {}
        __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f& other) {}
    };

    // Multiplies every channel of a short4 by a float weight, saturating each
    // product back to short range.
    struct Mul_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return make_short4(saturate_cast<short>(a.x * b), saturate_cast<short>(a.y * b),
                               saturate_cast<short>(a.z * b), saturate_cast<short>(a.w * b));
        }

        __device__ __forceinline__ Mul_16sc4_32f() {}
        __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f& other) {}
    };

    // Plain per-element product: d = saturate_cast<D>(a * b).
    template <typename T, typename D> struct Mul : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(a * b);
        }

        __device__ __forceinline__ Mul() {}
        __device__ __forceinline__ Mul(const Mul& other) {}
    };

    // Scaled per-element product: d = saturate_cast<D>(scale * a * b),
    // with the multiply carried out in the working type S.
    template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
    {
        S scale;  // pre-converted scale factor

        explicit MulScale(S scale_) : scale(scale_) {}

        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return saturate_cast<D>(scale * a * b);
        }
    };
}
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the multiplication functors, again
    // derived from the shared ArithmFuncTraits table by element sizes.
    template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };

    template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };

    template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
    {
    };
}}}
namespace arithm
{
// 8UC4 * 32F packed multiply: each uint in src1 holds four uchar channels,
// all scaled by the matching float in src2 (see Mul_8uc4_32f above).
void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
}
// 16SC4 * 32F multiply (see Mul_16sc4_32f above).
void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, hipStream_t stream)
{
transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
}
// Element-wise multiply: dst = saturate_cast<D>(scale * src1 * src2).
// T = source element type, S = type the scale factor is applied in,
// D = destination element type. A scale of exactly 1 (exact double compare,
// intentional) selects the cheaper unscaled functor.
template <typename T, typename S, typename D>
void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream)
{
if (scale == 1)
{
Mul<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
MulScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
// Explicit instantiations for the supported <src, scale, dst> type triples;
// the commented-out lines document combinations intentionally not built.
template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulScalar
namespace arithm
{
    // Per-element multiply by a constant scalar held in type S, with the
    // product saturated to the destination type D.
    template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
    {
        S val;
        explicit MulScalar(S val_) : val(val_) {}
        __device__ __forceinline__ D operator ()(T src) const
        {
            return saturate_cast<D>(src * val);
        }
    };
}
namespace cv { namespace gpu { namespace device
{
// Transform traits for MulScalar, keyed on the source and destination
// element sizes (ArithmFuncTraits is defined elsewhere in this file).
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Element-wise multiply by a scalar: dst = saturate_cast<D>(src1 * val),
// with val narrowed to the intermediate type S before the kernel runs.
template <typename T, typename S, typename D>
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations for the supported <src, scalar, dst> type triples;
// the commented-out lines document combinations intentionally not built.
template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divMat
namespace arithm
{
    // 8UC4 / 32F packed divide: the four uchar channels in each uint of the
    // source are divided by the matching float divisor; a zero divisor yields
    // 0 for all four channels.
    struct Div_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;
            if (b != 0)
            {
                // One reciprocal plus four multiplies instead of four divides.
                // Keep the reciprocal form: rounding differs from a direct divide.
                b = 1.0f / b;
                res |= (saturate_cast<uchar>((0xffu & (a      )) * b)      );
                res |= (saturate_cast<uchar>((0xffu & (a >>  8)) * b) <<  8);
                res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
                res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
            }
            return res;
        }
        // Explicit default/copy constructors for consistency with Mul_8uc4_32f above.
        __device__ __forceinline__ Div_8uc4_32f() {}
        __device__ __forceinline__ Div_8uc4_32f(const Div_8uc4_32f& other) {}
    };
    // 16SC4 / 32F divide; a zero divisor yields (0,0,0,0).
    struct Div_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
                                        saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
                          : make_short4(0,0,0,0);
        }
        // Explicit default/copy constructors for consistency with Mul_16sc4_32f above.
        __device__ __forceinline__ Div_16sc4_32f() {}
        __device__ __forceinline__ Div_16sc4_32f(const Div_16sc4_32f& other) {}
    };
    // Element-wise divide saturated to D; b == 0 maps to 0.
    template <typename T, typename D> struct Div : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(a / b) : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // float-destination specialization: returns the quotient directly (no saturate_cast).
    template <typename T> struct Div<T, float> : binary_function<T, T, float>
    {
        __device__ __forceinline__ float operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<float>(a) / b : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // double-destination specialization: returns the quotient directly (no saturate_cast).
    template <typename T> struct Div<T, double> : binary_function<T, T, double>
    {
        __device__ __forceinline__ double operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<double>(a) / b : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // Scaled divide: dst = saturate_cast<D>(scale * a / b), with b == 0 -> 0.
    template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
    {
        S scale;
        explicit DivScale(S scale_) : scale(scale_) {}
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
        }
    };
}
namespace cv { namespace gpu { namespace device
{
// Transform traits for the divide functors, keyed on the source and
// destination element sizes (ArithmFuncTraits is defined elsewhere in this file).
template <> struct TransformFunctorTraits<arithm::Div_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Div<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// 8UC4 / 32F packed divide: each uint of src1 carries four uchar channels,
// each divided by the matching float in src2 (see Div_8uc4_32f).
void divMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, hipStream_t stream)
{
transform(src1, src2, dst, Div_8uc4_32f(), WithOutMask(), stream);
}
// 16SC4 / 32F divide (see Div_16sc4_32f).
void divMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, hipStream_t stream)
{
transform(src1, src2, dst, Div_16sc4_32f(), WithOutMask(), stream);
}
// Element-wise divide: dst = saturate_cast<D>(scale * src1 / src2), with a
// zero src2 element producing 0. A scale of exactly 1 (exact double compare,
// intentional) selects the cheaper unscaled functor.
template <typename T, typename S, typename D>
void divMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream)
{
if (scale == 1)
{
Div<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
DivScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
// Explicit instantiations for the supported <src, scale, dst> type triples;
// the commented-out lines document combinations intentionally not built.
template void divMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
//template void divMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
template void divMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divScalar
namespace arithm
{
// Element-wise divide by a scalar, implemented as a multiply by the
// reciprocal: dst = saturate_cast<D>(src1 * static_cast<S>(1.0 / val)).
// NOTE(review): unlike divMat, val == 0 yields an inf scale factor here
// rather than a zero-filled result -- confirm callers reject a zero divisor.
template <typename T, typename S, typename D>
void divScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(1.0 / val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations for the supported <src, scalar, dst> type triples;
// the commented-out lines document combinations intentionally not built.
template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divInv
namespace arithm
{
    // "Scalar over element" divide: dst = saturate_cast<D>(val / src), with a
    // zero source element mapping to 0, matching the zero-divisor handling of
    // the Div functors above.
    template <typename T, typename S, typename D> struct DivInv : unary_function<T, D>
    {
        S val;
        explicit DivInv(S val_) : val(val_) {}
        __device__ __forceinline__ D operator ()(T denom) const
        {
            if (denom == 0)
                return 0;
            return saturate_cast<D>(val / denom);
        }
    };
}
namespace cv { namespace gpu { namespace device
{
// Transform traits for DivInv, keyed on the source and destination
// element sizes (ArithmFuncTraits is defined elsewhere in this file).
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Launches DivInv over src1: dst = saturate_cast<D>(val / src1) element-wise,
// with zero source elements mapped to 0 by the functor.
template <typename T, typename S, typename D>
void divInv(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
{
    transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, DivInv<T, S, D>(static_cast<S>(val)), WithOutMask(), stream);
}
// Explicit instantiations of divInv for the supported <src, scalar, dst>
// type triples; the commented-out lines document combinations not built.
template void divInv<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
//template void divInv<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
template void divInv<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffMat
namespace arithm
{
// SIMD absolute difference on four packed 8-bit lanes per 32-bit word,
// implemented with PTX video instructions (vabsdiff4 / per-byte vabsdiff).
template <typename T, typename D> struct VAbsDiff4;
template <> struct VAbsDiff4<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
// Kepler+: a single instruction covers all four unsigned byte lanes, saturated.
asm("vabsdiff4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// Fermi: one scalar vabsdiff per byte lane; the .bN destination selector
// merges each lane into res, which is fed back through operand %3.
asm("vabsdiff.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
// NOTE(review): for __CUDA_ARCH__ < 200 neither branch runs and res stays 0.
return res;
}
__device__ __forceinline__ VAbsDiff4() {}
__device__ __forceinline__ VAbsDiff4(const VAbsDiff4<uint, uint>& other) {}
};
// Signed variant: same four-lane layout, signed saturation (.s32 forms).
template <> struct VAbsDiff4<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vabsdiff4.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vabsdiff.s32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAbsDiff4() {}
__device__ __forceinline__ VAbsDiff4(const VAbsDiff4<int, int>& other) {}
};
////////////////////////////////////
// Two packed 16-bit lanes per 32-bit word (vabsdiff2 / .h0/.h1 selectors).
template <typename T, typename D> struct VAbsDiff2;
template <> struct VAbsDiff2<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vabsdiff2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// Per-halfword fallback, merged into res like the byte version above.
asm("vabsdiff.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAbsDiff2() {}
__device__ __forceinline__ VAbsDiff2(const VAbsDiff2<uint, uint>& other) {}
};
template <> struct VAbsDiff2<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vabsdiff2.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vabsdiff.s32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAbsDiff2() {}
__device__ __forceinline__ VAbsDiff2(const VAbsDiff2<int, int>& other) {}
};
////////////////////////////////////
// Overload set so AbsDiffMat can take |x| of the promoted difference type.
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
// Scalar (one element per thread) absolute difference: a - b is computed
// after the usual arithmetic promotions, then |.| and saturation back to T.
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__device__ __forceinline__ AbsDiffMat() {}
__device__ __forceinline__ AbsDiffMat(const AbsDiffMat& other) {}
};
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the absdiff functors above.
// ArithmFuncTraits (defined elsewhere) presumably derives block/read sizes
// from the source/destination element widths -- TODO confirm against its definition.
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAbsDiff4<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAbsDiff2<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Four-lane (8-bit) SIMD absolute difference of two matrices.
    template <typename T>
    void vabsDiff4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        VAbsDiff4<T, T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void vabsDiff4<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vabsDiff4<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    // Two-lane (16-bit) SIMD absolute difference of two matrices.
    template <typename T>
    void vabsDiff2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        VAbsDiff2<T, T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void vabsDiff2<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vabsDiff2<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    // One-element-per-thread absolute difference for every supported depth.
    template <typename T>
    void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        AbsDiffMat<T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffScalar
namespace arithm
{
    // Functor: absolute difference against a fixed scalar. The subtraction
    // and |.| are carried out in the work type S, then saturated back to T.
    template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
    {
        S val;

        explicit AbsDiffScalar(S scalar) : val(scalar) {}

        __device__ __forceinline__ T operator ()(T a) const
        {
            return saturate_cast<T>(abs_func<S>()(a - val));
        }
    };
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for AbsDiffScalar (element widths of T in/out).
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Launch |src - val| over the whole matrix; the host-side double scalar
    // is narrowed to the work type S before the kernel is enqueued.
    template <typename T, typename S>
    void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, hipStream_t stream)
    {
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, AbsDiffScalar<T, S>(static_cast<S>(val)), WithOutMask(), stream);
    }

    template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absMat
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the stock abs_func functor (absMat below).
template <typename T> struct TransformFunctorTraits< abs_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Element-wise absolute value, dst = |src|, via the device abs_func functor.
    template <typename T>
    void absMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        abs_func<T> op;
        transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void absMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void absMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrMat
namespace arithm
{
    // Functor: element-wise square, saturated back to the element type T.
    template <typename T> struct Sqr : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T v) const
        {
            return saturate_cast<T>(v * v);
        }

        __device__ __forceinline__ Sqr() {}
        __device__ __forceinline__ Sqr(const Sqr& other) {}
    };
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the Sqr functor (sqrMat below).
template <typename T> struct TransformFunctorTraits< arithm::Sqr<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Element-wise square, dst = src * src (saturating).
    template <typename T>
    void sqrMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        Sqr<T> op;
        transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void sqrMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrtMat
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the stock sqrt_func functor (sqrtMat below).
template <typename T> struct TransformFunctorTraits< sqrt_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Element-wise square root via the device sqrt_func functor.
    template <typename T>
    void sqrtMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        sqrt_func<T> op;
        transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void sqrtMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void sqrtMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// logMat
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the stock log_func functor (logMat below).
template <typename T> struct TransformFunctorTraits< log_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Element-wise natural logarithm via the device log_func functor.
    template <typename T>
    void logMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        log_func<T> op;
        transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void logMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void logMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// expMat
namespace arithm
{
    // Functor: element-wise exponential, saturated back to the element type T.
    template <typename T> struct Exp : unary_function<T, T>
    {
        __device__ __forceinline__ T operator ()(T x) const
        {
            return saturate_cast<T>(exp_func<T>()(x));
        }

        __device__ __forceinline__ Exp() {}
        __device__ __forceinline__ Exp(const Exp& other) {}
    };
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the Exp functor (expMat below).
template <typename T> struct TransformFunctorTraits< arithm::Exp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Element-wise exponential, dst = exp(src), saturated to the element type.
    template <typename T>
    void expMat(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream)
    {
        Exp<T> op;
        transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }

    template void expMat<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
    template void expMat<double>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpMat
namespace arithm
{
// Generic comparison functor: applies Op (equal_to, less, ...) and expands
// the result to a full 8-bit mask. Unary minus turns true (1) into -1,
// which truncates to uchar 0xFF; false stays 0x00 (OpenCV mask convention).
template <class Op, typename T>
struct Cmp : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T a, T b) const
{
Op op;
return -op(a, b);
}
};
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for Cmp: reads T, writes a uchar mask.
template <class Op, typename T> struct TransformFunctorTraits< arithm::Cmp<Op, T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
// Core launcher: compares src1 against src2 element-wise with Op and writes
// a 0x00/0xFF uchar mask to dst.
template <template <typename> class Op, typename T>
void cmpMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
Cmp<Op<T>, T> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, dst, op, WithOutMask(), stream);
}
// Thin predicate-specific entry points below. Only Eq/Ne/Lt/Le are provided
// here; Gt/Ge are presumably obtained by the caller swapping operands -- not
// visible in this chunk, TODO confirm.
template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<not_equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLt(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<less, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
cmpMat<less_equal, T>(src1, src2, dst, stream);
}
// Explicit instantiations for every supported element depth.
template void cmpMatEq<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatEq<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatNe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLt<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void cmpMatLe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpScalar
namespace arithm
{
// Shorthand for the project's channel-count-aware vector types (uchar3, float4, ...).
#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
// Per-channel scalar comparison: one specialization per channel count (1..4).
// Each applies Cmp<Op, T> channel-wise against the stored scalar and packs
// the uchar masks into the matching uchar vector type.
template <class Op, typename T, int cn> struct CmpScalar;
template <class Op, typename T>
struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
{
const T val;
__host__ explicit CmpScalar(T val_) : val(val_) {}
__device__ __forceinline__ uchar operator()(T src) const
{
Cmp<Op, T> op;
return op(src, val);
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
{
const TYPE_VEC(T, 2) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
{
const TYPE_VEC(T, 3) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
}
};
template <class Op, typename T>
struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
{
const TYPE_VEC(T, 4) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
}
};
// Macro is local to this section only.
#undef TYPE_VEC
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the single-channel CmpScalar only; the
// multi-channel variants fall back to the default traits.
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
// Core launcher: compares each pixel of src against the per-channel scalar
// val[0..3] with Op and writes a 0x00/0xFF uchar mask (cn channels) to dst.
// The double scalars are narrowed to T before being packed into a vector.
template <template <typename> class Op, typename T, int cn>
void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type src_t;
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
T sval[] = {static_cast<T>(val[0]), static_cast<T>(val[1]), static_cast<T>(val[2]), static_cast<T>(val[3])};
src_t val1 = VecTraits<src_t>::make(sval);
CmpScalar<Op<T>, T, cn> op(val1);
transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
}
// Per-predicate dispatchers, indexed by channel count. funcs[0] is a null
// placeholder -- callers must pass cn in [1, 4]; there is no bounds check
// here (NOTE(review): cn outside that range dereferences garbage/null).
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<equal_to, T, 1>,
cmpScalar<equal_to, T, 2>,
cmpScalar<equal_to, T, 3>,
cmpScalar<equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<not_equal_to, T, 1>,
cmpScalar<not_equal_to, T, 2>,
cmpScalar<not_equal_to, T, 3>,
cmpScalar<not_equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less, T, 1>,
cmpScalar<less, T, 2>,
cmpScalar<less, T, 3>,
cmpScalar<less, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less_equal, T, 1>,
cmpScalar<less_equal, T, 2>,
cmpScalar<less_equal, T, 3>,
cmpScalar<less_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater, T, 1>,
cmpScalar<greater, T, 2>,
cmpScalar<greater, T, 3>,
cmpScalar<greater, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, hipStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater_equal, T, 1>,
cmpScalar<greater_equal, T, 2>,
cmpScalar<greater_equal, T, 3>,
cmpScalar<greater_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
// Explicit instantiations: 6 predicates x 7 depths.
template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<int   >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitMat
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the stock bitwise functors (bitMat* below).
template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_and<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_or<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< bit_xor<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Bitwise NOT. An optional mask (non-null mask.data) is forwarded to
    // transform; otherwise the whole matrix is processed unmasked.
    template <typename T> void bitMatNot(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (!mask.data)
            transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, bit_not<T>(), mask, stream);
    }

    // Bitwise AND of two matrices, optionally masked.
    template <typename T> void bitMatAnd(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_and<T>(), mask, stream);
    }

    // Bitwise OR of two matrices, optionally masked.
    template <typename T> void bitMatOr(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_or<T>(), mask, stream);
    }

    // Bitwise XOR of two matrices, optionally masked.
    template <typename T> void bitMatXor(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream)
    {
        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, bit_xor<T>(), mask, stream);
    }

    template void bitMatNot<uchar>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatNot<ushort>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatNot<uint>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatAnd<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatAnd<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatAnd<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatOr<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatOr<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatOr<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatXor<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatXor<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
    template void bitMatXor<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitScalar
// Traits for the scalar-bound bitwise functors (binder2nd wraps a bit op with
// a fixed second operand); element sizes are the same as the unbound functors.
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_or<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< bit_xor<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // dst = src1 & s for a scalar s; the scalar is baked into the functor via
    // bind2nd, so no mask variant is needed here.
    template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> s = (PtrStepSz<T>) src1;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(s, d, cv::gpu::device::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
    }

    // dst = src1 | s for a scalar s.
    template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> s = (PtrStepSz<T>) src1;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(s, d, cv::gpu::device::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
    }

    // dst = src1 ^ s for a scalar s.
    template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> s = (PtrStepSz<T>) src1;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(s, d, cv::gpu::device::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
    }

    // Explicit instantiations for the supported unsigned element widths.
    template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarAnd<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarAnd<uint>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarOr<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarOr<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarOr<uint>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarXor<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarXor<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
    template void bitScalarXor<uint>(PtrStepSzb src1, uint src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// min
namespace arithm
{
// SIMD-in-register minimum functors. A 32-bit word carries several packed
// lanes; a single PTX "video" instruction computes the per-lane minimum
// (SM30+ has the fused vmin4/vmin2 forms, SM20 issues one vmin per lane).
// Below SM20 neither branch compiles in, so the zero-initialised res is
// returned unchanged -- NOTE(review): presumably these functors are only
// dispatched to on SM20+ hardware; confirm at the call sites.
template <typename T> struct VMin4;
// Four unsigned 8-bit lanes per 32-bit word.
template <> struct VMin4<uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
// res is also fed back in as the instruction's third (%3) source operand.
asm("vmin4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// SM20 lacks the fused form: select each byte lane individually.
asm("vmin.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
// Trivial ctors spelled out explicitly (functor is stateless);
// NOTE(review): presumably a device-compiler workaround -- confirm.
__device__ __forceinline__ VMin4() {}
__device__ __forceinline__ VMin4(const VMin4& other) {}
};
// Four signed 8-bit lanes per 32-bit word.
template <> struct VMin4<int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vmin4.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vmin.s32.s32.s32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.s32.s32.s32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.s32.s32.s32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.s32.s32.s32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VMin4() {}
__device__ __forceinline__ VMin4(const VMin4& other) {}
};
////////////////////////////////////
template <typename T> struct VMin2;
// Two unsigned 16-bit lanes per 32-bit word.
template <> struct VMin2<uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vmin2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// Per-halfword selection on SM20.
asm("vmin.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VMin2() {}
__device__ __forceinline__ VMin2(const VMin2& other) {}
};
// Two signed 16-bit lanes per 32-bit word.
template <> struct VMin2<int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vmin2.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vmin.s32.s32.s32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmin.s32.s32.s32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VMin2() {}
__device__ __forceinline__ VMin2(const VMin2& other) {}
};
}
// Traits for the minimum functors: SIMD (VMin4/VMin2), plain element-wise
// (minimum<T>), and scalar-bound (binder2nd< minimum<T> >) variants all
// map sizeof(T) in to sizeof(T) out.
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::VMin4<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< arithm::VMin2<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Packed per-byte minimum: each 32-bit word holds four 8-bit lanes
    // (see VMin4's .b0-.b3 asm selectors).
    template <typename T> void vmin4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> a = (PtrStepSz<T>) src1;
        PtrStepSz<T> b = (PtrStepSz<T>) src2;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(a, b, d, VMin4<T>(), WithOutMask(), stream);
    }

    // Packed per-halfword minimum: two 16-bit lanes per 32-bit word.
    template <typename T> void vmin2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> a = (PtrStepSz<T>) src1;
        PtrStepSz<T> b = (PtrStepSz<T>) src2;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(a, b, d, VMin2<T>(), WithOutMask(), stream);
    }

    // Plain element-wise minimum of two matrices: dst = min(src1, src2).
    template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> a = (PtrStepSz<T>) src1;
        PtrStepSz<T> b = (PtrStepSz<T>) src2;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(a, b, d, minimum<T>(), WithOutMask(), stream);
    }

    template void vmin4<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vmin4<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vmin2<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vmin2<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    // dst = min(src1, s) for a scalar s bound into the functor via bind2nd
    // (the double is converted to T inside the binder).
    template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> s = (PtrStepSz<T>) src1;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(s, d, cv::gpu::device::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
    }

    template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// max
namespace arithm
{
// SIMD-in-register maximum functors, mirror images of VMin4/VMin2: packed
// per-lane maximum via PTX vmax4/vmax2 on SM30+, one vmax per lane on SM20.
// Below SM20 the zero-initialised res is returned unchanged --
// NOTE(review): presumably only dispatched to on SM20+; confirm at call sites.
template <typename T> struct VMax4;
// Four unsigned 8-bit lanes per 32-bit word.
template <> struct VMax4<uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
// res is also fed back in as the instruction's third (%3) source operand.
asm("vmax4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// SM20 lacks the fused form: select each byte lane individually.
asm("vmax.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
// Trivial ctors spelled out explicitly (functor is stateless);
// NOTE(review): presumably a device-compiler workaround -- confirm.
__device__ __forceinline__ VMax4() {}
__device__ __forceinline__ VMax4(const VMax4& other) {}
};
// Four signed 8-bit lanes per 32-bit word.
template <> struct VMax4<int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vmax4.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vmax.s32.s32.s32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.s32.s32.s32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.s32.s32.s32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.s32.s32.s32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VMax4() {}
__device__ __forceinline__ VMax4(const VMax4& other) {}
};
////////////////////////////////////
template <typename T> struct VMax2;
// Two unsigned 16-bit lanes per 32-bit word.
template <> struct VMax2<uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vmax2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// Per-halfword selection on SM20.
asm("vmax.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VMax2() {}
__device__ __forceinline__ VMax2(const VMax2& other) {}
};
// Two signed 16-bit lanes per 32-bit word.
template <> struct VMax2<int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vmax2.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vmax.s32.s32.s32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vmax.s32.s32.s32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VMax2() {}
__device__ __forceinline__ VMax2(const VMax2& other) {}
};
}
// Traits for the maximum functors: SIMD (VMax4/VMax2), plain element-wise
// (maximum<T>), and scalar-bound (binder2nd< maximum<T> >) variants all
// map sizeof(T) in to sizeof(T) out.
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::VMax4<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< arithm::VMax2<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
////////////////////////////////////
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Packed per-byte maximum: each 32-bit word holds four 8-bit lanes
    // (see VMax4's .b0-.b3 asm selectors).
    template <typename T> void vmax4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> a = (PtrStepSz<T>) src1;
        PtrStepSz<T> b = (PtrStepSz<T>) src2;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(a, b, d, VMax4<T>(), WithOutMask(), stream);
    }

    // Packed per-halfword maximum: two 16-bit lanes per 32-bit word.
    template <typename T> void vmax2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> a = (PtrStepSz<T>) src1;
        PtrStepSz<T> b = (PtrStepSz<T>) src2;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(a, b, d, VMax2<T>(), WithOutMask(), stream);
    }

    // Plain element-wise maximum of two matrices: dst = max(src1, src2).
    template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> a = (PtrStepSz<T>) src1;
        PtrStepSz<T> b = (PtrStepSz<T>) src2;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(a, b, d, maximum<T>(), WithOutMask(), stream);
    }

    template void vmax4<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vmax4<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vmax2<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void vmax2<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void maxMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
    template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);

    // dst = max(src1, s) for a scalar s bound into the functor via bind2nd
    // (the double is converted to T inside the binder).
    template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> s = (PtrStepSz<T>) src1;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        transform(s, d, cv::gpu::device::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
    }

    template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void maxScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
    template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// threshold
// Traits for the five threshold functor families; all are same-width
// element-wise transforms (sizeof(T) in, sizeof(T) out).
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< thresh_binary_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_binary_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_trunc_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< thresh_to_zero_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
// Constructs a threshold functor (Op is one of the thresh_*_func templates)
// and runs it over the whole matrix via transform().
template <template <typename> class Op, typename T>
void threshold_caller(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, hipStream_t stream)
{
Op<T> op(thresh, maxVal);
transform(src, dst, op, WithOutMask(), stream);
}
// Dispatches on the threshold type. Table order follows the functor names:
// binary, binary_inv, trunc, to_zero, to_zero_inv.
// NOTE(review): 'type' is used as an unchecked array index -- callers must
// pass a value in [0, 4]; presumably validated at the API layer. TODO confirm.
template <typename T>
void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, hipStream_t stream);
static const caller_t callers[] =
{
threshold_caller<thresh_binary_func, T>,
threshold_caller<thresh_binary_inv_func, T>,
threshold_caller<thresh_trunc_func, T>,
threshold_caller<thresh_to_zero_func, T>,
threshold_caller<thresh_to_zero_inv_func, T>
};
// thresh/maxVal are narrowed from double to the element type here.
callers[type]((PtrStepSz<T>) src, (PtrStepSz<T>) dst, static_cast<T>(thresh), static_cast<T>(maxVal), stream);
}
// Explicit instantiations for all supported element types.
template void threshold<uchar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<schar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<ushort>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<short>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<int>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<float>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
template void threshold<double>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// pow
namespace arithm
{
// Element-wise power functor, specialised on signedness and floating type.
// Primary template handles unsigned integers: compute in single precision
// with the fast __powf intrinsic, saturate the result back to T.
template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
return saturate_cast<T>(__powf((float)e, power));
}
};
// Signed integer specialisation: the sign of the result is fixed up after
// the float computation -- the result is negated when the base is negative
// and the (truncated) exponent is odd.
// NOTE(review): for a negative base __powf itself does not produce the
// magnitude directly; confirm the intended behaviour for negative inputs.
template<typename T> struct PowOp<T, true> : unary_function<T, T>
{
float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ T operator()(T e) const
{
T res = saturate_cast<T>(__powf((float)e, power));
if ((e < 0) && (1 & static_cast<int>(power)))
res *= -1;
return res;
}
};
// float specialisation: power of the absolute value, so negative inputs
// lose their sign (no sign fix-up is applied here).
template<> struct PowOp<float> : unary_function<float, float>
{
const float power;
PowOp(double power_) : power(static_cast<float>(power_)) {}
__device__ __forceinline__ float operator()(float e) const
{
return __powf(::fabs(e), power);
}
};
// double specialisation: full-precision ::pow of the absolute value.
template<> struct PowOp<double> : unary_function<double, double>
{
double power;
PowOp(double power_) : power(power_) {}
__device__ __forceinline__ double operator()(double e) const
{
return ::pow(::fabs(e), power);
}
};
}
// Traits for PowOp: same-width element-wise transform.
namespace cv { namespace gpu { namespace device
{
template <typename T> struct TransformFunctorTraits< arithm::PowOp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
namespace arithm
{
    // Raises every element of src to the given power, saturating to T.
    // Per-type behaviour (sign handling, float vs double math) lives in PowOp.
    template<typename T>
    void pow(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream)
    {
        PtrStepSz<T> s = (PtrStepSz<T>) src;
        PtrStepSz<T> d = (PtrStepSz<T>) dst;
        PowOp<T> op(power);
        transform(s, d, op, WithOutMask(), stream);
    }

    template void pow<uchar>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
    template void pow<schar>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
    template void pow<short>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
    template void pow<ushort>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
    template void pow<int>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
    template void pow<float>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
    template void pow<double>(PtrStepSzb src, double power, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addWeighted
namespace arithm
{
// Compile-time flag: set only for double, used to pick the arithmetic
// precision of AddWeighted below.
template <typename T> struct UseDouble_
{
enum {value = 0};
};
template <> struct UseDouble_<double>
{
enum {value = 1};
};
// True when any of the two source types or the destination type is double.
template <typename T1, typename T2, typename D> struct UseDouble
{
enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
};
// dst = saturate_cast<D>(a*alpha + b*beta + gamma).
// useDouble=false: weights are narrowed to float so the whole expression is
// evaluated in single precision.
template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
{
float alpha;
float beta;
float gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
// useDouble=true: weights kept at full double precision.
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
{
double alpha;
double beta;
double gamma;
AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
// Public functor: selects the float or double variant from the types involved.
template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
{
AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
};
}
// Traits for AddWeighted: when both sources have the same element size the
// tuned ArithmFuncTraits are used, otherwise the default transform traits.
namespace cv { namespace gpu { namespace device
{
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{
};
// Partial specialisation chosen when src1_size == src2_size.
template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
{
};
template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
{
};
}}}
namespace arithm
{
// Weighted per-element sum: dst = saturate_cast<D>(src1*alpha + src2*beta + gamma).
// Arithmetic precision (float vs double) is selected at compile time by
// AddWeighted<T1, T2, D> based on the types involved.
template <typename T1, typename T2, typename D>
void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream)
{
AddWeighted<T1, T2, D> op(alpha, beta, gamma);
transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
// Explicit instantiations of the addWeighted<T1, T2, D> host launcher for all
// supported (src1 element type, src2 element type, dst element type) triples.
// Only combinations with T1 <= T2 (in the uchar < schar < ushort < short <
// int < float < double depth ordering) are emitted -- presumably the host-side
// dispatch swaps the operands to normalize the order; TODO confirm against the
// caller.
// --- src1 == uchar (tail of the group; earlier combinations precede this chunk)
template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
// --- src1 == schar
template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
// --- src1 == ushort
template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
// --- src1 == short
template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
// --- src1 == int
template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
// --- src1 == float
template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
// --- src1 == double
template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| ba87f7901a621a5a417d8aff3ddbd7cda8be6709.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace arithm
{
// Launch-configuration traits consumed by the cv::gpu::device::transform
// framework (opencv2/gpu/device/transform.hpp): block dimensions for the
// "simple" and "smart" kernel variants, plus smart_shift.  smart_shift
// appears to be the number of elements each thread handles in the vectorized
// "smart" path -- TODO confirm against transform_detail.hpp.  Traits are
// selected by (source element size, destination element size) in bytes.
//
// Primary template: conservative scalar fallback (smart_shift = 1) for any
// operand-size pair without an explicit specialization below.
template <size_t src_size, size_t dst_size> struct ArithmFuncTraits
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 1 };
};
// All nine combinations of 1-, 2- and 4-byte operands use smart_shift = 4;
// the block dimensions are identical to the primary template in every case.
template <> struct ArithmFuncTraits<1, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<1, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<2, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 1>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 2>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
template <> struct ArithmFuncTraits<4, 4>
{
enum { simple_block_dim_x = 32 };
enum { simple_block_dim_y = 8 };
enum { smart_block_dim_x = 32 };
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}
//////////////////////////////////////////////////////////////////////////
// addMat
namespace arithm
{
template <typename T, typename D> struct VAdd4;
template <> struct VAdd4<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<uint, uint>& other) {}
};
template <> struct VAdd4<int, uint> : binary_function<int, int, uint>
{
__device__ __forceinline__ uint operator ()(int a, int b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.u32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<int, uint>& other) {}
};
template <> struct VAdd4<uint, int> : binary_function<uint, uint, int>
{
__device__ __forceinline__ int operator ()(uint a, uint b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.s32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<uint, int>& other) {}
};
template <> struct VAdd4<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd4.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.s32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd4() {}
__device__ __forceinline__ VAdd4(const VAdd4<int, int>& other) {}
};
////////////////////////////////////
template <typename T, typename D> struct VAdd2;
template <> struct VAdd2<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<uint, uint>& other) {}
};
template <> struct VAdd2<uint, int> : binary_function<uint, uint, int>
{
__device__ __forceinline__ int operator ()(uint a, uint b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vadd2.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vadd.s32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<uint, int>& other) {}
};
// int + int -> uint, per-halfword add saturated to the unsigned range.
template <> struct VAdd2<int, uint> : binary_function<int, int, uint>
{
__device__ __forceinline__ uint operator ()(int a, int b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
// SM30+: single SIMD video instruction for both halfwords.
asm("vadd2.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// SM20: per-halfword scalar video adds merged into %3 via the selectors.
asm("vadd.u32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.u32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
// Explicit no-op constructors (stateless functor).
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<int, uint>& other) {}
};
// int + int -> int, per-halfword add with signed saturation.
template <> struct VAdd2<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
// SM30+: single SIMD video instruction for both halfwords.
asm("vadd2.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// SM20: per-halfword scalar video adds merged into %3 via the selectors.
asm("vadd.s32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vadd.s32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
// Explicit no-op constructors (stateless functor).
__device__ __forceinline__ VAdd2() {}
__device__ __forceinline__ VAdd2(const VAdd2<int, int>& other) {}
};
////////////////////////////////////
// Generic element-wise addition functor: yields saturate_cast<D>(a + b)
// for every element pair. Used by addMat() below via transform().
template <typename T, typename D> struct AddMat : binary_function<T, T, D>
{
// Explicit no-op default/copy constructors (the functor carries no state).
__device__ __forceinline__ AddMat() {}
__device__ __forceinline__ AddMat(const AddMat&) {}

// dst element = a + b, clamped to the representable range of D.
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a + b);
}
};
}
namespace cv { namespace gpu { namespace device
{
// Traits that feed the generic transform() machinery: launch configuration
// is derived from the operand/result element sizes via ArithmFuncTraits.
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAdd4<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAdd2<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::AddMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Host-side launcher: packed 4 x 8-bit saturating add over the whole image.
// Untyped PtrStepSzb buffers are reinterpreted as T/T/D element grids.
template <typename T, typename D>
void vadd4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, VAdd4<T, D>(), WithOutMask(), stream);
}
template void vadd4<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vadd4<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vadd4<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vadd4<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Host-side launcher: packed 2 x 16-bit saturating add over the whole image.
template <typename T, typename D>
void vadd2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, VAdd2<T, D>(), WithOutMask(), stream);
}
template void vadd2<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vadd2<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vadd2<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vadd2<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Host-side launcher: generic per-element add with optional mask. When the
// mask is present only masked pixels are written.
template <typename T, typename D>
void addMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, AddMat<T, D>(), WithOutMask(), stream);
}
// Explicit instantiations for every supported src/dst depth pair; narrowing
// combinations that OpenCV does not expose are left commented out.
template void addMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addScalar
namespace arithm
{
// Unary functor: adds a fixed scalar (held in working type S) to each
// element and saturates the result to the destination type D.
template <typename T, typename S, typename D> struct AddScalar : unary_function<T, D>
{
// Scalar operand, captured at construction time.
S val;
explicit AddScalar(S v) : val(v) {}
// dst element = saturate_cast<D>(a + val)
__device__ __forceinline__ D operator ()(T a) const
{
return saturate_cast<D>(a + val);
}
};
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for AddScalar, keyed on element sizes.
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::AddScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Host-side launcher: dst = saturate(src1 + val), with optional mask.
// val is narrowed from double to the working type S before the kernel runs.
template <typename T, typename S, typename D>
void addScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
AddScalar<T, S, D> op(static_cast<S>(val));
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations (S is float except for the all-double variants);
// unsupported narrowing pairs are commented out.
template void addScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void addScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void addScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subMat
namespace arithm
{
// Per-byte (4 x 8-bit) saturating vector subtract; only the four int/uint
// combinations below are specialized.
template <typename T, typename D> struct VSub4;
// uint - uint -> uint, all four bytes subtracted with unsigned saturation.
template <> struct VSub4<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
// SM30+: one SIMD-in-register instruction handles all four bytes.
asm("vsub4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// SM20: four scalar video subtracts; the .b0..b3 destination selectors
// merge each byte result into %3 (the running res).
asm("vsub.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
// NOTE(review): below SM20 res stays 0 — presumably never dispatched there.
return res;
}
// Explicit no-op constructors (stateless functor).
__device__ __forceinline__ VSub4() {}
__device__ __forceinline__ VSub4(const VSub4<uint, uint>& other) {}
};
// int - int -> uint, per-byte subtract saturated to the unsigned range.
template <> struct VSub4<int, uint> : binary_function<int, int, uint>
{
__device__ __forceinline__ uint operator ()(int a, int b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vsub4.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vsub.u32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VSub4() {}
__device__ __forceinline__ VSub4(const VSub4<int, uint>& other) {}
};
// uint - uint -> int, per-byte subtract saturated to the signed range.
template <> struct VSub4<uint, int> : binary_function<uint, uint, int>
{
__device__ __forceinline__ int operator ()(uint a, uint b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vsub4.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vsub.s32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VSub4() {}
__device__ __forceinline__ VSub4(const VSub4<uint, int>& other) {}
};
// int - int -> int, per-byte subtract with signed saturation.
template <> struct VSub4<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vsub4.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vsub.s32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VSub4() {}
__device__ __forceinline__ VSub4(const VSub4<int, int>& other) {}
};
////////////////////////////////////
// Per-halfword (2 x 16-bit) saturating vector subtract; same four
// specializations as VSub4.
template <typename T, typename D> struct VSub2;
// uint - uint -> uint, both halfwords subtracted with unsigned saturation.
template <> struct VSub2<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
// SM30+: single SIMD video instruction for both halfwords.
asm("vsub2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
// SM20: per-halfword scalar video subtracts merged into %3 via .h0/.h1.
asm("vsub.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VSub2() {}
__device__ __forceinline__ VSub2(const VSub2<uint, uint>& other) {}
};
// uint - uint -> int, per-halfword subtract saturated to the signed range.
template <> struct VSub2<uint, int> : binary_function<uint, uint, int>
{
__device__ __forceinline__ int operator ()(uint a, uint b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vsub2.s32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vsub.s32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VSub2() {}
__device__ __forceinline__ VSub2(const VSub2<uint, int>& other) {}
};
// int - int -> uint, per-halfword subtract saturated to the unsigned range.
template <> struct VSub2<int, uint> : binary_function<int, int, uint>
{
__device__ __forceinline__ uint operator ()(int a, int b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vsub2.u32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vsub.u32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.u32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VSub2() {}
__device__ __forceinline__ VSub2(const VSub2<int, uint>& other) {}
};
// int - int -> int, per-halfword subtract with signed saturation.
template <> struct VSub2<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vsub2.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vsub.s32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vsub.s32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VSub2() {}
__device__ __forceinline__ VSub2(const VSub2<int, int>& other) {}
};
////////////////////////////////////
// Generic element-wise subtraction: dst = saturate_cast<D>(a - b).
template <typename T, typename D> struct SubMat : binary_function<T, T, D>
{
__device__ __forceinline__ D operator ()(T a, T b) const
{
return saturate_cast<D>(a - b);
}
// Explicit no-op constructors (stateless functor).
__device__ __forceinline__ SubMat() {}
__device__ __forceinline__ SubMat(const SubMat& other) {}
};
}
namespace cv { namespace gpu { namespace device
{
// Launch-configuration traits for the subtraction functors, keyed on
// operand/result element sizes via ArithmFuncTraits.
template <typename T, typename D> struct TransformFunctorTraits< arithm::VSub4<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::VSub2<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
template <typename T, typename D> struct TransformFunctorTraits< arithm::SubMat<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Host-side launcher: packed 4 x 8-bit saturating subtract over the image.
template <typename T, typename D>
void vsub4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, VSub4<T, D>(), WithOutMask(), stream);
}
template void vsub4<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vsub4<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vsub4<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vsub4<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Host-side launcher: packed 2 x 16-bit saturating subtract over the image.
template <typename T, typename D>
void vsub2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, VSub2<T, D>(), WithOutMask(), stream);
}
template void vsub2<uint, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vsub2<uint, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vsub2<int, uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vsub2<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Host-side launcher: generic per-element subtract with optional mask.
template <typename T, typename D>
void subMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, SubMat<T, D>(), WithOutMask(), stream);
}
// Explicit instantiations for the supported src/dst depth pairs; unsupported
// narrowing pairs are left commented out.
template void subMat<uchar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<uchar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<schar, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<ushort, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<ushort, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<ushort, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<short, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<short, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<short, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<int, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<int, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<float, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subMat<double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subMat<double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// subScalar
namespace arithm
{
// Subtract a scalar from every element: dst = saturate_cast<D>(src1 - val).
// T = source element type, S = intermediate arithmetic type (float or double),
// D = destination element type.
// Implemented by reusing AddScalar with the negated value, so
// sub(x, val) == add(x, -val) carried out in S.
// If mask.data is non-null, only elements with a non-zero mask byte are written.
template <typename T, typename S, typename D>
void subScalar(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
{
AddScalar<T, S, D> op(-static_cast<S>(val));
if (mask.data)
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, mask, stream);
else
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations for each exported (src, intermediate, dst) type
// combination; commented-out lines are combinations OpenCV deliberately
// does not export (they must stay commented to keep the binary interface).
template void subScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
//template void subScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
template void subScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulMat
namespace arithm
{
    // Multiplies each of the four byte lanes of a packed uchar4 (carried in a
    // uint) by a float factor, saturating every product back to [0, 255]
    // before repacking.
    struct Mul_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint pixel, float factor) const
        {
            uint packed = 0;
            packed |= (saturate_cast<uchar>((0xffu & (pixel      )) * factor)      );
            packed |= (saturate_cast<uchar>((0xffu & (pixel >>  8)) * factor) <<  8);
            packed |= (saturate_cast<uchar>((0xffu & (pixel >> 16)) * factor) << 16);
            packed |= (saturate_cast<uchar>((0xffu & (pixel >> 24)) * factor) << 24);
            return packed;
        }

        // Empty default/copy constructors are kept deliberately — an nvcc
        // workaround so the functor behaves well when passed to transform().
        __device__ __forceinline__ Mul_8uc4_32f() {}
        __device__ __forceinline__ Mul_8uc4_32f(const Mul_8uc4_32f& other) {}
    };

    // Multiplies every channel of a short4 by a float factor, saturating each
    // product back to short.
    struct Mul_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 v, float factor) const
        {
            return make_short4(saturate_cast<short>(v.x * factor), saturate_cast<short>(v.y * factor),
                               saturate_cast<short>(v.z * factor), saturate_cast<short>(v.w * factor));
        }

        __device__ __forceinline__ Mul_16sc4_32f() {}
        __device__ __forceinline__ Mul_16sc4_32f(const Mul_16sc4_32f& other) {}
    };

    // Plain element-wise product: saturate_cast<D>(lhs * rhs).
    template <typename T, typename D> struct Mul : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T lhs, T rhs) const
        {
            return saturate_cast<D>(lhs * rhs);
        }

        __device__ __forceinline__ Mul() {}
        __device__ __forceinline__ Mul(const Mul& other) {}
    };

    // Scaled product: saturate_cast<D>(scale * lhs * rhs), with the
    // intermediate arithmetic carried out in S.
    template <typename T, typename S, typename D> struct MulScale : binary_function<T, T, D>
    {
        S scale;

        explicit MulScale(S s) : scale(s) {}

        __device__ __forceinline__ D operator ()(T lhs, T rhs) const
        {
            return saturate_cast<D>(scale * lhs * rhs);
        }
    };
}
namespace cv { namespace gpu { namespace device
{
// Transform-framework traits: pick launch configuration (block shape,
// read/write widths) for the multiply functors based on operand sizes.
template <> struct TransformFunctorTraits<arithm::Mul_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Mul<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Element-wise multiply of a packed 8uc4 image by a 32f image.
void mulMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Mul_8uc4_32f(), WithOutMask(), stream);
}
// Element-wise multiply of a 16sc4 image by a 32f image.
void mulMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Mul_16sc4_32f(), WithOutMask(), stream);
}
// dst = saturate_cast<D>(scale * src1 * src2), computed in S when scale != 1.
// The exact floating-point comparison (scale == 1) is intentional: it selects
// the cheaper unscaled functor only when the caller passed literally 1.
template <typename T, typename S, typename D>
void mulMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
{
if (scale == 1)
{
Mul<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
MulScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
// Explicit instantiations for the exported type combinations; commented-out
// lines are intentionally not exported and must stay commented.
template void mulMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void mulMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void mulMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// mulScalar
namespace arithm
{
    // Multiply-by-scalar functor: dst = saturate_cast<D>(src * val).
    // The scalar is converted to the intermediate type S once, on the host.
    template <typename T, typename S, typename D> struct MulScalar : unary_function<T, D>
    {
        S val;

        explicit MulScalar(S v) : val(v) {}

        __device__ __forceinline__ D operator ()(T x) const
        {
            return saturate_cast<D>(x * val);
        }
    };
}
namespace cv { namespace gpu { namespace device
{
// Transform-framework traits for MulScalar: launch configuration derived
// from source/destination element sizes.
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::MulScalar<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// dst = saturate_cast<D>(src1 * val), with val converted to S (float or
// double) once on the host. Always unmasked.
template <typename T, typename S, typename D>
void mulScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations for the exported type combinations; commented-out
// lines are intentionally not exported and must stay commented.
template void mulScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void mulScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void mulScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divMat
namespace arithm
{
    // Divides each byte lane of a packed uchar4 (carried in a uint) by a
    // float divisor, saturating the quotients back to [0, 255]. Division is
    // done via one reciprocal multiply; a zero divisor yields a zero pixel.
    struct Div_8uc4_32f : binary_function<uint, float, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, float b) const
        {
            uint res = 0;
            if (b != 0)
            {
                b = 1.0f / b;
                res |= (saturate_cast<uchar>((0xffu & (a )) * b) );
                res |= (saturate_cast<uchar>((0xffu & (a >> 8)) * b) << 8);
                res |= (saturate_cast<uchar>((0xffu & (a >> 16)) * b) << 16);
                res |= (saturate_cast<uchar>((0xffu & (a >> 24)) * b) << 24);
            }
            return res;
        }

        // Empty default/copy constructors added for consistency with
        // Mul_8uc4_32f (nvcc workaround for functors passed to transform()).
        __device__ __forceinline__ Div_8uc4_32f() {}
        __device__ __forceinline__ Div_8uc4_32f(const Div_8uc4_32f& other) {}
    };

    // Divides every channel of a short4 by a float divisor, saturating to
    // short; a zero divisor yields a zero vector.
    struct Div_16sc4_32f : binary_function<short4, float, short4>
    {
        __device__ __forceinline__ short4 operator ()(short4 a, float b) const
        {
            return b != 0 ? make_short4(saturate_cast<short>(a.x / b), saturate_cast<short>(a.y / b),
                                        saturate_cast<short>(a.z / b), saturate_cast<short>(a.w / b))
                          : make_short4(0,0,0,0);
        }

        // Empty default/copy constructors added for consistency with
        // Mul_16sc4_32f (same nvcc workaround).
        __device__ __forceinline__ Div_16sc4_32f() {}
        __device__ __forceinline__ Div_16sc4_32f(const Div_16sc4_32f& other) {}
    };

    // Element-wise division with a zero-divisor guard:
    // dst = b != 0 ? saturate_cast<D>(a / b) : 0.
    // For integral T this is integer division.
    template <typename T, typename D> struct Div : binary_function<T, T, D>
    {
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(a / b) : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // float-destination specialization: promote the numerator to float so the
    // division is floating-point even for integral sources.
    template <typename T> struct Div<T, float> : binary_function<T, T, float>
    {
        __device__ __forceinline__ float operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<float>(a) / b : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // double-destination specialization: promote the numerator to double.
    template <typename T> struct Div<T, double> : binary_function<T, T, double>
    {
        __device__ __forceinline__ double operator ()(T a, T b) const
        {
            return b != 0 ? static_cast<double>(a) / b : 0;
        }
        __device__ __forceinline__ Div() {}
        __device__ __forceinline__ Div(const Div& other) {}
    };
    // Scaled division: dst = b != 0 ? saturate_cast<D>(scale * a / b) : 0,
    // with the intermediate arithmetic carried out in S.
    template <typename T, typename S, typename D> struct DivScale : binary_function<T, T, D>
    {
        S scale;
        explicit DivScale(S scale_) : scale(scale_) {}
        __device__ __forceinline__ D operator ()(T a, T b) const
        {
            return b != 0 ? saturate_cast<D>(scale * a / b) : 0;
        }
    };
}
namespace cv { namespace gpu { namespace device
{
// Transform-framework traits: pick launch configuration for the division
// functors based on operand sizes (see ArithmFuncTraits).
template <> struct TransformFunctorTraits<arithm::Div_8uc4_32f> : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T, typename D> struct TransformFunctorTraits< arithm::Div<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivScale<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
namespace arithm
{
// Element-wise division of a packed 8uc4 image by a 32f image.
void divMat_8uc4_32f(PtrStepSz<uint> src1, PtrStepSzf src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Div_8uc4_32f(), WithOutMask(), stream);
}
// Element-wise division of a 16sc4 image by a 32f image.
void divMat_16sc4_32f(PtrStepSz<short4> src1, PtrStepSzf src2, PtrStepSz<short4> dst, cudaStream_t stream)
{
transform(src1, src2, dst, Div_16sc4_32f(), WithOutMask(), stream);
}
// dst = saturate_cast<D>(scale * src1 / src2), computed in S when scale != 1;
// elements with a zero divisor produce 0. The exact floating-point comparison
// (scale == 1) is intentional: it selects the cheaper unscaled functor only
// when the caller passed literally 1.
template <typename T, typename S, typename D>
void divMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream)
{
if (scale == 1)
{
Div<T, D> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
else
{
DivScale<T, S, D> op(static_cast<S>(scale));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
}
// Explicit instantiations for the exported type combinations; commented-out
// lines are intentionally not exported and must stay commented.
template void divMat<uchar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<uchar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<schar, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<ushort, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<ushort, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<ushort, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<short, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<short, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<short, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<int, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<int, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<int, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<int, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<float, float, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<float, float, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<float, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
//template void divMat<double, double, float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
template void divMat<double, double, double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, double scale, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divScalar
namespace arithm
{
// dst = saturate_cast<D>(src1 * (1 / val)): division by a scalar is
// implemented as one multiplication by the reciprocal (computed in double,
// then narrowed to S).
// NOTE(review): for integral D the saturate_cast of a * (1/val) can round
// differently than true division a / val for some inputs, and val == 0
// produces an infinite reciprocal rather than the zero-divisor guard used by
// divMat — confirm callers reject a zero scalar before reaching here.
template <typename T, typename S, typename D>
void divScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
MulScalar<T, S, D> op(static_cast<S>(1.0 / val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations of divScalar for the exported type combinations;
// commented-out lines are intentionally not exported and must stay commented.
template void divScalar<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divScalar<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divScalar<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// divInv
namespace arithm
{
// Functor for "scalar divided by matrix": yields val / element per pixel.
// A zero denominator produces 0, matching OpenCV's division-by-zero
// convention for element-wise ops.
template <typename T, typename S, typename D> struct DivInv : unary_function<T, D>
{
S val; // numerator, already converted to the work type S

explicit DivInv(S val_) : val(val_) {}

__device__ __forceinline__ D operator ()(T denom) const
{
if (denom != 0)
return saturate_cast<D>(val / denom);
return 0;
}
};
}
namespace cv { namespace gpu { namespace device
{
// Tuning traits (block shape / read-write granularity) for the DivInv
// functor, keyed on the source and destination element sizes.
template <typename T, typename S, typename D> struct TransformFunctorTraits< arithm::DivInv<T, S, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
}}}
// Host wrapper for "scalar / matrix": launches the DivInv transform.
// T = source element type, S = work type the division is carried out in,
// D = destination type.
namespace arithm
{
template <typename T, typename S, typename D>
void divInv(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
// Functor holds the numerator in the work type S; the transform runs over
// the whole image (no mask) on the supplied stream.
DivInv<T, S, D> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
// Commented-out combinations are intentionally not exported.
template void divInv<uchar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<uchar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<schar, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<ushort, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<ushort, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<ushort, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<short, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<short, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<short, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<int, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<int, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<int, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<int, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<float, float, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<float, float, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<float, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, uchar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, schar>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, ushort>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, short>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, int>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
//template void divInv<double, double, float>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
template void divInv<double, double, double>(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffMat
// Functors for element-wise absolute difference |a - b|.
// VAbsDiff4 / VAbsDiff2 use the SIMD-within-a-register PTX "video"
// instructions to process four bytes / two halfwords packed into one 32-bit
// word per thread.
namespace arithm
{
template <typename T, typename D> struct VAbsDiff4;
// Four packed unsigned bytes per 32-bit word.
template <> struct VAbsDiff4<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
// SM30+: one vabsdiff4 instruction for all four byte lanes.
#if __CUDA_ARCH__ >= 300
asm("vabsdiff4.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
// SM20..SM2x fallback: one scalar vabsdiff per byte lane.
#elif __CUDA_ARCH__ >= 200
asm("vabsdiff.u32.u32.u32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
// NOTE(review): below SM20 no instruction is emitted and res stays 0;
// presumably these functors are only dispatched on SM20+ — confirm at
// the call sites.
return res;
}
__device__ __forceinline__ VAbsDiff4() {}
__device__ __forceinline__ VAbsDiff4(const VAbsDiff4<uint, uint>& other) {}
};
// Four packed signed bytes per 32-bit word (saturating).
template <> struct VAbsDiff4<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vabsdiff4.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vabsdiff.s32.s32.s32.sat %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAbsDiff4() {}
__device__ __forceinline__ VAbsDiff4(const VAbsDiff4<int, int>& other) {}
};
////////////////////////////////////
template <typename T, typename D> struct VAbsDiff2;
// Two packed unsigned halfwords per 32-bit word.
template <> struct VAbsDiff2<uint, uint> : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
uint res = 0;
#if __CUDA_ARCH__ >= 300
asm("vabsdiff2.u32.u32.u32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vabsdiff.u32.u32.u32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.u32.u32.u32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAbsDiff2() {}
__device__ __forceinline__ VAbsDiff2(const VAbsDiff2<uint, uint>& other) {}
};
// Two packed signed halfwords per 32-bit word (saturating).
template <> struct VAbsDiff2<int, int> : binary_function<int, int, int>
{
__device__ __forceinline__ int operator ()(int a, int b) const
{
int res = 0;
#if __CUDA_ARCH__ >= 300
asm("vabsdiff2.s32.s32.s32.sat %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#elif __CUDA_ARCH__ >= 200
asm("vabsdiff.s32.s32.s32.sat %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
asm("vabsdiff.s32.s32.s32.sat %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
#endif
return res;
}
__device__ __forceinline__ VAbsDiff2() {}
__device__ __forceinline__ VAbsDiff2(const VAbsDiff2<int, int>& other) {}
};
////////////////////////////////////
// Overload set so AbsDiffMat picks the right absolute-value routine for the
// promoted type of (a - b): int for the integer element types, float/double
// for the floating-point ones.
__device__ __forceinline__ int _abs(int a)
{
return ::abs(a);
}
__device__ __forceinline__ float _abs(float a)
{
return ::fabsf(a);
}
__device__ __forceinline__ double _abs(double a)
{
return ::fabs(a);
}
// Generic per-element |a - b|, saturated back to T. The subtraction happens
// in the usual promoted arithmetic type, so e.g. uchar operands do not wrap.
template <typename T> struct AbsDiffMat : binary_function<T, T, T>
{
__device__ __forceinline__ T operator ()(T a, T b) const
{
return saturate_cast<T>(_abs(a - b));
}
__device__ __forceinline__ AbsDiffMat() {}
__device__ __forceinline__ AbsDiffMat(const AbsDiffMat& other) {}
};
}
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the packed-byte absolute-difference functor.
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAbsDiff4<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
// Tuning traits for the packed-halfword absolute-difference functor.
template <typename T, typename D> struct TransformFunctorTraits< arithm::VAbsDiff2<T, D> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(D)>
{
};
////////////////////////////////////
// Tuning traits for the generic per-element absolute-difference functor.
template <typename T> struct TransformFunctorTraits< arithm::AbsDiffMat<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host wrappers launching the absolute-difference transforms.
namespace arithm
{
// Packed path: four 8-bit lanes per 32-bit word (src rows must be
// reinterpretable as uint/int — presumably guaranteed by the caller; verify).
template <typename T>
void vabsDiff4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, VAbsDiff4<T, T>(), WithOutMask(), stream);
}
template void vabsDiff4<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vabsDiff4<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Packed path: two 16-bit lanes per 32-bit word.
template <typename T>
void vabsDiff2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, VAbsDiff2<T, T>(), WithOutMask(), stream);
}
template void vabsDiff2<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void vabsDiff2<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Generic per-element path for all supported element types.
template <typename T>
void absDiffMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, AbsDiffMat<T>(), WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void absDiffMat<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<schar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<short>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<float>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absDiffScalar
namespace arithm
{
// Per-element |src - val|, with val held in the work type S and the result
// saturated back to the element type T.
template <typename T, typename S> struct AbsDiffScalar : unary_function<T, T>
{
S val; // scalar operand, pre-converted to the work type

explicit AbsDiffScalar(S val_) : val(val_) {}

__device__ __forceinline__ T operator ()(T src) const
{
// src - val is evaluated in S (the common arithmetic type for the
// instantiated T/S pairs), then |.| is taken and clamped to T.
const S diff = src - val;
return saturate_cast<T>(abs_func<S>()(diff));
}
};
}
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the scalar absolute-difference functor.
template <typename T, typename S> struct TransformFunctorTraits< arithm::AbsDiffScalar<T, S> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host wrapper for |matrix - scalar|.
namespace arithm
{
template <typename T, typename S>
void absDiffScalar(PtrStepSzb src1, double val, PtrStepSzb dst, cudaStream_t stream)
{
AbsDiffScalar<T, S> op(static_cast<S>(val));
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void absDiffScalar<uchar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<schar, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<ushort, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<short, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<int, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<float, float>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void absDiffScalar<double, double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// absMat
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the device abs_func used by absMat.
template <typename T> struct TransformFunctorTraits< abs_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host wrapper for per-element absolute value.
namespace arithm
{
template <typename T>
void absMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, abs_func<T>(), WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void absMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void absMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrMat
namespace arithm
{
// Per-element square, saturated to the value range of T.
template <typename T> struct Sqr : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T v) const
{
// v * v is computed in the promoted arithmetic type (so small integer
// types do not wrap), then clamped back to T.
return saturate_cast<T>(v * v);
}

__device__ __forceinline__ Sqr() {}
__device__ __forceinline__ Sqr(const Sqr& other) {}
};
}
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the square functor.
template <typename T> struct TransformFunctorTraits< arithm::Sqr<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host wrapper for per-element square.
namespace arithm
{
template <typename T>
void sqrMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Sqr<T>(), WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void sqrMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// sqrtMat
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the device sqrt_func used by sqrtMat.
template <typename T> struct TransformFunctorTraits< sqrt_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host wrapper for per-element square root.
namespace arithm
{
template <typename T>
void sqrtMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, sqrt_func<T>(), WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void sqrtMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void sqrtMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// logMat
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the device log_func used by logMat.
template <typename T> struct TransformFunctorTraits< log_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host wrapper for per-element natural logarithm.
namespace arithm
{
template <typename T>
void logMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, log_func<T>(), WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void logMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void logMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// expMat
namespace arithm
{
// Per-element exponential, saturated back to T (integer element types clamp
// instead of overflowing).
template <typename T> struct Exp : unary_function<T, T>
{
__device__ __forceinline__ T operator ()(T x) const
{
// Delegate the math to the device exp_func, then clamp to T's range.
return saturate_cast<T>(exp_func<T>()(x));
}

__device__ __forceinline__ Exp() {}
__device__ __forceinline__ Exp(const Exp& other) {}
};
}
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the exponential functor.
template <typename T> struct TransformFunctorTraits< arithm::Exp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host wrapper for per-element exponential.
namespace arithm
{
template <typename T>
void expMat(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream)
{
transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, Exp<T>(), WithOutMask(), stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void expMat<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
template void expMat<double>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpMat
namespace arithm
{
// Adapts a boolean comparison Op into an OpenCV-style mask byte:
// all bits set (255) when the predicate holds, 0 otherwise.
template <class Op, typename T>
struct Cmp : binary_function<T, T, uchar>
{
__device__ __forceinline__ uchar operator()(T lhs, T rhs) const
{
Op pred;
// Negating the boolean result gives 0x00 / 0xFF after the uchar cast.
return -pred(lhs, rhs);
}
};
}
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the comparison-mask functor (uchar output).
template <class Op, typename T> struct TransformFunctorTraits< arithm::Cmp<Op, T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
// Host wrappers for matrix-vs-matrix comparisons. Each produces a uchar mask
// (255 where the predicate holds, 0 elsewhere).
// NOTE(review): only Eq/Ne/Lt/Le are exported here; Gt/Ge are presumably
// obtained by swapping the source operands at the call site — verify.
namespace arithm
{
template <template <typename> class Op, typename T>
void cmpMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
Cmp<Op<T>, T> op;
transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, dst, op, WithOutMask(), stream);
}
template <typename T> void cmpMatEq(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatNe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<not_equal_to, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLt(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<less, T>(src1, src2, dst, stream);
}
template <typename T> void cmpMatLe(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
cmpMat<less_equal, T>(src1, src2, dst, stream);
}
// Explicit instantiations: the exported host-linkable symbols.
template void cmpMatEq<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatEq<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatNe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLt<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void cmpMatLe<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// cmpScalar
namespace arithm
{
// Helper macro: the CUDA vector type that holds `cn` channels of `type`.
#define TYPE_VEC(type, cn) typename TypeVec<type, cn>::vec_type
// Compares each channel of a pixel against a per-channel scalar, producing a
// uchar mask per channel (255 where Op holds, 0 elsewhere — see Cmp above).
// Specialized for 1..4 channels; the primary template is left undefined.
template <class Op, typename T, int cn> struct CmpScalar;
// 1 channel: plain scalar in, single mask byte out.
template <class Op, typename T>
struct CmpScalar<Op, T, 1> : unary_function<T, uchar>
{
const T val;
__host__ explicit CmpScalar(T val_) : val(val_) {}
__device__ __forceinline__ uchar operator()(T src) const
{
Cmp<Op, T> op;
return op(src, val);
}
};
// 2 channels: compare x and y lanes independently.
template <class Op, typename T>
struct CmpScalar<Op, T, 2> : unary_function<TYPE_VEC(T, 2), TYPE_VEC(uchar, 2)>
{
const TYPE_VEC(T, 2) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 2) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 2) operator()(const TYPE_VEC(T, 2) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 2)>::make(op(src.x, val.x), op(src.y, val.y));
}
};
// 3 channels: compare x, y and z lanes independently.
template <class Op, typename T>
struct CmpScalar<Op, T, 3> : unary_function<TYPE_VEC(T, 3), TYPE_VEC(uchar, 3)>
{
const TYPE_VEC(T, 3) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 3) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 3) operator()(const TYPE_VEC(T, 3) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 3)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z));
}
};
// 4 channels: compare x, y, z and w lanes independently.
template <class Op, typename T>
struct CmpScalar<Op, T, 4> : unary_function<TYPE_VEC(T, 4), TYPE_VEC(uchar, 4)>
{
const TYPE_VEC(T, 4) val;
__host__ explicit CmpScalar(TYPE_VEC(T, 4) val_) : val(val_) {}
__device__ __forceinline__ TYPE_VEC(uchar, 4) operator()(const TYPE_VEC(T, 4) & src) const
{
Cmp<Op, T> op;
return VecTraits<TYPE_VEC(uchar, 4)>::make(op(src.x, val.x), op(src.y, val.y), op(src.z, val.z), op(src.w, val.w));
}
};
#undef TYPE_VEC
}
namespace cv { namespace gpu { namespace device
{
// Tuning traits for the single-channel CmpScalar only; the multi-channel
// specializations fall back to the default TransformFunctorTraits.
template <class Op, typename T> struct TransformFunctorTraits< arithm::CmpScalar<Op, T, 1> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(uchar)>
{
};
}}}
namespace arithm
{
// Launches a matrix-vs-scalar comparison for a fixed channel count cn.
// val carries up to four per-channel scalars; unused entries are ignored by
// the packed vector type.
template <template <typename> class Op, typename T, int cn>
void cmpScalar(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type src_t;
typedef typename TypeVec<uchar, cn>::vec_type dst_t;

// Convert each scalar channel to the element type, then pack into the
// cn-channel vector type expected by the functor.
T channels[4];
for (int i = 0; i < 4; ++i)
    channels[i] = static_cast<T>(val[i]);
const src_t packed = VecTraits<src_t>::make(channels);

CmpScalar<Op<T>, T, cn> op(packed);
transform((PtrStepSz<src_t>) src, (PtrStepSz<dst_t>) dst, op, WithOutMask(), stream);
}
// Channel-count dispatcher for the "equal to scalar" comparison.
// Precondition: cn is in [1, 4]; slot 0 is a null placeholder and must not
// be reached.
template <typename T> void cmpScalarEq(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<equal_to, T, 1>,
cmpScalar<equal_to, T, 2>,
cmpScalar<equal_to, T, 3>,
cmpScalar<equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
// Channel-count dispatcher for the "not equal to scalar" comparison.
// Precondition: cn is in [1, 4]; slot 0 is a null placeholder.
template <typename T> void cmpScalarNe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<not_equal_to, T, 1>,
cmpScalar<not_equal_to, T, 2>,
cmpScalar<not_equal_to, T, 3>,
cmpScalar<not_equal_to, T, 4>
};
funcs[cn](src, val, dst, stream);
}
// Channel-count dispatcher for the "less than scalar" comparison.
// Precondition: cn is in [1, 4]; slot 0 is a null placeholder.
template <typename T> void cmpScalarLt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less, T, 1>,
cmpScalar<less, T, 2>,
cmpScalar<less, T, 3>,
cmpScalar<less, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarLe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<less_equal, T, 1>,
cmpScalar<less_equal, T, 2>,
cmpScalar<less_equal, T, 3>,
cmpScalar<less_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGt(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater, T, 1>,
cmpScalar<greater, T, 2>,
cmpScalar<greater, T, 3>,
cmpScalar<greater, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template <typename T> void cmpScalarGe(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb src, double val[4], PtrStepSzb dst, cudaStream_t stream);
static const func_t funcs[] =
{
0,
cmpScalar<greater_equal, T, 1>,
cmpScalar<greater_equal, T, 2>,
cmpScalar<greater_equal, T, 3>,
cmpScalar<greater_equal, T, 4>
};
funcs[cn](src, val, dst, stream);
}
template void cmpScalarEq<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarEq<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarNe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarLe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGt<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<uchar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<schar >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<ushort>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<short >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<int >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<float >(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
template void cmpScalarGe<double>(PtrStepSzb src, int cn, double val[4], PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitMat
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the bitwise functors: the element size
    // read equals the element size written (T -> T).
    template <typename T> struct TransformFunctorTraits< bit_not<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< bit_and<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< bit_or<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< bit_xor<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
// Host-side launchers for per-element bitwise matrix operations.
// A non-null mask restricts the operation to the masked elements; otherwise
// the whole matrix is processed.
namespace arithm
{
    // dst = ~src (optionally masked).
    template <typename T> void bitMatNot(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        bit_not<T> op;
        if (!mask.data)
            transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, mask, stream);
    }
    // dst = src1 & src2 (optionally masked).
    template <typename T> void bitMatAnd(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        bit_and<T> op;
        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, mask, stream);
    }
    // dst = src1 | src2 (optionally masked).
    template <typename T> void bitMatOr(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        bit_or<T> op;
        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, mask, stream);
    }
    // dst = src1 ^ src2 (optionally masked).
    template <typename T> void bitMatXor(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream)
    {
        bit_xor<T> op;
        if (!mask.data)
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
        else
            transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, mask, stream);
    }
    template void bitMatNot<uchar>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatNot<ushort>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatNot<uint>(PtrStepSzb src, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatAnd<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatAnd<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatAnd<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatOr<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatOr<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatOr<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatXor<uchar>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatXor<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
    template void bitMatXor<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, PtrStepb mask, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////////////////
// bitScalar
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the scalar-bound bitwise functors
    // (binder2nd wraps the binary functor with a fixed second operand).
    template <typename T> struct TransformFunctorTraits< binder2nd< bit_and<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< bit_or<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< bit_xor<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
// Host-side launchers for per-element "matrix op scalar" bitwise operations
// (unmasked only; the scalar src2 is bound as the second operand).
namespace arithm
{
    // dst = src1 & src2 for every element.
    template <typename T> void bitScalarAnd(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
    {
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_and<T>(), src2), WithOutMask(), stream);
    }
    // dst = src1 | src2 for every element.
    template <typename T> void bitScalarOr(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
    {
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_or<T>(), src2), WithOutMask(), stream);
    }
    // dst = src1 ^ src2 for every element.
    template <typename T> void bitScalarXor(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream)
    {
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(bit_xor<T>(), src2), WithOutMask(), stream);
    }
    template void bitScalarAnd<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarAnd<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarAnd<uint>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarOr<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarOr<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarOr<uint>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarXor<uchar>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarXor<ushort>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
    template void bitScalarXor<uint>(PtrStepSzb src1, uint src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// min
namespace arithm
{
    // Packed per-byte minimum: treats a 32-bit word as 4 independent u8/s8
    // lanes and takes the lane-wise minimum via PTX SIMD video instructions.
    template <typename T> struct VMin4;
    template <> struct VMin4<uint> : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            // Kepler+: one vmin4 instruction handles all four byte lanes.
            asm("vmin4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            // Fermi: per-byte vmin using sub-word (.bN) selectors.
            asm("vmin.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            // NOTE(review): for __CUDA_ARCH__ < 200 res is left at 0 -- presumably
            // this path is never built for pre-Fermi targets; confirm against the
            // project's supported architectures.
            return res;
        }
        // NOTE(review): empty device-only ctors kept as-is; reason not visible
        // in this file.
        __device__ __forceinline__ VMin4() {}
        __device__ __forceinline__ VMin4(const VMin4& other) {}
    };
    template <> struct VMin4<int> : binary_function<int, int, int>
    {
        __device__ __forceinline__ int operator ()(int a, int b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            // Signed byte-lane minimum (vmin4.s32).
            asm("vmin4.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vmin.s32.s32.s32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.s32.s32.s32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.s32.s32.s32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.s32.s32.s32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }
        __device__ __forceinline__ VMin4() {}
        __device__ __forceinline__ VMin4(const VMin4& other) {}
    };
    ////////////////////////////////////
    // Packed per-halfword minimum: 2 independent u16/s16 lanes per 32-bit word.
    template <typename T> struct VMin2;
    template <> struct VMin2<uint> : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vmin2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            // Fermi: per-halfword vmin using .h0/.h1 selectors.
            asm("vmin.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }
        __device__ __forceinline__ VMin2() {}
        __device__ __forceinline__ VMin2(const VMin2& other) {}
    };
    template <> struct VMin2<int> : binary_function<int, int, int>
    {
        __device__ __forceinline__ int operator ()(int a, int b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vmin2.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vmin.s32.s32.s32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmin.s32.s32.s32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }
        __device__ __forceinline__ VMin2() {}
        __device__ __forceinline__ VMin2(const VMin2& other) {}
    };
}
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the minimum functors (T -> T).
    template <typename T> struct TransformFunctorTraits< arithm::VMin4<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    ////////////////////////////////////
    template <typename T> struct TransformFunctorTraits< arithm::VMin2<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    ////////////////////////////////////
    template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
// Host-side launchers for per-element minimum.
namespace arithm
{
    // Packed 4-lane (byte-wise) minimum over 32-bit words.
    template <typename T> void vmin4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        VMin4<T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    // Packed 2-lane (halfword-wise) minimum over 32-bit words.
    template <typename T> void vmin2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        VMin2<T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    // Scalar-per-element minimum of two matrices.
    template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        minimum<T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    template void vmin4<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void vmin4<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void vmin2<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void vmin2<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    // Minimum of each element and a scalar (scalar bound as second operand).
    template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
    {
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
    }
    template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// max
namespace arithm
{
    // Packed per-byte maximum: treats a 32-bit word as 4 independent u8/s8
    // lanes and takes the lane-wise maximum via PTX SIMD video instructions.
    template <typename T> struct VMax4;
    template <> struct VMax4<uint> : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            // Kepler+: one vmax4 instruction handles all four byte lanes.
            asm("vmax4.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            // Fermi: per-byte vmax using sub-word (.bN) selectors.
            asm("vmax.u32.u32.u32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.u32.u32.u32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.u32.u32.u32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.u32.u32.u32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            // NOTE(review): for __CUDA_ARCH__ < 200 res is left at 0 -- presumably
            // never built for pre-Fermi targets; confirm against the project's
            // supported architectures.
            return res;
        }
        __device__ __forceinline__ VMax4() {}
        __device__ __forceinline__ VMax4(const VMax4& other) {}
    };
    template <> struct VMax4<int> : binary_function<int, int, int>
    {
        __device__ __forceinline__ int operator ()(int a, int b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            // Signed byte-lane maximum (vmax4.s32).
            asm("vmax4.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vmax.s32.s32.s32 %0.b0, %1.b0, %2.b0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.s32.s32.s32 %0.b1, %1.b1, %2.b1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.s32.s32.s32 %0.b2, %1.b2, %2.b2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.s32.s32.s32 %0.b3, %1.b3, %2.b3, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }
        __device__ __forceinline__ VMax4() {}
        __device__ __forceinline__ VMax4(const VMax4& other) {}
    };
    ////////////////////////////////////
    // Packed per-halfword maximum: 2 independent u16/s16 lanes per 32-bit word.
    template <typename T> struct VMax2;
    template <> struct VMax2<uint> : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            uint res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vmax2.u32.u32.u32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            // Fermi: per-halfword vmax using .h0/.h1 selectors.
            asm("vmax.u32.u32.u32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.u32.u32.u32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }
        __device__ __forceinline__ VMax2() {}
        __device__ __forceinline__ VMax2(const VMax2& other) {}
    };
    template <> struct VMax2<int> : binary_function<int, int, int>
    {
        __device__ __forceinline__ int operator ()(int a, int b) const
        {
            int res = 0;
        #if __CUDA_ARCH__ >= 300
            asm("vmax2.s32.s32.s32 %0, %1, %2, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #elif __CUDA_ARCH__ >= 200
            asm("vmax.s32.s32.s32 %0.h0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
            asm("vmax.s32.s32.s32 %0.h1, %1.h1, %2.h1, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(res));
        #endif
            return res;
        }
        __device__ __forceinline__ VMax2() {}
        __device__ __forceinline__ VMax2(const VMax2& other) {}
    };
}
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the maximum functors (T -> T).
    template <typename T> struct TransformFunctorTraits< arithm::VMax4<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    ////////////////////////////////////
    template <typename T> struct TransformFunctorTraits< arithm::VMax2<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    ////////////////////////////////////
    template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
// Host-side launchers for per-element maximum.
namespace arithm
{
    // Packed 4-lane (byte-wise) maximum over 32-bit words.
    template <typename T> void vmax4(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        VMax4<T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    // Packed 2-lane (halfword-wise) maximum over 32-bit words.
    template <typename T> void vmax2(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        VMax2<T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    // Scalar-per-element maximum of two matrices.
    template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
    {
        maximum<T> op;
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    template void vmax4<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void vmax4<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void vmax2<uint>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void vmax2<int>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
    // Maximum of each element and a scalar (scalar bound as second operand).
    template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
    {
        transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::gpu::device::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
    }
    template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
    template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// threshold
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the five threshold functors (T -> T).
    template <typename T> struct TransformFunctorTraits< thresh_binary_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< thresh_binary_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< thresh_trunc_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< thresh_to_zero_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< thresh_to_zero_inv_func<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
// Host-side launcher for per-element thresholding.
namespace arithm
{
    // Instantiate the requested threshold functor and run it over the matrix.
    template <template <typename> class Op, typename T>
    void threshold_caller(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream)
    {
        transform(src, dst, Op<T>(thresh, maxVal), WithOutMask(), stream);
    }
    // Dispatch on the runtime threshold type code; the table order must match
    // the caller's `type` values (binary, binary_inv, trunc, to_zero,
    // to_zero_inv).
    template <typename T>
    void threshold(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream)
    {
        typedef void (*func_t)(PtrStepSz<T> src, PtrStepSz<T> dst, T thresh, T maxVal, cudaStream_t stream);
        static const func_t funcs[] =
        {
            threshold_caller<thresh_binary_func, T>,
            threshold_caller<thresh_binary_inv_func, T>,
            threshold_caller<thresh_trunc_func, T>,
            threshold_caller<thresh_to_zero_func, T>,
            threshold_caller<thresh_to_zero_inv_func, T>
        };
        funcs[type]((PtrStepSz<T>) src, (PtrStepSz<T>) dst, static_cast<T>(thresh), static_cast<T>(maxVal), stream);
    }
    template void threshold<uchar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<schar>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<ushort>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<short>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<int>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<float>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
    template void threshold<double>(PtrStepSzb src, PtrStepSzb dst, double thresh, double maxVal, int type, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// pow
namespace arithm
{
    // Raises each element to a fixed power. The Signed template flag selects
    // a specialization: unsigned integer types can go straight through __powf,
    // while signed types must factor out the sign first.
    template<typename T, bool Signed = numeric_limits<T>::is_signed> struct PowOp : unary_function<T, T>
    {
        float power;
        PowOp(double power_) : power(static_cast<float>(power_)) {}
        __device__ __forceinline__ T operator()(T e) const
        {
            // Unsigned input is never negative, so __powf is safe here.
            return saturate_cast<T>(__powf((float)e, power));
        }
    };
    template<typename T> struct PowOp<T, true> : unary_function<T, T>
    {
        float power;
        PowOp(double power_) : power(static_cast<float>(power_)) {}
        __device__ __forceinline__ T operator()(T e) const
        {
            // __powf(x, y) is computed as exp2(y * log2(x)) and yields NaN for
            // x < 0, which made negative inputs collapse through saturate_cast
            // instead of producing |e|^power. Mirror the float specialization:
            // raise the magnitude, then restore the sign for odd integral
            // powers (non-integral powers of negative bases have no real
            // result; the magnitude convention matches PowOp<float> below).
            T res = saturate_cast<T>(__powf(::fabs((float)e), power));
            if ((e < 0) && (1 & static_cast<int>(power)))
                res *= -1;
            return res;
        }
    };
    template<> struct PowOp<float> : unary_function<float, float>
    {
        const float power;
        PowOp(double power_) : power(static_cast<float>(power_)) {}
        __device__ __forceinline__ float operator()(float e) const
        {
            // Magnitude-based power; __powf would be NaN for negative bases.
            return __powf(::fabs(e), power);
        }
    };
    template<> struct PowOp<double> : unary_function<double, double>
    {
        double power;
        PowOp(double power_) : power(power_) {}
        __device__ __forceinline__ double operator()(double e) const
        {
            // Full-precision pow on the magnitude (requires double support).
            return ::pow(::fabs(e), power);
        }
    };
}
namespace cv { namespace gpu { namespace device
{
    // Launch-configuration traits for the power functor (T -> T).
    template <typename T> struct TransformFunctorTraits< arithm::PowOp<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
// Host-side launcher for per-element power: dst = src ^ power.
namespace arithm
{
    template<typename T>
    void pow(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream)
    {
        PowOp<T> op(power);
        transform((PtrStepSz<T>) src, (PtrStepSz<T>) dst, op, WithOutMask(), stream);
    }
    template void pow<uchar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<schar>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<short>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<ushort>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<int>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<float>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
    template void pow<double>(PtrStepSzb src, double power, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// addWeighted
namespace arithm
{
    // Compile-time flag: does T require double-precision accumulation?
    template <typename T> struct UseDouble_
    {
        enum {value = 0};
    };
    template <> struct UseDouble_<double>
    {
        enum {value = 1};
    };
    // True when any of the two sources or the destination is double.
    template <typename T1, typename T2, typename D> struct UseDouble
    {
        enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
    };
    // dst = saturate(a*alpha + b*beta + gamma); the useDouble flag picks the
    // accumulation precision.
    template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
    // Single-precision variant: the double coefficients are narrowed to float
    // once at construction time, on the host.
    template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
    {
        float alpha;
        float beta;
        float gamma;
        AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
        __device__ __forceinline__ D operator ()(T1 a, T2 b) const
        {
            return saturate_cast<D>(a * alpha + b * beta + gamma);
        }
    };
    // Double-precision variant: coefficients kept as-is.
    template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
    {
        double alpha;
        double beta;
        double gamma;
        AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
        __device__ __forceinline__ D operator ()(T1 a, T2 b) const
        {
            return saturate_cast<D>(a * alpha + b * beta + gamma);
        }
    };
    // Public functor: selects the right precision variant automatically.
    template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
    {
        AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
    };
}
namespace cv { namespace gpu { namespace device
{
    // Default traits when the two source element sizes differ.
    template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
    {
    };
    // Tuned traits when both sources share the same element size.
    template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
    {
    };
    template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
    {
    };
}}}
namespace arithm
{
template <typename T1, typename T2, typename D>
void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream)
{
AddWeighted<T1, T2, D> op(alpha, beta, gamma);
transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
// Explicit instantiations of addWeighted for every supported (T1, T2, D)
// combination. Source pairs are emitted with T1 <= T2 in the depth ordering
// uchar < schar < ushort < short < int < float < double (the symmetric pair
// is handled by swapping arguments at the call site); the destination type D
// always ranges over all seven depths, giving 28 * 7 = 196 instantiations.
#define ARITHM_ADDW_INST(T1, T2, D) \
    template void addWeighted<T1, T2, D>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);

#define ARITHM_ADDW_INST_ALL_D(T1, T2) \
    ARITHM_ADDW_INST(T1, T2, uchar) \
    ARITHM_ADDW_INST(T1, T2, schar) \
    ARITHM_ADDW_INST(T1, T2, ushort) \
    ARITHM_ADDW_INST(T1, T2, short) \
    ARITHM_ADDW_INST(T1, T2, int) \
    ARITHM_ADDW_INST(T1, T2, float) \
    ARITHM_ADDW_INST(T1, T2, double)

ARITHM_ADDW_INST_ALL_D(uchar, uchar)
ARITHM_ADDW_INST_ALL_D(uchar, schar)
ARITHM_ADDW_INST_ALL_D(uchar, ushort)
ARITHM_ADDW_INST_ALL_D(uchar, short)
ARITHM_ADDW_INST_ALL_D(uchar, int)
ARITHM_ADDW_INST_ALL_D(uchar, float)
ARITHM_ADDW_INST_ALL_D(uchar, double)
ARITHM_ADDW_INST_ALL_D(schar, schar)
ARITHM_ADDW_INST_ALL_D(schar, ushort)
ARITHM_ADDW_INST_ALL_D(schar, short)
ARITHM_ADDW_INST_ALL_D(schar, int)
ARITHM_ADDW_INST_ALL_D(schar, float)
ARITHM_ADDW_INST_ALL_D(schar, double)
ARITHM_ADDW_INST_ALL_D(ushort, ushort)
ARITHM_ADDW_INST_ALL_D(ushort, short)
ARITHM_ADDW_INST_ALL_D(ushort, int)
ARITHM_ADDW_INST_ALL_D(ushort, float)
ARITHM_ADDW_INST_ALL_D(ushort, double)
ARITHM_ADDW_INST_ALL_D(short, short)
ARITHM_ADDW_INST_ALL_D(short, int)
ARITHM_ADDW_INST_ALL_D(short, float)
ARITHM_ADDW_INST_ALL_D(short, double)
ARITHM_ADDW_INST_ALL_D(int, int)
ARITHM_ADDW_INST_ALL_D(int, float)
ARITHM_ADDW_INST_ALL_D(int, double)
ARITHM_ADDW_INST_ALL_D(float, float)
ARITHM_ADDW_INST_ALL_D(float, double)
ARITHM_ADDW_INST_ALL_D(double, double)

#undef ARITHM_ADDW_INST_ALL_D
#undef ARITHM_ADDW_INST
}
#endif /* CUDA_DISABLER */
|
8876521d5c5b70622ddbb2a16acf910dd4edf6be.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <ctime>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cassert>
using namespace std;
/* Reads one whitespace-delimited decimal integer from fp into *out.
 * Exits with an error message on malformed/truncated input. Unlike the
 * previous fscanf-inside-assert pattern, this validation (and the read
 * itself!) survives NDEBUG builds. */
static void readIntOrDie(FILE * fp, int * out)
{
    if (fscanf(fp, "%d", out) != 1) {
        fprintf(stderr, "loadMiddleburyMRFData: malformed or truncated input file\n");
        exit(1);
    }
}

/* Loads a Middlebury-style binary (2-label) MRF problem from a text file.
 *
 * File layout (all whitespace-separated ints):
 *   width height nLabels
 *   width*height ground-truth labels   (read and discarded)
 *   width*height data costs, positive label
 *   width*height data costs, negative label
 *   height*(width-1) horizontal smoothness weights (row-major)
 *   (height-1)*width  vertical smoothness weights  (row-major)
 *
 * All four output arrays are malloc'd width*height int buffers owned by the
 * caller. Boundary entries with no corresponding edge (last COLUMN of hCue,
 * last ROW of vCue) are zeroed. Only nLabels == 2 is supported.
 */
void loadMiddleburyMRFData(const char * filename, int* &data_positive, int* &data_negative, int* &hCue, int* &vCue, int &width, int &height, int &nLabels)
{
    FILE * fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "loadMiddleburyMRFData: cannot open %s\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d %d %d", &width, &height, &nLabels) != 3) {
        fprintf(stderr, "loadMiddleburyMRFData: bad header in %s\n", filename);
        exit(1);
    }
    if (nLabels != 2) {
        fprintf(stderr, "loadMiddleburyMRFData: only 2-label problems are supported (got %d)\n", nLabels);
        exit(1);
    }

    int i, v;

    // Skip the ground-truth labeling; it is not used by the solver.
    for (i = 0; i < width * height; i++)
        readIntOrDie(fp, &v);

    data_positive = (int*) malloc(width * height * sizeof(int));
    data_negative = (int*) malloc(width * height * sizeof(int));
    if (!data_positive || !data_negative) {
        fprintf(stderr, "loadMiddleburyMRFData: out of memory\n");
        exit(1);
    }

    for (i = 0; i < width * height; i++)
        readIntOrDie(fp, &data_positive[i]);
    for (i = 0; i < width * height; i++)
        readIntOrDie(fp, &data_negative[i]);

    hCue = (int*) malloc(width * height * sizeof(int));
    vCue = (int*) malloc(width * height * sizeof(int));
    if (!hCue || !vCue) {
        fprintf(stderr, "loadMiddleburyMRFData: out of memory\n");
        exit(1);
    }

    int x, y;
    // Horizontal weights: edge (x,y)-(x+1,y) exists only for x < width-1.
    for (y = 0; y < height; y++)
        for (x = 0; x < width - 1; x++)
            readIntOrDie(fp, &hCue[x + y * width]);
    // Vertical weights: edge (x,y)-(x,y+1) exists only for y < height-1.
    for (y = 0; y < height - 1; y++)
        for (x = 0; x < width; x++)
            readIntOrDie(fp, &vCue[y * width + x]);

    // Zero the capacities of nonexistent boundary edges: vCue has no
    // downward edge in its last row, hCue has no rightward edge in its last
    // column. BUG FIX: the original code zeroed hCue's last ROW instead,
    // overwriting real horizontal weights that were just read, while leaving
    // hCue's last column uninitialized (it is never written by the read loop
    // above, yet is copied to the device by the caller).
    for (x = 0; x < width; x++)
        vCue[(height - 1) * width + x] = 0;
    for (y = 0; y < height; y++)
        hCue[y * width + (width - 1)] = 0;

    fclose(fp);
}
#include "GraphCut.cu"
int main(int argc, char * argv[]) {
if(argc != 2) {
printf("Usage: %s MDF_file\n",argv[0]);
exit(1);
}
int* data_positive, * data_negative, * hCue, * vCue, width, height, nLabels;
loadMiddleburyMRFData(argv[1],data_positive,data_negative,hCue,vCue,width,height,nLabels);
int * d_data_positive, * d_data_negative, * d_up, * d_down, * d_left, * d_right;
CUDA_SAFE_CALL(hipMalloc((void**)&(d_data_positive),sizeof(int)*width*height));
CUDA_SAFE_CALL(hipMalloc((void**)&(d_data_negative),sizeof(int)*width*height));
CUDA_SAFE_CALL(hipMalloc((void**)&(d_up),sizeof(int)*width*height));
CUDA_SAFE_CALL(hipMalloc((void**)&(d_down),sizeof(int)*width*height));
CUDA_SAFE_CALL(hipMalloc((void**)&(d_left),sizeof(int)*width*height));
CUDA_SAFE_CALL(hipMalloc((void**)&(d_right),sizeof(int)*width*height));
CUDA_SAFE_CALL(hipMemcpy(d_data_positive,data_positive,sizeof(int)*width*height,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_data_negative,data_negative,sizeof(int)*width*height,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_up,vCue,sizeof(int)*width*height,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_down,vCue,sizeof(int)*width*height,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_left,hCue,sizeof(int)*width*height,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_right,hCue,sizeof(int)*width*height,hipMemcpyHostToDevice));
free(data_positive);
free(data_negative);
free(hCue);
free(vCue);
srand( time(NULL));
printf("Solving a %d x %d MRF problem...\n",height,width);
if(NEIGHBORHOOD != 4)
printf("Please change NEIGHBORHOOD to 4\n");
assert(NEIGHBORHOOD == 4);
GlobalWrapper gw =
GC_Init(width, height, d_data_positive, d_data_negative, 0, d_up, d_down, d_left, d_right);
int * label = (int *) malloc(sizeof(int) * width * height);
assert(label);
GC_Optimize(gw, label);
// print processed image
ofstream face_out;
face_out.open("labelMRF.ppm");
face_out << "P3 " << width << " " << height << " 255 " << endl;
for (unsigned i = 0; i < height; i++) {
for (unsigned j = 0; j < width; j++) {
if (label[i * width + j]) {
face_out << 255 << " " << 255 << " " << 255 << " ";
} else {
face_out << 0 << " " << 0 << " " << 0 << " ";
}
}
face_out << endl;
}
face_out.close();
free(label);
CUDA_SAFE_CALL(hipFree(d_data_positive));
CUDA_SAFE_CALL(hipFree(d_data_negative));
CUDA_SAFE_CALL(hipFree(d_up));
CUDA_SAFE_CALL(hipFree(d_down));
CUDA_SAFE_CALL(hipFree(d_left));
CUDA_SAFE_CALL(hipFree(d_right));
GC_End(&gw);
return 0;
}
| 8876521d5c5b70622ddbb2a16acf910dd4edf6be.cu | #include <iostream>
#include <ctime>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cassert>
using namespace std;
void loadMiddleburyMRFData(const char * filename, int* &data_positive, int* &data_negative, int* &hCue, int* &vCue, int &width, int &height, int &nLabels)
{
FILE * fp;
fp = fopen(filename,"rb");
assert(fp);
assert(fscanf(fp,"%d %d %d",&width,&height,&nLabels)==3);
int i, gt;
for(i = 0; i < width * height; i++)
assert(fscanf(fp,"%d",>)==1);
assert(gt == gt);
assert(nLabels == 2);
data_positive = (int*) malloc(width * height * sizeof(int));
data_negative = (int*) malloc(width * height * sizeof(int));
assert(data_positive && data_negative);
int v;
for(i = 0; i < width * height; i++) {
assert(fscanf(fp,"%d",&v)==1);
data_positive[i] = v;
}
for(i = 0; i < width * height; i++) {
assert(fscanf(fp,"%d",&v)==1);
data_negative[i] = v;
}
hCue = (int*) malloc(width * height * sizeof(int));
vCue = (int*) malloc(width * height * sizeof(int));
assert(hCue && vCue);
int x, y;
for(y = 0; y < height; y++) {
for(x = 0; x < width-1; x++) {
assert(fscanf(fp,"%d",&v)==1);
hCue[x+y*width] = v;
}
}
for(y = 0; y < height-1; y++) {
for(x = 0; x < width; x++) {
assert(fscanf(fp,"%d",&v)==1);
vCue[y*width+x] = v;
}
}
for(x = 0; x < width; x++) {
vCue[(height-1)*width+x] = 0;
hCue[(height-1)*width+x] = 0;
}
fclose(fp);
}
#include "GraphCut.cu"
int main(int argc, char * argv[]) {
if(argc != 2) {
printf("Usage: %s MDF_file\n",argv[0]);
exit(1);
}
int* data_positive, * data_negative, * hCue, * vCue, width, height, nLabels;
loadMiddleburyMRFData(argv[1],data_positive,data_negative,hCue,vCue,width,height,nLabels);
int * d_data_positive, * d_data_negative, * d_up, * d_down, * d_left, * d_right;
CUDA_SAFE_CALL(cudaMalloc((void**)&(d_data_positive),sizeof(int)*width*height));
CUDA_SAFE_CALL(cudaMalloc((void**)&(d_data_negative),sizeof(int)*width*height));
CUDA_SAFE_CALL(cudaMalloc((void**)&(d_up),sizeof(int)*width*height));
CUDA_SAFE_CALL(cudaMalloc((void**)&(d_down),sizeof(int)*width*height));
CUDA_SAFE_CALL(cudaMalloc((void**)&(d_left),sizeof(int)*width*height));
CUDA_SAFE_CALL(cudaMalloc((void**)&(d_right),sizeof(int)*width*height));
CUDA_SAFE_CALL(cudaMemcpy(d_data_positive,data_positive,sizeof(int)*width*height,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_data_negative,data_negative,sizeof(int)*width*height,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_up,vCue,sizeof(int)*width*height,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_down,vCue,sizeof(int)*width*height,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_left,hCue,sizeof(int)*width*height,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_right,hCue,sizeof(int)*width*height,cudaMemcpyHostToDevice));
free(data_positive);
free(data_negative);
free(hCue);
free(vCue);
srand( time(NULL));
printf("Solving a %d x %d MRF problem...\n",height,width);
if(NEIGHBORHOOD != 4)
printf("Please change NEIGHBORHOOD to 4\n");
assert(NEIGHBORHOOD == 4);
GlobalWrapper gw =
GC_Init(width, height, d_data_positive, d_data_negative, 0, d_up, d_down, d_left, d_right);
int * label = (int *) malloc(sizeof(int) * width * height);
assert(label);
GC_Optimize(gw, label);
// print processed image
ofstream face_out;
face_out.open("labelMRF.ppm");
face_out << "P3 " << width << " " << height << " 255 " << endl;
for (unsigned i = 0; i < height; i++) {
for (unsigned j = 0; j < width; j++) {
if (label[i * width + j]) {
face_out << 255 << " " << 255 << " " << 255 << " ";
} else {
face_out << 0 << " " << 0 << " " << 0 << " ";
}
}
face_out << endl;
}
face_out.close();
free(label);
CUDA_SAFE_CALL(cudaFree(d_data_positive));
CUDA_SAFE_CALL(cudaFree(d_data_negative));
CUDA_SAFE_CALL(cudaFree(d_up));
CUDA_SAFE_CALL(cudaFree(d_down));
CUDA_SAFE_CALL(cudaFree(d_left));
CUDA_SAFE_CALL(cudaFree(d_right));
GC_End(&gw);
return 0;
}
|
4359a67042e52e9281f680737112e6e934aaf739.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <helper_cuda.h>
#include <iomanip>
#include <iostream>
#include <hip/hip_complex.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cufinufft/memtransfer.h>
#include <cufinufft/precision_independent.h>
#include <cufinufft/spreadinterp.h>
using namespace cufinufft::common;
using namespace cufinufft::memtransfer;
#include "spreadinterp1d.cuh"
namespace cufinufft {
namespace spreadinterp {
template <typename T>
int cufinufft_spread1d(int nf1, cuda_complex<T> *d_fw, int M, T *d_kx, cuda_complex<T> *d_c,
cufinufft_plan_t<T> *d_plan)
/*
This c function is written for only doing 1D spreading. See
test/spread1d_test.cu for usage.
note: not allocate,transfer and free memories on gpu.
Melody Shih 11/21/21
*/
{
d_plan->kx = d_kx;
d_plan->c = d_c;
d_plan->fw = d_fw;
int ier;
d_plan->nf1 = nf1;
d_plan->M = M;
d_plan->maxbatchsize = 1;
ier = allocgpumem1d_plan<T>(d_plan);
ier = allocgpumem1d_nupts<T>(d_plan);
if (d_plan->opts.gpu_method == 1) {
ier = cuspread1d_nuptsdriven_prop<T>(nf1, M, d_plan);
if (ier != 0) {
printf("error: cuspread1d_nuptsdriven_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
if (d_plan->opts.gpu_method == 2) {
ier = cuspread1d_subprob_prop<T>(nf1, M, d_plan);
if (ier != 0) {
printf("error: cuspread1d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
ier = cuspread1d<T>(d_plan, 1);
freegpumemory1d<T>(d_plan);
return ier;
}
template <typename T>
int cuspread1d(cufinufft_plan_t<T> *d_plan, int blksize)
/*
A wrapper for different spreading methods.
Methods available:
(1) Non-uniform points driven
(2) Subproblem
Melody Shih 11/21/21
*/
{
int nf1 = d_plan->nf1;
int M = d_plan->M;
int ier;
switch (d_plan->opts.gpu_method) {
case 1: {
ier = cuspread1d_nuptsdriven<T>(nf1, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread1d_gpu_nuptsdriven" << std::endl;
return 1;
}
} break;
case 2: {
ier = cuspread1d_subprob<T>(nf1, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread1d_gpu_subprob" << std::endl;
return 1;
}
} break;
default:
std::cout << "error: incorrect method, should be 1,2" << std::endl;
return 2;
}
return ier;
}
template <typename T>
int cuspread1d_nuptsdriven_prop(int nf1, int M, cufinufft_plan_t<T> *d_plan) {
if (d_plan->opts.gpu_sort) {
int bin_size_x = d_plan->opts.gpu_binsizex;
if (bin_size_x < 0) {
std::cout << "error: invalid binsize (binsizex) = (" << bin_size_x << ")" << std::endl;
return 1;
}
int numbins = ceil((T)nf1 / bin_size_x);
T *d_kx = d_plan->kx;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_idxnupts = d_plan->idxnupts;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(hipMemset(d_binsize, 0, numbins * sizeof(int)));
hipLaunchKernelGGL(( calc_bin_size_noghost_1d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, nf1, bin_size_x, numbins, d_binsize, d_kx,
d_sortidx, pirange);
int n = numbins;
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
hipLaunchKernelGGL(( calc_inverse_of_global_sort_idx_1d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, bin_size_x, numbins, d_binstartpts,
d_sortidx, d_kx, d_idxnupts, pirange, nf1);
} else {
int *d_idxnupts = d_plan->idxnupts;
hipLaunchKernelGGL(( trivial_global_sort_index_1d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, d_idxnupts);
}
return 0;
}
template <typename T>
int cuspread1d_nuptsdriven(int nf1, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
dim3 threadsPerBlock;
dim3 blocks;
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
int pirange = d_plan->spopts.pirange;
int *d_idxnupts = d_plan->idxnupts;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
T sigma = d_plan->spopts.upsampfac;
T *d_kx = d_plan->kx;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
threadsPerBlock.x = 16;
threadsPerBlock.y = 1;
blocks.x = (M + threadsPerBlock.x - 1) / threadsPerBlock.x;
blocks.y = 1;
if (d_plan->opts.gpu_kerevalmeth) {
for (int t = 0; t < blksize; t++) {
hipLaunchKernelGGL(( spread_1d_nuptsdriven_horner), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1,
sigma, d_idxnupts, pirange);
}
} else {
for (int t = 0; t < blksize; t++) {
hipLaunchKernelGGL(( spread_1d_nuptsdriven), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1, es_c,
es_beta, d_idxnupts, pirange);
}
}
return 0;
}
template <typename T>
int cuspread1d_subprob_prop(int nf1, int M, cufinufft_plan_t<T> *d_plan)
/*
This function determines the properties for spreading that are independent
of the strength of the nodes, only relates to the locations of the nodes,
which only needs to be done once.
*/
{
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int bin_size_x = d_plan->opts.gpu_binsizex;
if (bin_size_x < 0) {
std::cout << "error: invalid binsize (binsizex) = (";
std::cout << bin_size_x << ")" << std::endl;
return 1;
}
int numbins = ceil((T)nf1 / bin_size_x);
T *d_kx = d_plan->kx;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int *d_subprob_to_bin = NULL;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(hipMemset(d_binsize, 0, numbins * sizeof(int)));
hipLaunchKernelGGL(( calc_bin_size_noghost_1d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, nf1, bin_size_x, numbins, d_binsize, d_kx, d_sortidx,
pirange);
int n = numbins;
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
hipLaunchKernelGGL(( calc_inverse_of_global_sort_idx_1d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, M, bin_size_x, numbins, d_binstartpts,
d_sortidx, d_kx, d_idxnupts, pirange, nf1);
hipLaunchKernelGGL(( calc_subprob_1d), dim3((M + 1024 - 1) / 1024), dim3(1024), 0, 0, d_binsize, d_numsubprob, maxsubprobsize, numbins);
d_ptr = thrust::device_pointer_cast(d_numsubprob);
d_result = thrust::device_pointer_cast(d_subprobstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(hipMemset(d_subprobstartpts, 0, sizeof(int)));
int totalnumsubprob;
checkCudaErrors(hipMemcpy(&totalnumsubprob, &d_subprobstartpts[n], sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipMalloc(&d_subprob_to_bin, totalnumsubprob * sizeof(int)));
hipLaunchKernelGGL(( map_b_into_subprob_1d), dim3((numbins + 1024 - 1) / 1024), dim3(1024), 0, 0, d_subprob_to_bin, d_subprobstartpts, d_numsubprob,
numbins);
assert(d_subprob_to_bin != NULL);
if (d_plan->subprob_to_bin != NULL)
hipFree(d_plan->subprob_to_bin);
d_plan->subprob_to_bin = d_subprob_to_bin;
assert(d_plan->subprob_to_bin != NULL);
d_plan->totalnumsubprob = totalnumsubprob;
return 0;
}
template <typename T>
int cuspread1d_subprob(int nf1, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
// assume that bin_size_x > ns/2;
int bin_size_x = d_plan->opts.gpu_binsizex;
int numbins = ceil((T)nf1 / bin_size_x);
T *d_kx = d_plan->kx;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int totalnumsubprob = d_plan->totalnumsubprob;
int *d_subprob_to_bin = d_plan->subprob_to_bin;
int pirange = d_plan->spopts.pirange;
T sigma = d_plan->opts.upsampfac;
size_t sharedplanorysize = (bin_size_x + 2 * (int)ceil(ns / 2.0)) * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory" << std::endl;
return 1;
}
if (d_plan->opts.gpu_kerevalmeth) {
for (int t = 0; t < blksize; t++) {
hipLaunchKernelGGL(( spread_1d_subprob_horner), dim3(totalnumsubprob), dim3(256), sharedplanorysize, 0,
d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1, sigma, d_binstartpts, d_binsize, bin_size_x,
d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize, numbins, d_idxnupts, pirange);
}
} else {
for (int t = 0; t < blksize; t++) {
hipLaunchKernelGGL(( spread_1d_subprob), dim3(totalnumsubprob), dim3(256), sharedplanorysize, 0,
d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1, es_c, es_beta, sigma, d_binstartpts, d_binsize,
bin_size_x, d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize, numbins, d_idxnupts,
pirange);
}
}
return 0;
}
template int cufinufft_spread1d<float>(int nf1, cuda_complex<float> *d_fw, int M, float *d_kx, cuda_complex<float> *d_c,
cufinufft_plan_t<float> *d_plan);
template int cufinufft_spread1d<double>(int nf1, cuda_complex<double> *d_fw, int M, double *d_kx,
cuda_complex<double> *d_c, cufinufft_plan_t<double> *d_plan);
} // namespace spreadinterp
} // namespace cufinufft
| 4359a67042e52e9281f680737112e6e934aaf739.cu | #include <cassert>
#include <helper_cuda.h>
#include <iomanip>
#include <iostream>
#include <cuComplex.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <cufinufft/memtransfer.h>
#include <cufinufft/precision_independent.h>
#include <cufinufft/spreadinterp.h>
using namespace cufinufft::common;
using namespace cufinufft::memtransfer;
#include "spreadinterp1d.cuh"
namespace cufinufft {
namespace spreadinterp {
template <typename T>
int cufinufft_spread1d(int nf1, cuda_complex<T> *d_fw, int M, T *d_kx, cuda_complex<T> *d_c,
cufinufft_plan_t<T> *d_plan)
/*
This c function is written for only doing 1D spreading. See
test/spread1d_test.cu for usage.
note: not allocate,transfer and free memories on gpu.
Melody Shih 11/21/21
*/
{
d_plan->kx = d_kx;
d_plan->c = d_c;
d_plan->fw = d_fw;
int ier;
d_plan->nf1 = nf1;
d_plan->M = M;
d_plan->maxbatchsize = 1;
ier = allocgpumem1d_plan<T>(d_plan);
ier = allocgpumem1d_nupts<T>(d_plan);
if (d_plan->opts.gpu_method == 1) {
ier = cuspread1d_nuptsdriven_prop<T>(nf1, M, d_plan);
if (ier != 0) {
printf("error: cuspread1d_nuptsdriven_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
if (d_plan->opts.gpu_method == 2) {
ier = cuspread1d_subprob_prop<T>(nf1, M, d_plan);
if (ier != 0) {
printf("error: cuspread1d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method);
return ier;
}
}
ier = cuspread1d<T>(d_plan, 1);
freegpumemory1d<T>(d_plan);
return ier;
}
template <typename T>
int cuspread1d(cufinufft_plan_t<T> *d_plan, int blksize)
/*
A wrapper for different spreading methods.
Methods available:
(1) Non-uniform points driven
(2) Subproblem
Melody Shih 11/21/21
*/
{
int nf1 = d_plan->nf1;
int M = d_plan->M;
int ier;
switch (d_plan->opts.gpu_method) {
case 1: {
ier = cuspread1d_nuptsdriven<T>(nf1, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread1d_gpu_nuptsdriven" << std::endl;
return 1;
}
} break;
case 2: {
ier = cuspread1d_subprob<T>(nf1, M, d_plan, blksize);
if (ier != 0) {
std::cout << "error: cnufftspread1d_gpu_subprob" << std::endl;
return 1;
}
} break;
default:
std::cout << "error: incorrect method, should be 1,2" << std::endl;
return 2;
}
return ier;
}
template <typename T>
int cuspread1d_nuptsdriven_prop(int nf1, int M, cufinufft_plan_t<T> *d_plan) {
if (d_plan->opts.gpu_sort) {
int bin_size_x = d_plan->opts.gpu_binsizex;
if (bin_size_x < 0) {
std::cout << "error: invalid binsize (binsizex) = (" << bin_size_x << ")" << std::endl;
return 1;
}
int numbins = ceil((T)nf1 / bin_size_x);
T *d_kx = d_plan->kx;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_idxnupts = d_plan->idxnupts;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(cudaMemset(d_binsize, 0, numbins * sizeof(int)));
calc_bin_size_noghost_1d<<<(M + 1024 - 1) / 1024, 1024>>>(M, nf1, bin_size_x, numbins, d_binsize, d_kx,
d_sortidx, pirange);
int n = numbins;
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
calc_inverse_of_global_sort_idx_1d<<<(M + 1024 - 1) / 1024, 1024>>>(M, bin_size_x, numbins, d_binstartpts,
d_sortidx, d_kx, d_idxnupts, pirange, nf1);
} else {
int *d_idxnupts = d_plan->idxnupts;
trivial_global_sort_index_1d<<<(M + 1024 - 1) / 1024, 1024>>>(M, d_idxnupts);
}
return 0;
}
template <typename T>
int cuspread1d_nuptsdriven(int nf1, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
dim3 threadsPerBlock;
dim3 blocks;
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
int pirange = d_plan->spopts.pirange;
int *d_idxnupts = d_plan->idxnupts;
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
T sigma = d_plan->spopts.upsampfac;
T *d_kx = d_plan->kx;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
threadsPerBlock.x = 16;
threadsPerBlock.y = 1;
blocks.x = (M + threadsPerBlock.x - 1) / threadsPerBlock.x;
blocks.y = 1;
if (d_plan->opts.gpu_kerevalmeth) {
for (int t = 0; t < blksize; t++) {
spread_1d_nuptsdriven_horner<<<blocks, threadsPerBlock>>>(d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1,
sigma, d_idxnupts, pirange);
}
} else {
for (int t = 0; t < blksize; t++) {
spread_1d_nuptsdriven<<<blocks, threadsPerBlock>>>(d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1, es_c,
es_beta, d_idxnupts, pirange);
}
}
return 0;
}
template <typename T>
int cuspread1d_subprob_prop(int nf1, int M, cufinufft_plan_t<T> *d_plan)
/*
This function determines the properties for spreading that are independent
of the strength of the nodes, only relates to the locations of the nodes,
which only needs to be done once.
*/
{
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
int bin_size_x = d_plan->opts.gpu_binsizex;
if (bin_size_x < 0) {
std::cout << "error: invalid binsize (binsizex) = (";
std::cout << bin_size_x << ")" << std::endl;
return 1;
}
int numbins = ceil((T)nf1 / bin_size_x);
T *d_kx = d_plan->kx;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_sortidx = d_plan->sortidx;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int *d_subprob_to_bin = NULL;
int pirange = d_plan->spopts.pirange;
checkCudaErrors(cudaMemset(d_binsize, 0, numbins * sizeof(int)));
calc_bin_size_noghost_1d<<<(M + 1024 - 1) / 1024, 1024>>>(M, nf1, bin_size_x, numbins, d_binsize, d_kx, d_sortidx,
pirange);
int n = numbins;
thrust::device_ptr<int> d_ptr(d_binsize);
thrust::device_ptr<int> d_result(d_binstartpts);
thrust::exclusive_scan(d_ptr, d_ptr + n, d_result);
calc_inverse_of_global_sort_idx_1d<<<(M + 1024 - 1) / 1024, 1024>>>(M, bin_size_x, numbins, d_binstartpts,
d_sortidx, d_kx, d_idxnupts, pirange, nf1);
calc_subprob_1d<<<(M + 1024 - 1) / 1024, 1024>>>(d_binsize, d_numsubprob, maxsubprobsize, numbins);
d_ptr = thrust::device_pointer_cast(d_numsubprob);
d_result = thrust::device_pointer_cast(d_subprobstartpts + 1);
thrust::inclusive_scan(d_ptr, d_ptr + n, d_result);
checkCudaErrors(cudaMemset(d_subprobstartpts, 0, sizeof(int)));
int totalnumsubprob;
checkCudaErrors(cudaMemcpy(&totalnumsubprob, &d_subprobstartpts[n], sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMalloc(&d_subprob_to_bin, totalnumsubprob * sizeof(int)));
map_b_into_subprob_1d<<<(numbins + 1024 - 1) / 1024, 1024>>>(d_subprob_to_bin, d_subprobstartpts, d_numsubprob,
numbins);
assert(d_subprob_to_bin != NULL);
if (d_plan->subprob_to_bin != NULL)
cudaFree(d_plan->subprob_to_bin);
d_plan->subprob_to_bin = d_subprob_to_bin;
assert(d_plan->subprob_to_bin != NULL);
d_plan->totalnumsubprob = totalnumsubprob;
return 0;
}
template <typename T>
int cuspread1d_subprob(int nf1, int M, cufinufft_plan_t<T> *d_plan, int blksize) {
int ns = d_plan->spopts.nspread; // psi's support in terms of number of cells
T es_c = d_plan->spopts.ES_c;
T es_beta = d_plan->spopts.ES_beta;
int maxsubprobsize = d_plan->opts.gpu_maxsubprobsize;
// assume that bin_size_x > ns/2;
int bin_size_x = d_plan->opts.gpu_binsizex;
int numbins = ceil((T)nf1 / bin_size_x);
T *d_kx = d_plan->kx;
cuda_complex<T> *d_c = d_plan->c;
cuda_complex<T> *d_fw = d_plan->fw;
int *d_binsize = d_plan->binsize;
int *d_binstartpts = d_plan->binstartpts;
int *d_numsubprob = d_plan->numsubprob;
int *d_subprobstartpts = d_plan->subprobstartpts;
int *d_idxnupts = d_plan->idxnupts;
int totalnumsubprob = d_plan->totalnumsubprob;
int *d_subprob_to_bin = d_plan->subprob_to_bin;
int pirange = d_plan->spopts.pirange;
T sigma = d_plan->opts.upsampfac;
size_t sharedplanorysize = (bin_size_x + 2 * (int)ceil(ns / 2.0)) * sizeof(cuda_complex<T>);
if (sharedplanorysize > 49152) {
std::cout << "error: not enough shared memory" << std::endl;
return 1;
}
if (d_plan->opts.gpu_kerevalmeth) {
for (int t = 0; t < blksize; t++) {
spread_1d_subprob_horner<<<totalnumsubprob, 256, sharedplanorysize>>>(
d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1, sigma, d_binstartpts, d_binsize, bin_size_x,
d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize, numbins, d_idxnupts, pirange);
}
} else {
for (int t = 0; t < blksize; t++) {
spread_1d_subprob<<<totalnumsubprob, 256, sharedplanorysize>>>(
d_kx, d_c + t * M, d_fw + t * nf1, M, ns, nf1, es_c, es_beta, sigma, d_binstartpts, d_binsize,
bin_size_x, d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize, numbins, d_idxnupts,
pirange);
}
}
return 0;
}
template int cufinufft_spread1d<float>(int nf1, cuda_complex<float> *d_fw, int M, float *d_kx, cuda_complex<float> *d_c,
cufinufft_plan_t<float> *d_plan);
template int cufinufft_spread1d<double>(int nf1, cuda_complex<double> *d_fw, int M, double *d_kx,
cuda_complex<double> *d_c, cufinufft_plan_t<double> *d_plan);
} // namespace spreadinterp
} // namespace cufinufft
|
27233de4443f4c0f014b57f76c992bb55ecd6694.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 8
#define THREADS 8
__global__ void reduce(float *A, float *result)
{
__shared__ float sdata[THREADS];
int i = blockDim.x*blockIdx.x+threadIdx.x;
sdata[threadIdx.x] = A[i];
for(unsigned s = blockDim.x/2;s > 0; s>>=1)
{
if(threadIdx.x < s && sdata[threadIdx.x] < sdata[threadIdx.x+s])
sdata[threadIdx.x] = sdata[threadIdx.x+s];
__syncthreads();
}
if(threadIdx.x == 0) *result = sdata[0];
}
int main()
{
float A[N], *A_d, *result, *result_d;
int i;
dim3 dimBlock(THREADS);
dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x);
for (i=0; i<N; i++)
A[i] = N-i;
A[3] = 2*N;
A[N-3] = -N;
hipMalloc((void **) &A_d, sizeof(float)*N);
hipMemcpy(A_d, A, sizeof(float)*N, hipMemcpyHostToDevice);
hipMalloc((void **) &result_d, sizeof(float));
hipLaunchKernelGGL(( reduce), dim3(dimGrid), dim3(dimBlock), 0, 0, A_d, result_d);
result = (float*)malloc(sizeof(float));
hipMemcpy(result, result_d, sizeof(float), hipMemcpyDeviceToHost);
printf("%f\n", *result);
hipFree(A_d);
hipFree(result_d);
hipFree(result);
}
| 27233de4443f4c0f014b57f76c992bb55ecd6694.cu |
#include <stdio.h>
#define N 8
#define THREADS 8
__global__ void reduce(float *A, float *result)
{
__shared__ float sdata[THREADS];
int i = blockDim.x*blockIdx.x+threadIdx.x;
sdata[threadIdx.x] = A[i];
for(unsigned s = blockDim.x/2;s > 0; s>>=1)
{
if(threadIdx.x < s && sdata[threadIdx.x] < sdata[threadIdx.x+s])
sdata[threadIdx.x] = sdata[threadIdx.x+s];
__syncthreads();
}
if(threadIdx.x == 0) *result = sdata[0];
}
int main()
{
float A[N], *A_d, *result, *result_d;
int i;
dim3 dimBlock(THREADS);
dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x);
for (i=0; i<N; i++)
A[i] = N-i;
A[3] = 2*N;
A[N-3] = -N;
cudaMalloc((void **) &A_d, sizeof(float)*N);
cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMalloc((void **) &result_d, sizeof(float));
reduce<<<dimGrid, dimBlock>>>(A_d, result_d);
result = (float*)malloc(sizeof(float));
cudaMemcpy(result, result_d, sizeof(float), cudaMemcpyDeviceToHost);
printf("%f\n", *result);
cudaFree(A_d);
cudaFree(result_d);
cudaFree(result);
}
|
03b3e53c892b181a180639f35da687370bbb492b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/interpolate_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/interpolate_function.h"
#include "paddle/phi/kernels/primitive/datamover_primitives.h"
namespace phi {
using phi::kps::details::FastDivMod;
template <typename T>
__forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
int* in_img_idx,
int* x_id,
T* lambda1,
T* lambda2,
T src_x,
const int in_img_x) {
src_x = (src_x > static_cast<T>(0)) ? src_x : static_cast<T>(0);
*in_img_idx = static_cast<int>(src_x);
*x_id = (*in_img_idx < in_img_x - 1) ? 1 : 0;
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
*lambda1 = static_cast<T>(static_cast<MT>(src_x) - *in_img_idx);
*lambda2 = static_cast<T>(1.0) - *lambda1;
}
template <typename T>
__global__ void KeLinearInterpFw(const T* in,
const size_t in_img_w,
const size_t input_w,
T* out,
const size_t out_img_w,
const size_t output_h,
const size_t output_w,
const size_t num_channels,
const float ratio_w,
const bool align_corners,
const int align_mode,
const DataLayout data_layout) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id, out_img_idy, out_img_idx;
if (data_layout == DataLayout::kNCHW) {
channel_id = out_id_w / out_img_size;
out_img_idx = tid % out_img_w;
} else {
out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
channel_id = tid % num_channels;
}
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id
using MT = typename phi::dtype::MPTypeTrait<T>::Type;
MT src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
MT w1lambda = align_flag ? (src_w - in_img_idx)
: (ratio_w * out_img_idx - in_img_idx);
MT w2lambda = 1.0 - w1lambda;
if (data_layout == DataLayout::kNCHW) {
const T* in_pos =
&in[out_id_h * input_w + channel_id * in_img_size + in_img_idx];
// linear interpolation
out[out_id_h * output_w + out_id_w] =
static_cast<T>(w2lambda * static_cast<MT>(in_pos[0]) +
w1lambda * static_cast<MT>(in_pos[w_id]));
} else {
const T* in_pos =
&in[out_id_h * input_w + in_img_idx * num_channels + channel_id];
// linear interpolation
out[out_id_h * output_w + out_id_w] = static_cast<T>(
w2lambda * static_cast<MT>(in_pos[0]) +
w1lambda * static_cast<MT>(in_pos[w_id * num_channels]));
}
}
}
template <typename T>
__global__ void KeNearestNeighborInterpNCHWFw(const T* in,
const size_t in_img_h,
const size_t in_img_w,
T* out,
const size_t out_img_h,
const size_t out_img_w,
const size_t nc,
const float ratio_h,
const float ratio_w,
const bool align_corners) {
int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
int nc_stride = blockDim.z * gridDim.z;
// nearest_sampling by multiple read in_addr and write to out_addr
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
int in_index_stride = nc_stride * in_img_h * in_img_w;
int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
int out_index_stride = nc_stride * out_img_h * out_img_w;
// prevent from multiple threads writing
if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
while (nc_id < nc) {
out[out_index] = in[in_index];
in_index += in_index_stride;
out_index += out_index_stride;
nc_id += nc_stride;
}
}
}
// Nearest-neighbor 2D forward interpolation, NHWC layout.
// Flat grid-stride loop over all output elements; `divmods` carries
// precomputed fast-division helpers used to decompose the flat output
// index into (batch, y, x, channel) without hardware integer division.
//
// Change vs. original: removed the unused locals `in_img_size` and
// `out_img_size`, which were computed but never read.
template <typename T>
__global__ void KeNearestNeighborInterpFw(
    const T* in,
    const size_t in_img_h,
    const size_t in_img_w,
    const size_t input_h,
    const size_t input_w,
    T* out,
    const size_t out_img_h,
    const size_t out_img_w,
    const size_t output_h,
    const size_t output_w,
    const size_t num_channels,
    const float ratio_h,
    const float ratio_w,
    const bool align_corners,
    funcs::FastDivModForInterpolate divmods) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    // Decompose flat index: out_id_h = batch, out_id_w = offset in batch.
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];
    int out_id_w = out_id_divmod.val[1];
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    // align_corners rounds to the nearest source pixel; otherwise truncates.
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
                  in_img_idx * num_channels + channel_id];
  }
}
// Bilinear 2D forward interpolation, NHWC layout.
// Flat grid-stride loop over all output elements; arithmetic is performed
// in MT, the higher-precision accumulation type associated with T.
// `align_type_value` is the sampling-offset constant supplied by the host
// launcher (0.5 for half-pixel sampling, 0 otherwise).
template <typename T>
__global__ void KeBilinearInterpFw(const T* in,
                                   const size_t in_img_h,
                                   const size_t in_img_w,
                                   const size_t input_h,
                                   const size_t input_w,
                                   T* out,
                                   const size_t out_img_h,
                                   const size_t out_img_w,
                                   const size_t output_h,
                                   const size_t output_w,
                                   const size_t num_channels,
                                   const float ratio_h,
                                   const float ratio_w,
                                   const float align_type_value,
                                   funcs::FastDivModForInterpolate divmods) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    // Decompose the flat output index into (batch, y, x, channel) with
    // the precomputed fast-division helpers.
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];
    int out_id_w = out_id_divmod.val[1];
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    int in_img_idx, in_img_idy, h_id, w_id;
    MT h1lambda, w1lambda, h2lambda, w2lambda;
    MT src_w = static_cast<MT>(ratio_w * (out_img_idx + align_type_value) -
                               align_type_value);
    MT src_h = static_cast<MT>(ratio_h * (out_img_idy + align_type_value) -
                               align_type_value);
    // Fill in source indices, neighbor offsets (w_id/h_id) and the two
    // interpolation weights for each axis.
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
    // bilinear interpolation
    const T* in_pos =
        &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
            in_img_idx * num_channels + channel_id];
    out[tid] =
        h2lambda * (w2lambda * static_cast<MT>(in_pos[0]) +
                    w1lambda * static_cast<MT>(in_pos[w_id * num_channels])) +
        h1lambda *
            (w2lambda *
                 static_cast<MT>(in_pos[h_id * in_img_w * num_channels]) +
             w1lambda * static_cast<MT>(in_pos[h_id * in_img_w * num_channels +
                                               w_id * num_channels]));
  }
}
// Bilinear 2D forward interpolation, NCHW layout.
// Expects a 3D launch: x covers output width, y covers output height, z
// strides over the fused batch*channel (nc) axis. Arithmetic is performed
// in MT, the higher-precision accumulation type associated with T.
template <typename T>
__global__ void KeBilinearInterpNCHWFw(const T* in,
                                       const size_t in_img_h,
                                       const size_t in_img_w,
                                       T* out,
                                       const size_t out_img_h,
                                       const size_t out_img_w,
                                       const size_t nc,
                                       const float ratio_h,
                                       const float ratio_w,
                                       const float align_type_value) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
  int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
  int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
  int nc_stride = blockDim.z * gridDim.z;
  int in_img_idx, in_img_idy, h_id, w_id;
  MT h1lambda, w1lambda, h2lambda, w2lambda;
  MT src_w = static_cast<MT>(ratio_w * (out_img_idx + align_type_value) -
                             align_type_value);
  MT src_h = static_cast<MT>(ratio_h * (out_img_idy + align_type_value) -
                             align_type_value);
  // Fill in source indices, neighbor offsets and per-axis weights.
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
  int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
  int in_index_stride = nc_stride * in_img_h * in_img_w;
  int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
  int out_index_stride = nc_stride * out_img_h * out_img_w;
  // prevent from multiple threads writing
  if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
    // One pixel per nc slice, striding over the nc axis.
    while (nc_id < nc) {
      const T* in_pos = &in[in_index];
      out[out_index] = static_cast<T>(
          h2lambda * (w2lambda * static_cast<MT>(in_pos[0]) +
                      w1lambda * static_cast<MT>(in_pos[w_id])) +
          h1lambda *
              (w2lambda * static_cast<MT>(in_pos[h_id * in_img_w]) +
               w1lambda * static_cast<MT>(in_pos[h_id * in_img_w + w_id])));
      in_index += in_index_stride;
      out_index += out_index_stride;
      nc_id += nc_stride;
    }
  }
}
// Cubic interpolation of four consecutive samples x0..x3 at fractional
// position t, accumulated in MT (the higher-precision type for T) with
// the cubic-convolution coefficient a = -0.75.
template <typename T>
__device__ __forceinline__ static T Kecubic_interp(
    const T x0, const T x1, const T x2, const T x3, T t) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  const MT alpha = static_cast<MT>(-0.75);
  // Distances from the sample position to the two inner grid points.
  const MT dist_lo = static_cast<MT>(t);
  const MT dist_hi = static_cast<MT>(1.0) - dist_lo;
  // Weights: outer taps use the |x| in (1, 2] branch of the cubic kernel,
  // inner taps the |x| <= 1 branch.
  const MT w0 = funcs::CubicConvolution2<MT>(dist_lo + static_cast<MT>(1.0), alpha);
  const MT w1 = funcs::CubicConvolution1<MT>(dist_lo, alpha);
  const MT w2 = funcs::CubicConvolution1<MT>(dist_hi, alpha);
  const MT w3 = funcs::CubicConvolution2<MT>(dist_hi + static_cast<MT>(1.0), alpha);
  return static_cast<T>(static_cast<MT>(x0) * w0 + static_cast<MT>(x1) * w1 +
                        static_cast<MT>(x2) * w2 + static_cast<MT>(x3) * w3);
}
// Bicubic 2D forward interpolation for both NCHW and NHWC layouts.
// Flat grid-stride loop over all output elements. For each output pixel,
// four rows of four edge-clamped neighbors are cubic-interpolated along x
// (via Kecubic_interp), then the four row results along y.
template <typename T>
__global__ void KeBicubicInterpFw(const T* in,
                                  const size_t in_img_h,
                                  const size_t in_img_w,
                                  const size_t input_h,
                                  const size_t input_w,
                                  T* out,
                                  const size_t out_img_h,
                                  const size_t out_img_w,
                                  const size_t output_h,
                                  const size_t output_w,
                                  const size_t num_channels,
                                  const float ratio_h,
                                  const float ratio_w,
                                  const bool align_corners,
                                  const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idy, out_img_idx;
    // Recover (channel, y, x) from the flat per-batch offset, depending
    // on the memory layout.
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idy = (out_id_w % out_img_size) / out_img_w;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idy = out_id_w / (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Map the output pixel back to (possibly fractional) input coords;
    // y_t / x_t are the fractional offsets fed to the cubic kernel.
    MT in_img_idy = align_corners ? ratio_h * out_img_idy
                                  : ratio_h * (out_img_idy + 0.5) - 0.5;
    int input_y = floorf(static_cast<float>(in_img_idy));
    const T y_t = static_cast<T>(in_img_idy - input_y);
    MT in_img_idx = align_corners ? ratio_w * out_img_idx
                                  : ratio_w * (out_img_idx + 0.5) - 0.5;
    int input_x = floorf(static_cast<float>(in_img_idx));
    const T x_t = static_cast<T>(in_img_idx - input_x);
    T coefficients[4];
    const T* in_pos_0;
    const T* in_pos_1;
    const T* in_pos_2;
    const T* in_pos_3;
    int access_x_0;
    if (data_layout == DataLayout::kNCHW) {
      for (int k = 0; k < 4; k++) {
        // Clamp each of the 4x4 neighborhood coordinates to image bounds.
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0);
        access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0);
        in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_0];
        in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_1];
        in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_2];
        in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_3];
        // Interpolate one row along x.
        coefficients[k] = Kecubic_interp<T>(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      // Interpolate the four row results along y.
      out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
                                                              coefficients[1],
                                                              coefficients[2],
                                                              coefficients[3],
                                                              y_t);
    } else {
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0);
        int access_x_0 =
            max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0);
        const T* in_pos_0 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_0 * num_channels + channel_id];
        const T* in_pos_1 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_1 * num_channels + channel_id];
        const T* in_pos_2 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_2 * num_channels + channel_id];
        const T* in_pos_3 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_3 * num_channels + channel_id];
        coefficients[k] = Kecubic_interp<T>(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
                                                              coefficients[1],
                                                              coefficients[2],
                                                              coefficients[3],
                                                              y_t);
    }
  }
}
// Trilinear 3D forward interpolation for both NCDHW and NDHWC layouts.
// Flat grid-stride loop over all output elements. For each output voxel,
// the eight surrounding input voxels are blended with per-axis weights
// (d/h/w "lambdas"); neighbor steps d_id/h_id/w_id collapse to 0 at the
// far border so reads stay in bounds.
template <typename T>
__global__ void KeTrilinearInterpFw(const T* in,
                                    const size_t in_img_d,
                                    const size_t in_img_h,
                                    const size_t in_img_w,
                                    const size_t input_h,
                                    const size_t input_w,
                                    T* out,
                                    const size_t out_img_d,
                                    const size_t out_img_h,
                                    const size_t out_img_w,
                                    const size_t output_h,
                                    const size_t output_w,
                                    const size_t num_channels,
                                    const float ratio_d,
                                    const float ratio_h,
                                    const float ratio_w,
                                    const bool align_corners,
                                    const int align_mode,
                                    const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // Half-pixel sampling is used when align_mode == 0 and corners are not
  // aligned.
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    // Recover (channel, t, y, x) from the flat per-batch offset.
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Depth axis: lower source index, neighbor step, and blend weights.
    int in_img_idt = align_flag
                         ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    in_img_idt = (in_img_idt > 0) ? in_img_idt : 0;
    int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0;
    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
    // src_d is computed unconditionally but only consumed when align_flag
    // is set (the d1lambda branch below).
    T src_d = static_cast<T>(ratio_d * (out_img_idt + 0.5) - 0.5);
    src_d = (src_d > static_cast<T>(0)) ? src_d : static_cast<T>(0);
    T d1lambda = align_flag
                     ? static_cast<T>(static_cast<MT>(src_d) - in_img_idt)
                     : static_cast<T>(ratio_d * out_img_idt - in_img_idt);
    T d2lambda = static_cast<T>(1.0) - d1lambda;
    // Height axis, same scheme.
    int in_img_idy = align_flag
                         ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
    int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
    T src_h = static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
    src_h = (src_h > static_cast<T>(0)) ? src_h : static_cast<T>(0);
    T h1lambda = align_flag
                     ? static_cast<T>(static_cast<MT>(src_h) - in_img_idy)
                     : static_cast<T>(ratio_h * out_img_idy - in_img_idy);
    T h2lambda = static_cast<T>(1.0) - h1lambda;
    // Width axis, same scheme.
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
    T src_w = static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
    src_w = (src_w > static_cast<T>(0)) ? src_w : static_cast<T>(0);
    T w1lambda = align_flag
                     ? static_cast<T>(static_cast<MT>(src_w) - in_img_idx)
                     : static_cast<T>(ratio_w * out_img_idx - in_img_idx);
    T w2lambda = static_cast<T>(1.0) - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      // in_pos1/in_pos2 are the near/far depth slices of the 2x2x2 cell.
      int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size +
                        (in_img_idt * in_img_h + in_img_idy) * in_img_w +
                        in_img_idx;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w] +
                           w1lambda * in_pos1[h_id * in_img_w + w_id])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w] +
                           w1lambda * in_pos2[h_id * in_img_w + w_id]));
    } else {
      int in_pos1_idx = out_id_h * input_w +
                        in_img_idt * in_img_h * in_img_w * num_channels +
                        in_img_idy * in_img_w * num_channels +
                        in_img_idx * num_channels + channel_id;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] +
                           w1lambda * in_pos1[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos1[h_id * in_img_w * num_channels +
                                              w_id * num_channels])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] +
                           w1lambda * in_pos2[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos2[h_id * in_img_w * num_channels +
                                              w_id * num_channels]));
    }
  }
}
// Nearest-neighbor 3D forward interpolation for both NCDHW and NDHWC
// layouts, flat grid-stride loop over all output elements.
template <typename T>
__global__ void KeNearestNeighbor3DInterpFw(const T* in,
                                            const size_t in_img_d,
                                            const size_t in_img_h,
                                            const size_t in_img_w,
                                            const size_t input_h,
                                            const size_t input_w,
                                            T* out,
                                            const size_t out_img_d,
                                            const size_t out_img_h,
                                            const size_t out_img_w,
                                            const size_t output_h,
                                            const size_t output_w,
                                            const size_t num_channels,
                                            const float ratio_d,
                                            const float ratio_h,
                                            const float ratio_w,
                                            const bool align_corners,
                                            const DataLayout data_layout) {
  int nthreads = output_h * output_w;  // ncdhw
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    // Recover (channel, t, y, x) from the flat per-batch offset.
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // align_corners rounds to the nearest source voxel; otherwise truncates.
    int in_img_idt = (align_corners)
                         ? static_cast<int>(ratio_d * out_img_idt + 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    if (data_layout == DataLayout::kNCHW) {
      out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
                    in_img_idt * in_img_h * in_img_w + in_img_idy * in_img_w +
                    in_img_idx];
    } else {
      out[tid] = in[out_id_h * input_w +
                    in_img_idt * in_img_h * in_img_w * num_channels +
                    in_img_idy * in_img_w * num_channels +
                    in_img_idx * num_channels + channel_id];
    }
  }
}
// Host-side 1D (linear) forward interpolation driver.
//
// Output-width resolution precedence:
//   1. size_tensor (list of 1-D tensors holding the new shape);
//   2. otherwise scale (tensor form, then attribute form) gives
//      out_w = in_w * scale_w, optionally overridden by out_size.
// Allocates the output, falls back to a plain copy when the width is
// unchanged, and otherwise launches KeLinearInterpFw.
//
// Fix vs. original: the InvalidArgument messages printed a float scale
// with a %d specifier; changed to %f.
template <typename T, typename Context>
static void Interpolate1DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get());
    out_w = new_size[0];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      scale_w = scale_data[0];
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // An explicit output-size tensor overrides the scale-derived width.
    if (out_size) {
      DenseTensor sizes;
      phi::Copy(dev_ctx, *out_size, phi::CPUPlace(), true, &sizes);
      auto size_data = sizes.data<int>();
      out_w = size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  // Identity resize: copy the input straight through.
  if (in_w == out_w) {
    phi::Copy(dev_ctx, input, dev_ctx.GetPlace(), false, output);
    return;
  }
  // ratio_w maps an output coordinate back to input space; it is left at
  // 0 when out_w == 1.
  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                      : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_cw = c * in_w;
  int64_t out_cw = c * out_w;
  auto pixelNum = n * out_cw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("linear" == interp_method) {
    hipLaunchKernelGGL(( KeLinearInterpFw<T>), dim3(config.block_per_grid),
                       dim3(config.thread_per_block),
                       0,
                       dev_ctx.stream(), input_data,
                       in_w,
                       in_cw,
                       output_data,
                       out_w,
                       n,
                       out_cw,
                       c,
                       ratio_w,
                       align_corners,
                       align_mode,
                       data_layout);
  }
}
// Host-side 2D forward interpolation driver (nearest / bilinear / bicubic).
//
// Output-size resolution precedence:
//   1. size_tensor (list of 1-D tensors holding the new shape);
//   2. otherwise scale (tensor form, then attribute form) gives
//      out_{h,w} = in_{h,w} * scale_{h,w}, optionally overridden by
//      out_size.
// Allocates the output, copies through when the size is unchanged, and
// otherwise selects a layout-specialized kernel plus its launch config.
//
// Fix vs. original: the InvalidArgument messages printed float scales
// with a %d specifier; changed to %f.
template <typename T, typename Context>
static void Interpolate2DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  float scale_h = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get());
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      // A single-element scale tensor applies to both axes.
      if (scale_data.size() > 1) {
        scale_h = scale_data[0];
        scale_w = scale_data[1];
      } else {
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0,
          true,
          errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
    } else {
      if (scale.size() > 1) {
        scale_w = scale[1];
        scale_h = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0,
            true,
            errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_h));
      }
    }
    if (scale_w > 0. && scale_h > 0.) {
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // An explicit output-size tensor overrides the scale-derived size.
    if (out_size) {
      DenseTensor sizes;
      phi::Copy(dev_ctx, *out_size, phi::CPUPlace(), true, &sizes);
      auto size_data = sizes.data<int>();
      out_h = size_data[0];
      out_w = size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(
      out_h,
      0,
      errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  // Identity resize: copy the input straight through.
  if (in_h == out_h && in_w == out_w) {
    phi::Copy(dev_ctx, input, dev_ctx.GetPlace(), false, output);
    return;
  }
  // ratio_* map output coordinates back to input space; each is left at
  // 0 when the corresponding output extent is 1.
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                      : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                      : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_hw = in_h * in_w;
  int64_t out_hw = out_h * out_w;
  int64_t in_chw = c * in_hw;
  int64_t out_chw = c * out_hw;
  auto pixelNum = n * out_chw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("nearest" == interp_method) {
    if (data_layout == DataLayout::kNCHW) {
      // get launch 3D config
      int nc = n * c;
      backends::gpu::GpuLaunchConfig config_3d =
          backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
      hipLaunchKernelGGL(( KeNearestNeighborInterpNCHWFw<T>), dim3(config_3d.block_per_grid),
                         dim3(config_3d.thread_per_block),
                         0,
                         dev_ctx.stream(), input_data,
                         in_h,
                         in_w,
                         output_data,
                         out_h,
                         out_w,
                         nc,
                         ratio_h,
                         ratio_w,
                         align_corners);
    } else {
      int64_t cw = c * out_w;
      auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
      hipLaunchKernelGGL(( KeNearestNeighborInterpFw<T>), dim3(config.block_per_grid),
                         dim3(config.thread_per_block),
                         0,
                         dev_ctx.stream(), input_data,
                         in_h,
                         in_w,
                         n,
                         in_chw,
                         output_data,
                         out_h,
                         out_w,
                         n,
                         out_chw,
                         c,
                         ratio_h,
                         ratio_w,
                         align_corners,
                         interp_divmods);
    }
  } else if ("bilinear" == interp_method) {
    dim3 thread_num = config.thread_per_block;
#ifdef WITH_NV_JETSON
    // Jetson (SM 5.3 / 6.2) favors a fixed 512-thread block.
    if (config.compute_capability == 53 || config.compute_capability == 62) {
      thread_num = 512;
    }
#endif
    // 0.5 selects half-pixel sampling (align_mode 0, corners not aligned).
    const float align_type_value =
        (align_mode == 0 && !align_corners) ? 0.5f : 0.f;
    if (data_layout == DataLayout::kNCHW) {
      // get launch 3D config
      int nc = n * c;
      backends::gpu::GpuLaunchConfig config_3d =
          backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
      hipLaunchKernelGGL(( KeBilinearInterpNCHWFw<T>), dim3(config_3d.block_per_grid),
                         dim3(config_3d.thread_per_block),
                         0,
                         dev_ctx.stream(), input_data,
                         in_h,
                         in_w,
                         output_data,
                         out_h,
                         out_w,
                         nc,
                         ratio_h,
                         ratio_w,
                         align_type_value);
    } else {
      int64_t cw = c * out_w;
      auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
      hipLaunchKernelGGL(( KeBilinearInterpFw<T>)
          , dim3(config.block_per_grid), dim3(thread_num), 0, dev_ctx.stream(),
          input_data,
          in_h,
          in_w,
          n,
          in_chw,
          output_data,
          out_h,
          out_w,
          n,
          out_chw,
          c,
          ratio_h,
          ratio_w,
          align_type_value,
          interp_divmods);
    }
  } else if ("bicubic" == interp_method) {
    constexpr int thread_per_block = 512;
    hipLaunchKernelGGL(( KeBicubicInterpFw<T>)
        , dim3(config.block_per_grid), dim3(thread_per_block), 0, dev_ctx.stream(),
        input_data,
        in_h,
        in_w,
        n,
        in_chw,
        output_data,
        out_h,
        out_w,
        n,
        out_chw,
        c,
        ratio_h,
        ratio_w,
        align_corners,
        data_layout);
  }
}
// Host-side 3D forward interpolation driver (trilinear / nearest).
//
// Output-size resolution precedence:
//   1. size_tensor (list of 1-D tensors holding the new shape);
//   2. otherwise scale (tensor form, then attribute form) gives
//      out_{d,h,w} = in_{d,h,w} * scale_{d,h,w}, optionally overridden
//      by out_size.
// Allocates the output, copies through when the size is unchanged, and
// otherwise launches the requested kernel.
//
// Fix vs. original: the InvalidArgument messages printed float scales
// with a %d specifier; changed to %f.
template <typename T, typename Context>
static void Interpolate3DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  float scale_d = -1;
  float scale_h = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get());
    out_d = new_size[0];
    out_h = new_size[1];
    out_w = new_size[2];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      // A single-element scale tensor applies to all three axes.
      if (scale_data.size() > 2) {
        scale_d = scale_data[0];
        scale_h = scale_data[1];
        scale_w = scale_data[2];
      } else {
        scale_d = scale_data[0];
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0,
          true,
          errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
      PADDLE_ENFORCE_EQ(
          scale_d > 0,
          true,
          errors::InvalidArgument(
              "The scale_d in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_d));
    } else {
      if (scale.size() > 2) {
        scale_d = scale[0];
        scale_h = scale[1];
        scale_w = scale[2];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0,
            true,
            errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_h));
        PADDLE_ENFORCE_EQ(
            scale_d > 0,
            true,
            errors::InvalidArgument(
                "The scale_d in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_d));
      }
    }
    if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
      out_d = static_cast<int>(in_d * scale_d);
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // An explicit output-size tensor overrides the scale-derived size.
    if (out_size) {
      DenseTensor sizes;
      phi::Copy(dev_ctx, *out_size, phi::CPUPlace(), true, &sizes);
      auto size_data = sizes.data<int>();
      out_d = size_data[0];
      out_h = size_data[1];
      out_w = size_data[2];
    }
  }
  PADDLE_ENFORCE_GT(
      out_d,
      0,
      errors::InvalidArgument("out_d in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  PADDLE_ENFORCE_GT(
      out_h,
      0,
      errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_d, out_h, out_w};
  } else {
    dim_out = {n, out_d, out_h, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  // Identity resize: copy the input straight through.
  if (in_d == out_d && in_h == out_h && in_w == out_w) {
    phi::Copy(dev_ctx, input, dev_ctx.GetPlace(), false, output);
    return;
  }
  // ratio_* map output coordinates back to input space; each is left at
  // 0 when the corresponding output extent is 1.
  float ratio_d = 0.f;
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_d > 1) {
    float new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
                                      : static_cast<float>(in_d) / out_d;
    ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
                              : static_cast<float>(new_scale_d);
  }
  if (out_h > 1) {
    float new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                      : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                      : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_dhw = in_d * in_h * in_w;
  int64_t out_dhw = out_d * out_h * out_w;
  int64_t in_cdhw = c * in_dhw;
  int64_t out_cdhw = c * out_dhw;
  auto pixelNum = n * out_cdhw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("trilinear" == interp_method) {
    hipLaunchKernelGGL(( KeTrilinearInterpFw<T>), dim3(config.block_per_grid),
                       dim3(config.thread_per_block),
                       0,
                       dev_ctx.stream(), input_data,
                       in_d,
                       in_h,
                       in_w,
                       n,
                       in_cdhw,
                       output_data,
                       out_d,
                       out_h,
                       out_w,
                       n,
                       out_cdhw,
                       c,
                       ratio_d,
                       ratio_h,
                       ratio_w,
                       align_corners,
                       align_mode,
                       data_layout);
  } else if ("nearest" == interp_method) {
    hipLaunchKernelGGL(( KeNearestNeighbor3DInterpFw<T>), dim3(config.block_per_grid),
                       dim3(config.thread_per_block),
                       0,
                       dev_ctx.stream(), input_data,
                       in_d,
                       in_h,
                       in_w,
                       n,
                       in_cdhw,
                       output_data,
                       out_d,
                       out_h,
                       out_w,
                       n,
                       out_cdhw,
                       c,
                       ratio_d,
                       ratio_h,
                       ratio_w,
                       align_corners,
                       data_layout);
  }
}
// Shared forward entry point: dispatches to the 1-D / 2-D / 3-D driver
// based on the rank of the input tensor (3 / 4 / 5 respectively). Any
// other rank falls through with no work, matching the original chain of
// if / else-if branches.
template <typename T, typename Context>
void InterpolateKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  switch (x.dims().size()) {
    case 3:  // 1D interpolation
      Interpolate1DCUDAFwd<T, Context>(dev_ctx,
                                       x,
                                       out_size,
                                       size_tensor,
                                       scale_tensor,
                                       data_layout,
                                       out_w,
                                       scale,
                                       interp_method,
                                       align_corners,
                                       align_mode,
                                       output);
      break;
    case 4:  // 2D interpolation
      Interpolate2DCUDAFwd<T, Context>(dev_ctx,
                                       x,
                                       out_size,
                                       size_tensor,
                                       scale_tensor,
                                       data_layout,
                                       out_h,
                                       out_w,
                                       scale,
                                       interp_method,
                                       align_corners,
                                       align_mode,
                                       output);
      break;
    case 5:  // 3D interpolation
      Interpolate3DCUDAFwd<T, Context>(dev_ctx,
                                       x,
                                       out_size,
                                       size_tensor,
                                       scale_tensor,
                                       data_layout,
                                       out_d,
                                       out_h,
                                       out_w,
                                       scale,
                                       interp_method,
                                       align_corners,
                                       align_mode,
                                       output);
      break;
    default:
      break;
  }
}
// Public bilinear-interpolation entry point: a thin alias that forwards
// every argument unchanged to the shared InterpolateKernel dispatcher.
template <typename T, typename Context>
void BilinearInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  InterpolateKernel<T, Context>(dev_ctx,
                                x,
                                out_size,
                                size_tensor,
                                scale_tensor,
                                data_layout,
                                out_d,
                                out_h,
                                out_w,
                                scale,
                                interp_method,
                                align_corners,
                                align_mode,
                                output);
}
// Public nearest-neighbor-interpolation entry point: a thin alias that
// forwards every argument unchanged to the shared InterpolateKernel
// dispatcher.
template <typename T, typename Context>
void NearestInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  InterpolateKernel<T, Context>(dev_ctx,
                                x,
                                out_size,
                                size_tensor,
                                scale_tensor,
                                data_layout,
                                out_d,
                                out_h,
                                out_w,
                                scale,
                                interp_method,
                                align_corners,
                                align_mode,
                                output);
}
template <typename T, typename Context>
void TrilinearInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  // Thin wrapper: forwards all arguments to the generic dispatcher,
  // which selects the 1-D/2-D/3-D path from the rank of `x`.
  InterpolateKernel<T, Context>(dev_ctx, x, out_size, size_tensor,
                                scale_tensor, data_layout, out_d, out_h, out_w,
                                scale, interp_method, align_corners,
                                align_mode, output);
}
template <typename T, typename Context>
void LinearInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  // Thin wrapper: forwards all arguments to the generic dispatcher,
  // which selects the 1-D/2-D/3-D path from the rank of `x`.
  InterpolateKernel<T, Context>(dev_ctx, x, out_size, size_tensor,
                                scale_tensor, data_layout, out_d, out_h, out_w,
                                scale, interp_method, align_corners,
                                align_mode, output);
}
template <typename T, typename Context>
void BicubicInterpKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout,
    int out_d,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  // Thin wrapper: forwards all arguments to the generic dispatcher,
  // which selects the 1-D/2-D/3-D path from the rank of `x`.
  InterpolateKernel<T, Context>(dev_ctx, x, out_size, size_tensor,
                                scale_tensor, data_layout, out_d, out_h, out_w,
                                scale, interp_method, align_corners,
                                align_mode, output);
}
} // namespace phi
// GPU kernel registrations for the interpolate forward ops.
// Inputs 1-3 (OutSize, SizeTensor, Scale) are declared ALL_BACKEND
// because the host wrappers copy them to CPU before reading them
// (see the phi::Copy(..., CPUPlace(), ...) calls above).
PD_REGISTER_KERNEL(bilinear_interp,
                   GPU,
                   ALL_LAYOUT,
                   phi::BilinearInterpKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int) {
  kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
// NOTE: nearest_interp additionally supports int64_t inputs, unlike the
// other interpolation modes registered here.
PD_REGISTER_KERNEL(nearest_interp,
                   GPU,
                   ALL_LAYOUT,
                   phi::NearestInterpKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int,
                   int64_t) {
  kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
PD_REGISTER_KERNEL(trilinear_interp,
                   GPU,
                   ALL_LAYOUT,
                   phi::TrilinearInterpKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int) {
  kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
PD_REGISTER_KERNEL(linear_interp,
                   GPU,
                   ALL_LAYOUT,
                   phi::LinearInterpKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int) {
  kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
PD_REGISTER_KERNEL(bicubic_interp,
                   GPU,
                   ALL_LAYOUT,
                   phi::BicubicInterpKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   int) {
  kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
  kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/interpolate_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/interpolate_function.h"
#include "paddle/phi/kernels/primitive/datamover_primitives.h"
namespace phi {
using phi::kps::details::FastDivMod;
// Splits a (clamped-to-zero) source coordinate into an integer input
// index plus the pair of linear-interpolation weights, and reports via
// *x_id whether a right/lower neighbor exists (1) or the index is at the
// last input element (0).
template <typename T>
__forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
    int* in_img_idx,
    int* x_id,
    T* lambda1,
    T* lambda2,
    T src_x,
    const int in_img_x) {
  // Negative source coordinates clamp to the first input element.
  if (!(src_x > static_cast<T>(0))) {
    src_x = static_cast<T>(0);
  }
  *in_img_idx = static_cast<int>(src_x);
  *x_id = (*in_img_idx < in_img_x - 1) ? 1 : 0;
  // Compute the fractional part in the higher-precision type MT so that
  // float16/bfloat16 inputs do not lose the interpolation weight.
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  *lambda1 = static_cast<T>(static_cast<MT>(src_x) - *in_img_idx);
  *lambda2 = static_cast<T>(1.0) - *lambda1;
}
// 1-D linear interpolation forward kernel.
// The input is viewed as `output_h` rows of `input_w` elements; each
// thread of the grid-stride loop produces one output element.
// `align_mode == 0 && !align_corners` selects half-pixel source
// coordinates, otherwise plain scaling is used.
template <typename T>
__global__ void KeLinearInterpFw(const T* in,
                                 const size_t in_img_w,
                                 const size_t input_w,
                                 T* out,
                                 const size_t out_img_w,
                                 const size_t output_h,
                                 const size_t output_w,
                                 const size_t num_channels,
                                 const float ratio_w,
                                 const bool align_corners,
                                 const int align_mode,
                                 const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    // Decompose the flat offset into channel and width index.
    // (The previous version also declared an unused `out_img_idy`;
    // a 1-D kernel only needs the x index.)
    int channel_id, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Source index, clamped to 0; w_id is 1 iff a right neighbor exists.
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;  // w
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;  // w_id
    // Interpolation weights are computed in the higher-precision MT type.
    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
    MT src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
    src_w = (src_w > 0) ? src_w : 0;
    MT w1lambda = align_flag ? (src_w - in_img_idx)
                             : (ratio_w * out_img_idx - in_img_idx);
    MT w2lambda = 1.0 - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      const T* in_pos =
          &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] =
          static_cast<T>(w2lambda * static_cast<MT>(in_pos[0]) +
                         w1lambda * static_cast<MT>(in_pos[w_id]));
    } else {
      const T* in_pos =
          &in[out_id_h * input_w + in_img_idx * num_channels + channel_id];
      // linear interpolation
      out[out_id_h * output_w + out_id_w] = static_cast<T>(
          w2lambda * static_cast<MT>(in_pos[0]) +
          w1lambda * static_cast<MT>(in_pos[w_id * num_channels]));
    }
  }
}
// Nearest-neighbor 2-D forward for NCHW data.
// Expects a 3-D launch: x -> output width, y -> output height,
// z -> flattened N*C ("nc"); each thread strides over nc in z.
// align_corners rounds the source index to nearest, otherwise it
// truncates (floor for non-negative values).
template <typename T>
__global__ void KeNearestNeighborInterpNCHWFw(const T* in,
                                              const size_t in_img_h,
                                              const size_t in_img_w,
                                              T* out,
                                              const size_t out_img_h,
                                              const size_t out_img_w,
                                              const size_t nc,
                                              const float ratio_h,
                                              const float ratio_w,
                                              const bool align_corners) {
  int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
  int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
  int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
  int nc_stride = blockDim.z * gridDim.z;
  // nearest_sampling by multiple read in_addr and write to out_addr
  int in_img_idx = (align_corners)
                       ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                       : static_cast<int>(ratio_w * out_img_idx);
  int in_img_idy = (align_corners)
                       ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                       : static_cast<int>(ratio_h * out_img_idy);
  // Walk the (n, c) planes with a fixed spatial offset; the stride terms
  // advance one full image plane per step.
  int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
  int in_index_stride = nc_stride * in_img_h * in_img_w;
  int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
  int out_index_stride = nc_stride * out_img_h * out_img_w;
  // prevent from multiple threads writing
  if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
    while (nc_id < nc) {
      out[out_index] = in[in_index];
      in_index += in_index_stride;
      out_index += out_index_stride;
      nc_id += nc_stride;
    }
  }
}
// Nearest-neighbor 2-D forward for channel-last data (the address math
// below places the channel innermost). Flat grid-stride loop over all
// output elements; `divmods` supplies precomputed fast divide/modulo
// helpers for decomposing the flat output offset.
template <typename T>
__global__ void KeNearestNeighborInterpFw(
    const T* in,
    const size_t in_img_h,
    const size_t in_img_w,
    const size_t input_h,
    const size_t input_w,
    T* out,
    const size_t out_img_h,
    const size_t out_img_w,
    const size_t output_h,
    const size_t output_w,
    const size_t num_channels,
    const float ratio_h,
    const float ratio_w,
    const bool align_corners,
    funcs::FastDivModForInterpolate divmods) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // NOTE: the previous version computed in_img_size/out_img_size here but
  // never used them; they have been removed.
  for (; tid < nthreads; tid += stride) {
    // tid -> (batch row, row offset) -> (y, x, channel).
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];
    int out_id_w = out_id_divmod.val[1];
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    // align_corners rounds to nearest; otherwise truncate.
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
                  in_img_idx * num_channels + channel_id];
  }
}
// Bilinear 2-D forward for channel-last data (channel is the innermost
// index in the address math below). Flat grid-stride loop; weights are
// accumulated in the higher-precision MT type and implicitly converted
// back to T on store.
// align_type_value is 0.5 for half-pixel mapping (align_mode==0 and
// !align_corners) and 0 otherwise — see the host-side launch.
template <typename T>
__global__ void KeBilinearInterpFw(const T* in,
                                   const size_t in_img_h,
                                   const size_t in_img_w,
                                   const size_t input_h,
                                   const size_t input_w,
                                   T* out,
                                   const size_t out_img_h,
                                   const size_t out_img_w,
                                   const size_t output_h,
                                   const size_t output_w,
                                   const size_t num_channels,
                                   const float ratio_h,
                                   const float ratio_w,
                                   const float align_type_value,
                                   funcs::FastDivModForInterpolate divmods) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    // tid -> (batch row, row offset) -> (y, x, channel) via fast divmods.
    auto out_id_divmod = divmods.output_w_div.Divmod(tid);
    int out_id_h = out_id_divmod.val[0];
    int out_id_w = out_id_divmod.val[1];
    int channel_id = divmods.channels_div.Divmod(tid).val[1];
    auto outimg_id_divmod = divmods.output_wc_div.Divmod(out_id_w);
    int out_img_idy = outimg_id_divmod.val[0];
    int out_img_idx =
        divmods.channels_div.Divmod(outimg_id_divmod.val[1]).val[0];
    int in_img_idx, in_img_idy, h_id, w_id;
    MT h1lambda, w1lambda, h2lambda, w2lambda;
    MT src_w = static_cast<MT>(ratio_w * (out_img_idx + align_type_value) -
                               align_type_value);
    MT src_h = static_cast<MT>(ratio_h * (out_img_idy + align_type_value) -
                               align_type_value);
    // Split each source coordinate into base index, neighbor flag and the
    // pair of interpolation weights.
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
    PreCalculatorForLinearInterpInputIndex(
        &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
    // bilinear interpolation
    const T* in_pos =
        &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels +
            in_img_idx * num_channels + channel_id];
    out[tid] =
        h2lambda * (w2lambda * static_cast<MT>(in_pos[0]) +
                    w1lambda * static_cast<MT>(in_pos[w_id * num_channels])) +
        h1lambda *
            (w2lambda *
                 static_cast<MT>(in_pos[h_id * in_img_w * num_channels]) +
             w1lambda * static_cast<MT>(in_pos[h_id * in_img_w * num_channels +
                                               w_id * num_channels]));
  }
}
// Bilinear 2-D forward for NCHW data.
// Expects a 3-D launch: x -> output width, y -> output height,
// z -> flattened N*C ("nc"); each thread strides over nc in z while the
// interpolation weights stay fixed for the (x, y) position.
// align_type_value is 0.5 for half-pixel mapping, 0 otherwise.
template <typename T>
__global__ void KeBilinearInterpNCHWFw(const T* in,
                                       const size_t in_img_h,
                                       const size_t in_img_w,
                                       T* out,
                                       const size_t out_img_h,
                                       const size_t out_img_w,
                                       const size_t nc,
                                       const float ratio_h,
                                       const float ratio_w,
                                       const float align_type_value) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  int out_img_idx = threadIdx.x + blockIdx.x * blockDim.x;
  int out_img_idy = threadIdx.y + blockIdx.y * blockDim.y;
  int nc_id = threadIdx.z + blockIdx.z * blockDim.z;
  int nc_stride = blockDim.z * gridDim.z;
  int in_img_idx, in_img_idy, h_id, w_id;
  MT h1lambda, w1lambda, h2lambda, w2lambda;
  MT src_w = static_cast<MT>(ratio_w * (out_img_idx + align_type_value) -
                             align_type_value);
  MT src_h = static_cast<MT>(ratio_h * (out_img_idy + align_type_value) -
                             align_type_value);
  // Base indices, neighbor flags and weights for both axes.
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idx, &w_id, &w1lambda, &w2lambda, src_w, in_img_w);
  PreCalculatorForLinearInterpInputIndex(
      &in_img_idy, &h_id, &h1lambda, &h2lambda, src_h, in_img_h);
  // Per-plane offsets; each loop iteration advances one (n, c) plane.
  int in_index = (nc_id * in_img_h + in_img_idy) * in_img_w + in_img_idx;
  int in_index_stride = nc_stride * in_img_h * in_img_w;
  int out_index = (nc_id * out_img_h + out_img_idy) * out_img_w + out_img_idx;
  int out_index_stride = nc_stride * out_img_h * out_img_w;
  // prevent from multiple threads writing
  if (out_img_idx < out_img_w && out_img_idy < out_img_h) {
    while (nc_id < nc) {
      const T* in_pos = &in[in_index];
      // Accumulate in MT, then convert back to T once per element.
      out[out_index] = static_cast<T>(
          h2lambda * (w2lambda * static_cast<MT>(in_pos[0]) +
                      w1lambda * static_cast<MT>(in_pos[w_id])) +
          h1lambda *
              (w2lambda * static_cast<MT>(in_pos[h_id * in_img_w]) +
               w1lambda * static_cast<MT>(in_pos[h_id * in_img_w + w_id])));
      in_index += in_index_stride;
      out_index += out_index_stride;
      nc_id += nc_stride;
    }
  }
}
// Cubic convolution of four consecutive samples x0..x3 at fractional
// position t in [0, 1) between x1 and x2, with coefficient a = -0.75.
// All arithmetic runs in the higher-precision MT type.
template <typename T>
__device__ __forceinline__ static T Kecubic_interp(
    const T x0, const T x1, const T x2, const T x3, T t) {
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  const MT a = static_cast<MT>(-0.75);
  const MT t_near = static_cast<MT>(t);
  const MT t_far = static_cast<MT>(1.0) - static_cast<MT>(t);
  // Weights for the outer taps use CubicConvolution2 (|d| in [1, 2)),
  // inner taps use CubicConvolution1 (|d| in [0, 1)).
  const MT w0 = funcs::CubicConvolution2<MT>(t_near + static_cast<MT>(1.0), a);
  const MT w1 = funcs::CubicConvolution1<MT>(t_near, a);
  const MT w2 = funcs::CubicConvolution1<MT>(t_far, a);
  const MT w3 = funcs::CubicConvolution2<MT>(t_far + static_cast<MT>(1.0), a);
  return static_cast<T>(
      static_cast<MT>(x0) * w0 + static_cast<MT>(x1) * w1 +
      static_cast<MT>(x2) * w2 + static_cast<MT>(x3) * w3);
}
// Bicubic 2-D forward, handling both NCHW and NHWC via `data_layout`.
// Flat grid-stride loop; for each output element, interpolates four
// rows with Kecubic_interp along x, then combines the four row results
// along y. Sample indices are clamped to the image border.
template <typename T>
__global__ void KeBicubicInterpFw(const T* in,
                                  const size_t in_img_h,
                                  const size_t in_img_w,
                                  const size_t input_h,
                                  const size_t input_w,
                                  T* out,
                                  const size_t out_img_h,
                                  const size_t out_img_w,
                                  const size_t output_h,
                                  const size_t output_w,
                                  const size_t num_channels,
                                  const float ratio_h,
                                  const float ratio_w,
                                  const bool align_corners,
                                  const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  using MT = typename phi::dtype::MPTypeTrait<T>::Type;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    // Decompose the flat offset into (channel, y, x) per layout.
    int channel_id, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idy = (out_id_w % out_img_size) / out_img_w;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idy = out_id_w / (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Fractional source coordinates (half-pixel mapping unless
    // align_corners); input_y/input_x are the integer bases, y_t/x_t the
    // fractional interpolation positions.
    MT in_img_idy = align_corners ? ratio_h * out_img_idy
                                  : ratio_h * (out_img_idy + 0.5) - 0.5;
    int input_y = floorf(static_cast<float>(in_img_idy));
    const T y_t = static_cast<T>(in_img_idy - input_y);
    MT in_img_idx = align_corners ? ratio_w * out_img_idx
                                  : ratio_w * (out_img_idx + 0.5) - 0.5;
    int input_x = floorf(static_cast<float>(in_img_idx));
    const T x_t = static_cast<T>(in_img_idx - input_x);
    T coefficients[4];
    const T* in_pos_0;
    const T* in_pos_1;
    const T* in_pos_2;
    const T* in_pos_3;
    int access_x_0;
    if (data_layout == DataLayout::kNCHW) {
      // Four rows (k = 0..3), each sampled at four border-clamped columns.
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0);
        access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0);
        in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_0];
        in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_1];
        in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_2];
        in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size +
                       access_y * in_img_w + access_x_3];
        coefficients[k] = Kecubic_interp<T>(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      // Combine the four row results along y.
      out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
                                                              coefficients[1],
                                                              coefficients[2],
                                                              coefficients[3],
                                                              y_t);
    } else {
      // Channel-last variant: same sampling pattern, channel innermost.
      for (int k = 0; k < 4; k++) {
        int access_y =
            max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0);
        int access_x_0 =
            max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_1 =
            max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0);
        int access_x_2 =
            max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0);
        int access_x_3 =
            max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0);
        const T* in_pos_0 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_0 * num_channels + channel_id];
        const T* in_pos_1 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_1 * num_channels + channel_id];
        const T* in_pos_2 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_2 * num_channels + channel_id];
        const T* in_pos_3 =
            &in[out_id_h * input_w + access_y * in_img_w * num_channels +
                access_x_3 * num_channels + channel_id];
        coefficients[k] = Kecubic_interp<T>(
            in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t);
      }
      out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0],
                                                              coefficients[1],
                                                              coefficients[2],
                                                              coefficients[3],
                                                              y_t);
    }
  }
}
// Trilinear 3-D forward, handling both NCDHW and NDHWC via
// `data_layout`. Flat grid-stride loop: each output element blends the
// eight surrounding input samples with depth/height/width weights
// (d/h/w lambdas). Half-pixel source mapping is used when
// align_mode == 0 and !align_corners.
template <typename T>
__global__ void KeTrilinearInterpFw(const T* in,
                                    const size_t in_img_d,
                                    const size_t in_img_h,
                                    const size_t in_img_w,
                                    const size_t input_h,
                                    const size_t input_w,
                                    T* out,
                                    const size_t out_img_d,
                                    const size_t out_img_h,
                                    const size_t out_img_w,
                                    const size_t output_h,
                                    const size_t output_w,
                                    const size_t num_channels,
                                    const float ratio_d,
                                    const float ratio_h,
                                    const float ratio_w,
                                    const bool align_corners,
                                    const int align_mode,
                                    const DataLayout data_layout) {
  int nthreads = output_h * output_w;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  bool align_flag = (align_mode == 0 && !align_corners);
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    // Decompose the flat offset into (channel, t, y, x) per layout.
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    // Depth axis: clamped base index, neighbor flag, and weights.
    int in_img_idt = align_flag
                         ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    in_img_idt = (in_img_idt > 0) ? in_img_idt : 0;
    int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0;
    using MT = typename phi::dtype::MPTypeTrait<T>::Type;
    T src_d = static_cast<T>(ratio_d * (out_img_idt + 0.5) - 0.5);
    src_d = (src_d > static_cast<T>(0)) ? src_d : static_cast<T>(0);
    T d1lambda = align_flag
                     ? static_cast<T>(static_cast<MT>(src_d) - in_img_idt)
                     : static_cast<T>(ratio_d * out_img_idt - in_img_idt);
    T d2lambda = static_cast<T>(1.0) - d1lambda;
    // Height axis.
    int in_img_idy = align_flag
                         ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
    int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
    T src_h = static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5);
    src_h = (src_h > static_cast<T>(0)) ? src_h : static_cast<T>(0);
    T h1lambda = align_flag
                     ? static_cast<T>(static_cast<MT>(src_h) - in_img_idy)
                     : static_cast<T>(ratio_h * out_img_idy - in_img_idy);
    T h2lambda = static_cast<T>(1.0) - h1lambda;
    // Width axis.
    int in_img_idx = align_flag
                         ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
    int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
    T src_w = static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5);
    src_w = (src_w > static_cast<T>(0)) ? src_w : static_cast<T>(0);
    T w1lambda = align_flag
                     ? static_cast<T>(static_cast<MT>(src_w) - in_img_idx)
                     : static_cast<T>(ratio_w * out_img_idx - in_img_idx);
    T w2lambda = static_cast<T>(1.0) - w1lambda;
    if (data_layout == DataLayout::kNCHW) {
      // in_pos1 is the front (t) plane, in_pos2 the back (t+1) plane
      // when a depth neighbor exists (d_id == 1).
      int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size +
                        (in_img_idt * in_img_h + in_img_idy) * in_img_w +
                        in_img_idx;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w] +
                           w1lambda * in_pos1[h_id * in_img_w + w_id])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w] +
                           w1lambda * in_pos2[h_id * in_img_w + w_id]));
    } else {
      // Channel-last variant: strides carry num_channels.
      int in_pos1_idx = out_id_h * input_w +
                        in_img_idt * in_img_h * in_img_w * num_channels +
                        in_img_idy * in_img_w * num_channels +
                        in_img_idx * num_channels + channel_id;
      const T* in_pos1 = &in[in_pos1_idx];
      int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels;
      const T* in_pos2 = &in[in_pos2_idx];
      // trilinear interpolation
      out[out_id_h * output_w + out_id_w] =
          d2lambda *
              (h2lambda * (w2lambda * in_pos1[0] +
                           w1lambda * in_pos1[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos1[h_id * in_img_w * num_channels +
                                              w_id * num_channels])) +
          d1lambda *
              (h2lambda * (w2lambda * in_pos2[0] +
                           w1lambda * in_pos2[w_id * num_channels]) +
               h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] +
                           w1lambda * in_pos2[h_id * in_img_w * num_channels +
                                              w_id * num_channels]));
    }
  }
}
// Nearest-neighbor 3-D forward, handling both NCDHW and NDHWC via
// `data_layout`. Flat grid-stride loop over all output elements; each
// source index rounds to nearest when align_corners, truncates
// otherwise.
template <typename T>
__global__ void KeNearestNeighbor3DInterpFw(const T* in,
                                            const size_t in_img_d,
                                            const size_t in_img_h,
                                            const size_t in_img_w,
                                            const size_t input_h,
                                            const size_t input_w,
                                            T* out,
                                            const size_t out_img_d,
                                            const size_t out_img_h,
                                            const size_t out_img_w,
                                            const size_t output_h,
                                            const size_t output_w,
                                            const size_t num_channels,
                                            const float ratio_d,
                                            const float ratio_h,
                                            const float ratio_w,
                                            const bool align_corners,
                                            const DataLayout data_layout) {
  int nthreads = output_h * output_w;  // ncdhw
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < nthreads; tid += stride) {
    int out_id_h = tid / output_w;
    int out_id_w = tid % output_w;
    int in_img_size = input_w / num_channels;
    int out_img_size = output_w / num_channels;
    // Decompose the flat offset into (channel, t, y, x) per layout.
    int channel_id, out_img_idt, out_img_idy, out_img_idx;
    if (data_layout == DataLayout::kNCHW) {
      channel_id = out_id_w / out_img_size;
      out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w;
      out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h;
      out_img_idx = tid % out_img_w;
    } else {
      out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels);
      out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) /
                    (out_img_w * num_channels);
      out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels;
      channel_id = tid % num_channels;
    }
    int in_img_idt = (align_corners)
                         ? static_cast<int>(ratio_d * out_img_idt + 0.5)
                         : static_cast<int>(ratio_d * out_img_idt);
    int in_img_idy = (align_corners)
                         ? static_cast<int>(ratio_h * out_img_idy + 0.5)
                         : static_cast<int>(ratio_h * out_img_idy);
    int in_img_idx = (align_corners)
                         ? static_cast<int>(ratio_w * out_img_idx + 0.5)
                         : static_cast<int>(ratio_w * out_img_idx);
    if (data_layout == DataLayout::kNCHW) {
      out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
                    in_img_idt * in_img_h * in_img_w + in_img_idy * in_img_w +
                    in_img_idx];
    } else {
      out[tid] = in[out_id_h * input_w +
                    in_img_idt * in_img_h * in_img_w * num_channels +
                    in_img_idy * in_img_w * num_channels +
                    in_img_idx * num_channels + channel_id];
    }
  }
}
// Host-side driver for 1-D ("linear") interpolation forward.
// Resolves the output width from, in priority order: SizeTensor,
// Scale tensor / scale attribute, then the OutSize tensor; resizes
// `output` accordingly and launches KeLinearInterpFw. A same-size
// input is copied through unchanged.
template <typename T, typename Context>
static void Interpolate1DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get())
    out_w = new_size[0];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      scale_w = scale_data[0];
      // FIX: scale_w is a float; the previous message formatted it
      // with %d, which rendered the value incorrectly.
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
    } else {
      if (scale.size() > 0) {
        scale_w = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
      }
    }
    if (scale_w > 0.) {
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize (if present) overrides any scale-derived width.
    if (out_size) {
      DenseTensor sizes;
      phi::Copy(dev_ctx, *out_size, phi::CPUPlace(), true, &sizes);
      auto size_data = sizes.data<int>();
      out_w = size_data[0];
    }
  }
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_w};
  } else {
    dim_out = {n, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  if (in_w == out_w) {
    // Identity resize: plain device-to-device copy.
    phi::Copy(dev_ctx, input, dev_ctx.GetPlace(), false, output);
    return;
  }
  // Width ratio maps output indices back to input coordinates; with
  // align_corners the endpoints coincide exactly.
  float ratio_w = 0.f;
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1.0) / (out_w - 1.0)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_cw = c * in_w;
  int64_t out_cw = c * out_w;
  auto pixelNum = n * out_cw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("linear" == interp_method) {
    KeLinearInterpFw<T><<<config.block_per_grid,
                          config.thread_per_block,
                          0,
                          dev_ctx.stream()>>>(input_data,
                                              in_w,
                                              in_cw,
                                              output_data,
                                              out_w,
                                              n,
                                              out_cw,
                                              c,
                                              ratio_w,
                                              align_corners,
                                              align_mode,
                                              data_layout);
  }
}
// Host-side driver for 2-D interpolation forward (nearest, bilinear,
// bicubic). Resolves (out_h, out_w) from, in priority order:
// SizeTensor, Scale tensor / scale attribute, then the OutSize tensor;
// resizes `output` and launches the kernel matching `interp_method`
// and the data layout. A same-size input is copied through unchanged.
template <typename T, typename Context>
static void Interpolate2DCUDAFwd(
    const Context& dev_ctx,
    const DenseTensor& input,
    const paddle::optional<DenseTensor>& out_size,
    const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
    const paddle::optional<DenseTensor>& scale_tensor,
    const std::string& data_layout_str,
    int out_h,
    int out_w,
    const std::vector<float>& scale,
    const std::string& interp_method,
    bool align_corners,
    int align_mode,
    DenseTensor* output) {
  auto* input_data = input.data<T>();
  const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
  int n, c, in_d, in_h, in_w;
  funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
  float scale_w = -1;
  float scale_h = -1;
  if (size_tensor && size_tensor->size() > 0) {
    // have size tensor
    auto new_size = funcs::get_new_shape(size_tensor.get());
    out_h = new_size[0];
    out_w = new_size[1];
  } else {
    if (scale_tensor) {
      auto scale_data =
          funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
      // A one-element Scale tensor applies to both axes.
      if (scale_data.size() > 1) {
        scale_h = scale_data[0];
        scale_w = scale_data[1];
      } else {
        scale_h = scale_data[0];
        scale_w = scale_data[0];
      }
      // FIX: scales are floats; the previous messages formatted them
      // with %d, which rendered the values incorrectly.
      PADDLE_ENFORCE_EQ(
          scale_w > 0,
          true,
          errors::InvalidArgument(
              "The scale_w in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_w));
      PADDLE_ENFORCE_EQ(
          scale_h > 0,
          true,
          errors::InvalidArgument(
              "The scale_h in input 'Scale' Tensor of Operator(interpolate) "
              "should be greater than 0, but received value is %f.",
              scale_h));
    } else {
      if (scale.size() > 1) {
        scale_w = scale[1];
        scale_h = scale[0];
        PADDLE_ENFORCE_EQ(
            scale_w > 0,
            true,
            errors::InvalidArgument(
                "The scale_w in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_w));
        PADDLE_ENFORCE_EQ(
            scale_h > 0,
            true,
            errors::InvalidArgument(
                "The scale_h in Attr(scale) of Operator(interpolate) "
                "should be greater than 0, but received value is %f.",
                scale_h));
      }
    }
    if (scale_w > 0. && scale_h > 0.) {
      out_h = static_cast<int>(in_h * scale_h);
      out_w = static_cast<int>(in_w * scale_w);
    }
    // OutSize (if present) overrides any scale-derived shape.
    if (out_size) {
      DenseTensor sizes;
      phi::Copy(dev_ctx, *out_size, phi::CPUPlace(), true, &sizes);
      auto size_data = sizes.data<int>();
      out_h = size_data[0];
      out_w = size_data[1];
    }
  }
  PADDLE_ENFORCE_GT(
      out_h,
      0,
      errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  PADDLE_ENFORCE_GT(
      out_w,
      0,
      errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
                              "should be greater than 0."));
  phi::DDim dim_out;
  if (data_layout == DataLayout::kNCHW) {
    dim_out = {n, c, out_h, out_w};
  } else {
    dim_out = {n, out_h, out_w, c};
  }
  output->Resize(dim_out);
  auto output_data = dev_ctx.template Alloc<T>(output);
  if (in_h == out_h && in_w == out_w) {
    // Identity resize: plain device-to-device copy.
    phi::Copy(dev_ctx, input, dev_ctx.GetPlace(), false, output);
    return;
  }
  // Per-axis ratios mapping output indices back to input coordinates;
  // with align_corners the endpoints coincide exactly.
  float ratio_h = 0.f;
  float ratio_w = 0.f;
  if (out_h > 1) {
    float new_scale_h = 0.f;
    new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
                                : static_cast<float>(in_h) / out_h;
    ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
                              : static_cast<float>(new_scale_h);
  }
  if (out_w > 1) {
    float new_scale_w = 0.f;
    new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
                                : static_cast<float>(in_w) / out_w;
    ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
                              : static_cast<float>(new_scale_w);
  }
  int64_t in_hw = in_h * in_w;
  int64_t out_hw = out_h * out_w;
  int64_t in_chw = c * in_hw;
  int64_t out_chw = c * out_hw;
  auto pixelNum = n * out_chw;
  backends::gpu::GpuLaunchConfig config =
      backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
  if ("nearest" == interp_method) {
    if (data_layout == DataLayout::kNCHW) {
      // get launch 3D config
      int nc = n * c;
      backends::gpu::GpuLaunchConfig config_3d =
          backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
      KeNearestNeighborInterpNCHWFw<T><<<config_3d.block_per_grid,
                                         config_3d.thread_per_block,
                                         0,
                                         dev_ctx.stream()>>>(input_data,
                                                             in_h,
                                                             in_w,
                                                             output_data,
                                                             out_h,
                                                             out_w,
                                                             nc,
                                                             ratio_h,
                                                             ratio_w,
                                                             align_corners);
    } else {
      int64_t cw = c * out_w;
      auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
      KeNearestNeighborInterpFw<T><<<config.block_per_grid,
                                     config.thread_per_block,
                                     0,
                                     dev_ctx.stream()>>>(input_data,
                                                         in_h,
                                                         in_w,
                                                         n,
                                                         in_chw,
                                                         output_data,
                                                         out_h,
                                                         out_w,
                                                         n,
                                                         out_chw,
                                                         c,
                                                         ratio_h,
                                                         ratio_w,
                                                         align_corners,
                                                         interp_divmods);
    }
  } else if ("bilinear" == interp_method) {
    dim3 thread_num = config.thread_per_block;
#ifdef WITH_NV_JETSON
    if (config.compute_capability == 53 || config.compute_capability == 62) {
      thread_num = 512;
    }
#endif
    // 0.5 selects half-pixel source mapping inside the kernel.
    const float align_type_value =
        (align_mode == 0 && !align_corners) ? 0.5f : 0.f;
    if (data_layout == DataLayout::kNCHW) {
      // get launch 3D config
      int nc = n * c;
      backends::gpu::GpuLaunchConfig config_3d =
          backends::gpu::GetGpuLaunchConfig3D(dev_ctx, nc, out_h, out_w);
      KeBilinearInterpNCHWFw<T><<<config_3d.block_per_grid,
                                  config_3d.thread_per_block,
                                  0,
                                  dev_ctx.stream()>>>(input_data,
                                                      in_h,
                                                      in_w,
                                                      output_data,
                                                      out_h,
                                                      out_w,
                                                      nc,
                                                      ratio_h,
                                                      ratio_w,
                                                      align_type_value);
    } else {
      int64_t cw = c * out_w;
      auto interp_divmods = funcs::FastDivModForInterpolate(c, out_chw, cw);
      KeBilinearInterpFw<T>
          <<<config.block_per_grid, thread_num, 0, dev_ctx.stream()>>>(
              input_data,
              in_h,
              in_w,
              n,
              in_chw,
              output_data,
              out_h,
              out_w,
              n,
              out_chw,
              c,
              ratio_h,
              ratio_w,
              align_type_value,
              interp_divmods);
    }
  } else if ("bicubic" == interp_method) {
    constexpr int thread_per_block = 512;
    KeBicubicInterpFw<T>
        <<<config.block_per_grid, thread_per_block, 0, dev_ctx.stream()>>>(
            input_data,
            in_h,
            in_w,
            n,
            in_chw,
            output_data,
            out_h,
            out_w,
            n,
            out_chw,
            c,
            ratio_h,
            ratio_w,
            align_corners,
            data_layout);
  }
}
template <typename T, typename Context>
static void Interpolate3DCUDAFwd(
const Context& dev_ctx,
const DenseTensor& input,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout_str,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto* input_data = input.data<T>();
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
int n, c, in_d, in_h, in_w;
funcs::ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w);
float scale_w = -1;
float scale_d = -1;
float scale_h = -1;
if (size_tensor && size_tensor->size() > 0) {
// have size tensor
auto new_size = funcs::get_new_shape(size_tensor.get());
out_d = new_size[0];
out_h = new_size[1];
out_w = new_size[2];
} else {
if (scale_tensor) {
auto scale_data =
funcs::get_new_data_from_tensor<float>(scale_tensor.get_ptr());
if (scale_data.size() > 2) {
scale_d = scale_data[0];
scale_h = scale_data[1];
scale_w = scale_data[2];
} else {
scale_d = scale_data[0];
scale_h = scale_data[0];
scale_w = scale_data[0];
}
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in input 'Scale' Tensor of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
} else {
if (scale.size() > 2) {
scale_d = scale[0];
scale_h = scale[1];
scale_w = scale[2];
PADDLE_ENFORCE_EQ(
scale_w > 0,
true,
errors::InvalidArgument(
"The scale_w in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_w));
PADDLE_ENFORCE_EQ(
scale_h > 0,
true,
errors::InvalidArgument(
"The scale_h in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_h));
PADDLE_ENFORCE_EQ(
scale_d > 0,
true,
errors::InvalidArgument(
"The scale_d in Attr(scale) of Operator(interpolate) "
"should be greater than 0, but received value is %d.",
scale_d));
}
}
if (scale_d > 0. && scale_h > 0. && scale_w > 0.) {
out_d = static_cast<int>(in_d * scale_d);
out_h = static_cast<int>(in_h * scale_h);
out_w = static_cast<int>(in_w * scale_w);
}
if (out_size) {
DenseTensor sizes;
phi::Copy(dev_ctx, *out_size, phi::CPUPlace(), true, &sizes);
auto size_data = sizes.data<int>();
out_d = size_data[0];
out_h = size_data[1];
out_w = size_data[2];
}
}
PADDLE_ENFORCE_GT(
out_d,
0,
errors::InvalidArgument("out_d in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_h,
0,
errors::InvalidArgument("out_h in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
PADDLE_ENFORCE_GT(
out_w,
0,
errors::InvalidArgument("out_w in Attr(out_shape) of Op(interpolate) "
"should be greater than 0."));
phi::DDim dim_out;
if (data_layout == DataLayout::kNCHW) {
dim_out = {n, c, out_d, out_h, out_w};
} else {
dim_out = {n, out_d, out_h, out_w, c};
}
output->Resize(dim_out);
auto output_data = dev_ctx.template Alloc<T>(output);
if (in_d == out_d && in_h == out_h && in_w == out_w) {
phi::Copy(dev_ctx, input, dev_ctx.GetPlace(), false, output);
return;
}
float ratio_d = 0.f;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_d > 1) {
float new_scale_d = 0.f;
new_scale_d = (scale_d > 0) ? static_cast<float>(1. / scale_d)
: static_cast<float>(in_d) / out_d;
ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1)
: static_cast<float>(new_scale_d);
}
if (out_h > 1) {
float new_scale_h = 0.f;
new_scale_h = (scale_h > 0) ? static_cast<float>(1. / scale_h)
: static_cast<float>(in_h) / out_h;
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(new_scale_h);
}
if (out_w > 1) {
float new_scale_w = 0.f;
new_scale_w = (scale_w > 0) ? static_cast<float>(1. / scale_w)
: static_cast<float>(in_w) / out_w;
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(new_scale_w);
}
int64_t in_dhw = in_d * in_h * in_w;
int64_t out_dhw = out_d * out_h * out_w;
int64_t in_cdhw = c * in_dhw;
int64_t out_cdhw = c * out_dhw;
auto pixelNum = n * out_cdhw;
backends::gpu::GpuLaunchConfig config =
backends::gpu::GetGpuLaunchConfig1D(dev_ctx, pixelNum);
if ("trilinear" == interp_method) {
KeTrilinearInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
align_mode,
data_layout);
} else if ("nearest" == interp_method) {
KeNearestNeighbor3DInterpFw<T><<<config.block_per_grid,
config.thread_per_block,
0,
dev_ctx.stream()>>>(input_data,
in_d,
in_h,
in_w,
n,
in_cdhw,
output_data,
out_d,
out_h,
out_w,
n,
out_cdhw,
c,
ratio_d,
ratio_h,
ratio_w,
align_corners,
data_layout);
}
}
template <typename T, typename Context>
void InterpolateKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
auto input_dims = x.dims();
if (input_dims.size() == 3) { // 1D interpolation
Interpolate1DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 4) { // 2D interpolation
Interpolate2DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
} else if (input_dims.size() == 5) { // 3D interpolation
Interpolate3DCUDAFwd<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
}
template <typename T, typename Context>
void BilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void NearestInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void TrilinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void LinearInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
template <typename T, typename Context>
void BicubicInterpKernel(
const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& out_size,
const paddle::optional<std::vector<const DenseTensor*>>& size_tensor,
const paddle::optional<DenseTensor>& scale_tensor,
const std::string& data_layout,
int out_d,
int out_h,
int out_w,
const std::vector<float>& scale,
const std::string& interp_method,
bool align_corners,
int align_mode,
DenseTensor* output) {
InterpolateKernel<T, Context>(dev_ctx,
x,
out_size,
size_tensor,
scale_tensor,
data_layout,
out_d,
out_h,
out_w,
scale,
interp_method,
align_corners,
align_mode,
output);
}
} // namespace phi
PD_REGISTER_KERNEL(bilinear_interp,
GPU,
ALL_LAYOUT,
phi::BilinearInterpKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int) {
kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
PD_REGISTER_KERNEL(nearest_interp,
GPU,
ALL_LAYOUT,
phi::NearestInterpKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int,
int64_t) {
kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
PD_REGISTER_KERNEL(trilinear_interp,
GPU,
ALL_LAYOUT,
phi::TrilinearInterpKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int) {
kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
PD_REGISTER_KERNEL(linear_interp,
GPU,
ALL_LAYOUT,
phi::LinearInterpKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int) {
kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
PD_REGISTER_KERNEL(bicubic_interp,
GPU,
ALL_LAYOUT,
phi::BicubicInterpKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int) {
kernel->InputAt(1).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(2).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(3).SetBackend(phi::Backend::ALL_BACKEND);
}
|
88121aca0931ff99b029da08c40f6391d2b85070.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
__global__ void add_double(int n, double *a, double *b, double *sum) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
sum[i] = a[i] + b[i];
}
} | 88121aca0931ff99b029da08c40f6391d2b85070.cu | #include "includes.h"
extern "C"
__global__ void add_double(int n, double *a, double *b, double *sum) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
sum[i] = a[i] + b[i];
}
} |
9d9f4353131713ac6d4bc065c9a08ac49e055f91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* MIT License
Copyright (c) 2018 Biro Eniko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "util/common.h"
#include "util/renderer.h"
#include "hitables/sphere.h"
#include "hitables/hitableList.h"
#include "util/camera.h"
#include "materials/material.h"
#include "util/scene.cuh"
#include "util/window.h"
CUDA_DEV int numHitables = 0;
#ifdef CUDA_ENABLED
void initializeWorldCuda(bool showWindow, bool writeImagePPM, bool writeImagePNG, hitable*** list, hitable** world, Window** w, Image** image, Camera** cam, Renderer** renderer)
{
int choice = 4;
switch(choice)
{
case 0:
numHitables = 4;
break;
case 1:
numHitables = 58;
break;
case 2:
numHitables = 901;
break;
case 3:
numHitables = 102;
break;
case 4:
numHitables = 68;
break;
}
// World
checkCudaErrors(hipMallocManaged(list, numHitables*sizeof(hitable*)));
hitable** worldPtr;
checkCudaErrors(hipMallocManaged(&worldPtr, sizeof(hitable*)));
switch(choice)
{
case 0:
hipLaunchKernelGGL(( simpleScene), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 1:
hipLaunchKernelGGL(( simpleScene2), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 2:
hipLaunchKernelGGL(( randomScene), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 3:
hipLaunchKernelGGL(( randomScene2), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
case 4:
hipLaunchKernelGGL(( randomScene3), dim3(1),dim3(1), 0, 0, *list, worldPtr);
break;
}
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
*world = *worldPtr;
checkCudaErrors(hipFree(worldPtr));
// Camera
vec3 lookFrom(13.0f, 2.0f, 3.0f);
vec3 lookAt(0.0f, 0.0f, 0.0f);
checkCudaErrors(hipMallocManaged(cam, sizeof(Camera)));
new (*cam) Camera(lookFrom, lookAt, vec3(0.0f, 1.0f, 0.0f), 20.0f, float(nx)/float(ny), distToFocus);
// Renderer
checkCudaErrors(hipMallocManaged(renderer, sizeof(Renderer)));
new (*renderer) Renderer(showWindow, writeImagePPM, writeImagePNG);
// Image
checkCudaErrors(hipMallocManaged(image, sizeof(Image)));
new (*image) Image(showWindow, writeImagePPM || writeImagePNG, nx, ny, tx, ty);
// Window
if (showWindow)
*w = new Window(*cam, *renderer, nx, ny, thetaInit, phiInit, zoomScale, stepScale);
}
CUDA_GLOBAL void freeWorldCuda(hitable** list, hitable** world)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
{
for (int i = 0; i < numHitables; i++)
{
delete ((sphere *)list[i])->matPtr;
delete list[i];
}
//delete *world;
}
}
void destroyWorldCuda(bool showWindow, hitable** list, hitable* world, Window* w, Image* image, Camera* cam, Renderer* render)
{
hipLaunchKernelGGL(( freeWorldCuda), dim3(1),dim3(1), 0, 0, list, &world);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(cam));
checkCudaErrors(hipFree(render));
checkCudaErrors(hipFree(image));
}
CUDA_GLOBAL void render(Camera* cam, Image* image, hitable* world, Renderer* render, int sampleCount)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= image->nx) || (j >= image->ny))
return;
int pixelIndex = j*image->nx + i;
// Render the samples in batches
for (int s = 0; s < nsBatch; s++)
{
RandomGenerator rng(sampleCount * nsBatch + s, pixelIndex);
float u = float(i + rng.get1f()) / float(image->nx); // left to right
float v = float(j + rng.get1f()) / float(image->ny); // bottom to top
ray r = cam->getRay(rng, u, v);
image->pixels[pixelIndex] += render->color(rng, r, world, 0);
}
vec3 col = image->pixels[pixelIndex] / (sampleCount * nsBatch);
image->pixels2[pixelIndex] = col;
}
CUDA_GLOBAL void display(Image* image)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int pixelIndex = j*image->nx + i;
vec3 col = image->pixels2[pixelIndex];
// Gamma encoding of images is used to optimize the usage of bits
// when encoding an image, or bandwidth used to transport an image,
// by taking advantage of the non-linear manner in which humans perceive
// light and color. (wikipedia)
// we use gamma 2: raising the color to the power 1/gamma (1/2)
col = vec3(sqrt(col[0]), sqrt(col[1]), sqrt(col[2]));
int ir = clamp(int(255.f*col[0]), 0, 255);
int ig = clamp(int(255.f*col[1]), 0, 255);
int ib = clamp(int(255.f*col[2]), 0, 255);
if (image->writeImage)
{
// PNG
int index = (image->ny - 1 - j) * image->nx + i;
int index3 = 3 * index;
image->fileOutputImage[index3 + 0] = ir;
image->fileOutputImage[index3 + 1] = ig;
image->fileOutputImage[index3 + 2] = ib;
}
if (image->showWindow)
image->windowPixels[(image->ny-j-1)*image->nx + i] = (ir << 16) | (ig << 8) | (ib);
}
#endif // CUDA_ENABLED
#ifdef CUDA_ENABLED
void Renderer::cudaRender(Camera* cam, hitable* world, Image* image, int sampleCount)
{
dim3 blocks( (image->nx + image->tx - 1)/image->tx, (image->ny + image->ty - 1)/image->ty);
dim3 threads(image->tx, image->ty);
// Kernel call for the computation of pixel colors.
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, cam, image, world, this, sampleCount);
// Denoise here.
#ifdef OIDN_ENABLED
checkCudaErrors(hipDeviceSynchronize());
image->denoise();
checkCudaErrors(hipDeviceSynchronize());
#endif // OIDN_ENABLED
// Kernel call to fill the output buffers.
hipLaunchKernelGGL(( display), dim3(blocks), dim3(threads), 0, 0, image);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
}
#endif // CUDA_ENABLED
| 9d9f4353131713ac6d4bc065c9a08ac49e055f91.cu | /* MIT License
Copyright (c) 2018 Biro Eniko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "util/common.h"
#include "util/renderer.h"
#include "hitables/sphere.h"
#include "hitables/hitableList.h"
#include "util/camera.h"
#include "materials/material.h"
#include "util/scene.cuh"
#include "util/window.h"
CUDA_DEV int numHitables = 0;
#ifdef CUDA_ENABLED
void initializeWorldCuda(bool showWindow, bool writeImagePPM, bool writeImagePNG, hitable*** list, hitable** world, Window** w, Image** image, Camera** cam, Renderer** renderer)
{
int choice = 4;
switch(choice)
{
case 0:
numHitables = 4;
break;
case 1:
numHitables = 58;
break;
case 2:
numHitables = 901;
break;
case 3:
numHitables = 102;
break;
case 4:
numHitables = 68;
break;
}
// World
checkCudaErrors(cudaMallocManaged(list, numHitables*sizeof(hitable*)));
hitable** worldPtr;
checkCudaErrors(cudaMallocManaged(&worldPtr, sizeof(hitable*)));
switch(choice)
{
case 0:
simpleScene<<<1,1>>>(*list, worldPtr);
break;
case 1:
simpleScene2<<<1,1>>>(*list, worldPtr);
break;
case 2:
randomScene<<<1,1>>>(*list, worldPtr);
break;
case 3:
randomScene2<<<1,1>>>(*list, worldPtr);
break;
case 4:
randomScene3<<<1,1>>>(*list, worldPtr);
break;
}
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
*world = *worldPtr;
checkCudaErrors(cudaFree(worldPtr));
// Camera
vec3 lookFrom(13.0f, 2.0f, 3.0f);
vec3 lookAt(0.0f, 0.0f, 0.0f);
checkCudaErrors(cudaMallocManaged(cam, sizeof(Camera)));
new (*cam) Camera(lookFrom, lookAt, vec3(0.0f, 1.0f, 0.0f), 20.0f, float(nx)/float(ny), distToFocus);
// Renderer
checkCudaErrors(cudaMallocManaged(renderer, sizeof(Renderer)));
new (*renderer) Renderer(showWindow, writeImagePPM, writeImagePNG);
// Image
checkCudaErrors(cudaMallocManaged(image, sizeof(Image)));
new (*image) Image(showWindow, writeImagePPM || writeImagePNG, nx, ny, tx, ty);
// Window
if (showWindow)
*w = new Window(*cam, *renderer, nx, ny, thetaInit, phiInit, zoomScale, stepScale);
}
CUDA_GLOBAL void freeWorldCuda(hitable** list, hitable** world)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
{
for (int i = 0; i < numHitables; i++)
{
delete ((sphere *)list[i])->matPtr;
delete list[i];
}
//delete *world;
}
}
void destroyWorldCuda(bool showWindow, hitable** list, hitable* world, Window* w, Image* image, Camera* cam, Renderer* render)
{
freeWorldCuda<<<1,1>>>(list, &world);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(cam));
checkCudaErrors(cudaFree(render));
checkCudaErrors(cudaFree(image));
}
CUDA_GLOBAL void render(Camera* cam, Image* image, hitable* world, Renderer* render, int sampleCount)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= image->nx) || (j >= image->ny))
return;
int pixelIndex = j*image->nx + i;
// Render the samples in batches
for (int s = 0; s < nsBatch; s++)
{
RandomGenerator rng(sampleCount * nsBatch + s, pixelIndex);
float u = float(i + rng.get1f()) / float(image->nx); // left to right
float v = float(j + rng.get1f()) / float(image->ny); // bottom to top
ray r = cam->getRay(rng, u, v);
image->pixels[pixelIndex] += render->color(rng, r, world, 0);
}
vec3 col = image->pixels[pixelIndex] / (sampleCount * nsBatch);
image->pixels2[pixelIndex] = col;
}
CUDA_GLOBAL void display(Image* image)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int pixelIndex = j*image->nx + i;
vec3 col = image->pixels2[pixelIndex];
// Gamma encoding of images is used to optimize the usage of bits
// when encoding an image, or bandwidth used to transport an image,
// by taking advantage of the non-linear manner in which humans perceive
// light and color. (wikipedia)
// we use gamma 2: raising the color to the power 1/gamma (1/2)
col = vec3(sqrt(col[0]), sqrt(col[1]), sqrt(col[2]));
int ir = clamp(int(255.f*col[0]), 0, 255);
int ig = clamp(int(255.f*col[1]), 0, 255);
int ib = clamp(int(255.f*col[2]), 0, 255);
if (image->writeImage)
{
// PNG
int index = (image->ny - 1 - j) * image->nx + i;
int index3 = 3 * index;
image->fileOutputImage[index3 + 0] = ir;
image->fileOutputImage[index3 + 1] = ig;
image->fileOutputImage[index3 + 2] = ib;
}
if (image->showWindow)
image->windowPixels[(image->ny-j-1)*image->nx + i] = (ir << 16) | (ig << 8) | (ib);
}
#endif // CUDA_ENABLED
#ifdef CUDA_ENABLED
void Renderer::cudaRender(Camera* cam, hitable* world, Image* image, int sampleCount)
{
dim3 blocks( (image->nx + image->tx - 1)/image->tx, (image->ny + image->ty - 1)/image->ty);
dim3 threads(image->tx, image->ty);
// Kernel call for the computation of pixel colors.
render<<<blocks, threads>>>(cam, image, world, this, sampleCount);
// Denoise here.
#ifdef OIDN_ENABLED
checkCudaErrors(cudaDeviceSynchronize());
image->denoise();
checkCudaErrors(cudaDeviceSynchronize());
#endif // OIDN_ENABLED
// Kernel call to fill the output buffers.
display<<<blocks, threads>>>(image);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
}
#endif // CUDA_ENABLED
|
39adabfde43316d34c32abafa4db4e2eb5ce83a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "absolute_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 __restrict *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
const float4 __restrict *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
int elem_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
absolute_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,elem_count);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
absolute_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,elem_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
absolute_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, output,input,elem_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 39adabfde43316d34c32abafa4db4e2eb5ce83a4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "absolute_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 __restrict *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
const float4 __restrict *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
int elem_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
absolute_kernel<<<gridBlock,threadBlock>>>(output,input,elem_count);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
absolute_kernel<<<gridBlock,threadBlock>>>(output,input,elem_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
absolute_kernel<<<gridBlock,threadBlock>>>(output,input,elem_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
082bf42ca4c0f6ff2cf950105dd87f8ae370dd18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from ztrtri_lower.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
This file implements lower case, and is called by strtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "strtri.cuh"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
static __device__ void
strtri_diag_lower_device(
    magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
    // Inverts one IB x IB lower-triangular diagonal block of A in shared
    // memory and writes the inverse into the d_dinvA workspace.
    // Launch contract: one thread block of IB threads per diagonal block;
    // thread tx owns row tx of the block, blockIdx.x selects the block.
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int blk_ind = bx*IB;
    int ind = blk_ind + tx;   // global row index of this thread's row
    A += blk_ind + blk_ind*lda;  // A(blk_ind, blk_ind)
    // TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
    __shared__ float sB[IB*IB];
    float y_tx;
    // load lower triangle of inner block of A; zero upper triangle & outside matrix
    // (rows past n are zero-filled so the trailing partial block stays well defined)
    #pragma unroll
    for( int j=0; j < IB; j++ ) {
        if (tx >= j && ind < n) {
            sB[tx + j*IB] = A[tx + j*lda];
        }
        else {
            sB[tx + j*IB] = MAGMA_S_ZERO;
        }
    }
    __syncthreads();
    // invert the diagonal
    if (diag == MagmaUnit) {
        sB[tx + tx*IB] = MAGMA_S_ONE;
    }
    else {
        if ( sB[tx + tx*IB] == MAGMA_S_ZERO ) {  // singular or outside matrix
            sB[tx + tx*IB] = MAGMA_S_ONE;
        }
        else {
            sB[tx + tx*IB] = MAGMA_S_ONE / sB[tx + tx*IB];
        }
    }
    // compute elements j+1:IB-1 of j-th column.
    // Back-substitution, columns finalized right-to-left: iteration j reads
    // only columns k > j (already final) and writes column j, so the single
    // barrier at the end of each iteration is sufficient.
    for( int j=IB-2; j >= 0; j-- ) {
        if ( tx > j ) {
            // trmv: y = sB(j+1:IB-1, j+1:IB-1) * sB(j+1:IB-1, j)
            // each thread sums one element, y[tx]
            y_tx = MAGMA_S_ZERO;
            #pragma unroll
            for( int k=j+1; k < IB; k++ )
                y_tx += sB[tx + k*IB] * sB[k + j*IB];
            // scal: sB(j+1:IB-1, j) = -sB(j,j) * y
            sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
        }
        __syncthreads();
    }
    // go to the (bx / ib_per_NB) outer NB*NB block,
    // then the (bx % ib_per_NB) inner IB*IB block inside that.
    int ib_per_NB = NB/IB;
    d_dinvA += (bx / ib_per_NB)*NB*NB
             + (bx % ib_per_NB)*(NB*IB + IB);
    // write result (workspace leading dimension is NB)
    #pragma unroll
    for( int j=0; j < IB; j++ ) {
        d_dinvA[tx + j*NB] = sB[tx + j*IB];
    }
}
/*
Let A be an NB*NB lower triangular matrix, and B its inverse.
Then the block decomposition
[ A11 0 ] * [ B11 0 ] = [ I 0 ]
[ A21 A22 ] [ B21 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A21*B11 + A22*B21 = 0 ==> B21 = -A22^{-1}*A21*B11 = -B22*A21*B11.
strtri_diag_kernel inverts A11 and A22.
triple_sgemm16 routines multiply:
part 1: B21 = A21 * B11,
part 2: B21 = -B22 * B21.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 0 ]
which contains [ B21 B22 ].
Outer blocks are NB x NB.
A21 may have < jb rows, but is guaranteed to have jb cols since A22 is on
the right. This makes a single check easy to do.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
/*
 * B21 = A21 * B11
 */
static __device__ void
triple_sgemm16_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Computes B21 = A21 * B11 for every (jb*2 x jb*2) "page" of the
    // workspace.  Each thread produces one full 1x16 row of a NT x 16 tile
    // of C.  NOTE(review): the i/j strides of 4 in the sB load loops assume
    // a 4x4 thread block -- confirm against the launcher.
    // emulate 3D grid: NX * (NY*npages)
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);  // first row of this block's tile
    const int iby = by*16;                                 // first column of the 16-wide tile
    const int id = tx + ty*blockDim.x;                     // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;             // global row; bounds-checked vs n
    __shared__ float sB[16][17];   // 17 = 16+1 padding avoids shared-memory bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;
        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb;                          // B21
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // TODO this won't coalesce, will it? unless NX=32 (or maybe 16 with floats, or 8 with float-real)
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 4 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                // (interleaving loads with saxpy16 hides global-load latency)
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next iteration reloads it
        } while( B < Blast );
        // TODO instead of writing result, copy it to sB and do part 2.
        // Would only work for jb=16, because only then does rC fit into sB.
        // If sB were [NT][16+], then rC would fit into sB.
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
 * B21 = -B22 * B21
 */
static __device__ void
triple_sgemm16_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Second half of the page update: B21 = -B22 * B21, reading the B21
    // produced by part 1 and overwriting it in place (safe at jb<=64; see
    // the comment in the above64 variant).  Same grid/thread layout as part 1.
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
    const int iby = by*16;
    const int id = tx + ty*blockDim.x;
    const int ind = page*jb*2 + jb + ibx + id;
    __shared__ float sB[16][17];   // +1 column pad avoids bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;  // shadows lda argument
        int ldb = NB;
        int ldc = NB;
        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        C = d_dinvA + jb;          // B21
        B = C;                     // B21, okay to overwrite
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        // TODO factor this out:
        // gemm16<NX, NY> computes NT x 16 block of C:
        // C(1:nt, 1:16) = A(1:nt, 1:jb) * B(1:jb, 1:16)
        // where NT = NX * NY.
        // part 1: gemm16<4,4>( /*NT, 16,*/ jb, 1, A21, lda, B11, NB, /*0*/, B21, NB, n, ind, tx, ty );
        // part 2: gemm16<4,4>( /*NT, 16,*/ jb, -1, B22, NB, B21, NB, /*0*/, B21, NB, n, ind, tx, ty ); // okay for C to overwrite B
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 4 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();
        } while( B < Blast );
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        // (negated here, implementing the -B22*B21 product)
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
 * B21 = A21 * B11
 */
static __device__ void
triple_sgemm32_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // jb=32 variant of part 1: B21 = A21 * B11 per page.
    // NOTE(review): the i stride of 8 in the sB load loop assumes an 8x4
    // thread block -- confirm against the launcher.
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
    const int iby = by*16;                      // first column of the 16-wide tile
    const int id = tx + ty*blockDim.x;          // flat thread id
    const int ind = page*jb*2 + jb + ibx + id;  // global row; bounds-checked vs n
    __shared__ float sB[16][17];                // +1 column pad avoids bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;
        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb;                          // B21
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 8 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();
        } while( B < Blast );
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
 * B21 = -B22 * B21
 */
static __device__ void
triple_sgemm32_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // jb=32 variant of part 2: B21 = -B22 * B21 (in place).
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
    const int iby = by*16;
    const int id = tx + ty*blockDim.x;
    const int ind = page*jb*2 + jb + ibx + id;
    __shared__ float sB[16][17];   // +1 column pad avoids bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;  // shadows lda argument; workspace leading dim is NB
        int ldb = NB;
        int ldc = NB;
        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        C = d_dinvA + jb;          // B21
        B = C;                     // B21, okay to overwrite
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 8 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();
        } while( B < Blast );
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
 * B21 = A21 * B11
 */
static __device__ void
triple_sgemm64_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // jb=64 variant of part 1: B21 = A21 * B11 per page.
    // NOTE(review): ibx/id arithmetic and the i stride of 16 below assume a
    // 16x4 thread block (64 threads) -- confirm against the launcher.
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;              // first row of this block's 64-row section
    const int iby = by*16;                      // first column of the 16-wide tile
    const int id = tx + ty*16;                  // flat thread id
    const int ind = page*jb*2 + jb + ibx + id;  // global row; bounds-checked vs n
    __shared__ float sB[16][17];                // +1 column pad avoids bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;
        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb;                          // B21
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();
        } while( B < Blast );
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
 * B21 = -B22 * B21
 */
static __device__ void
triple_sgemm64_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // jb=64 variant of part 2: B21 = -B22 * B21 (in place).
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;
    const int iby = by*16;
    const int id = tx + ty*16;
    const int ind = page*jb*2 + jb + ibx + id;
    __shared__ float sB[16][17];   // +1 column pad avoids bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;  // shadows lda argument; workspace leading dim is NB
        int ldb = NB;
        int ldc = NB;
        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        C = d_dinvA + jb;          // B21
        B = C;                     // B21, okay to overwrite
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();
        } while( B < Blast );
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
 * B21 = A21 * B11
 */
static __device__ void
triple_sgemm_above64_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // jb>64 variant of part 1.  Unlike the smaller variants, the result is
    // written to the B12 temp location (not B21) -- see the comment below.
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;              // first row of this block's 64-row section
    const int iby = by*16;                      // first column of the 16-wide tile
    const int id = tx + ty*16;                  // flat thread id (16x4 block assumed)
    const int ind = page*jb*2 + jb + ibx + id;  // global row; bounds-checked vs n
    __shared__ float sB[16][17];                // +1 column pad avoids bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;
        // For jb > 64, we process B21 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
        // Each section needs all of the B matrix, so C cannot overwrite B.
        // Therefore, store B21 temporarily in the previously unused B12 matrix
        // (i.e., above diagonal), then in part 3, zero out B12.
        //
        // Kernels with jb <= 64 don't have this problem, because only the
        // NT x 16 section of C that overwrites the same section of B depends
        // on that section of B.
        //
        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb*NB;                       // B21; write to B12 temp location
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();
        } while( B < Blast );
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
 * B21 = -B22 * B21
 */
static __device__ void
triple_sgemm_above64_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // jb>64 variant of part 2: reads the B21 staged in the B12 temp
    // location by part 1 and writes the final -B22*B21 into B21 proper.
    const int by = blockIdx.y / npages;
    const int page = blockIdx.y % npages;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;
    const int iby = by*16;
    const int id = tx + ty*16;
    const int ind = page*jb*2 + jb + ibx + id;
    __shared__ float sB[16][17];   // +1 column pad avoids bank conflicts
    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);
    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;  // shadows lda argument; workspace leading dim is NB
        int ldb = NB;
        int ldc = NB;
        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        B = d_dinvA + jb*NB;       // B21, read from B12 temp location
        C = d_dinvA + jb;          // B21
        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;
        const float *Blast = B + jb;
        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4] = {0, 0, 0, 0};
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();
            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];
                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];
                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];
                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];
                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }
            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();
        } while( B < Blast );
        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
 * zero out B12 temp location
 */
static __device__ void
triple_sgemm_above64_part3_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // After part 2 has consumed the B21 values staged in B12, clear that
    // temp region so the workspace is strictly lower triangular again.
    // blockIdx.y packs (column-section, page); blockIdx.x picks the
    // 64-row section; the thread block is treated as 16x4 (64 threads).
    const int sec  = blockIdx.y / npages;                       // 16-wide column section
    const int page = blockIdx.y % npages;
    const int row0 = blockIdx.x*64 + threadIdx.x + threadIdx.y*16;  // this thread's row
    const int col0 = sec*16;                                    // first column of the section
    // Locate this page's (jb*2) x (jb*2) workspace: the outer NB*NB block,
    // then the inner page within it.
    const int pages_per_NB = NB/(jb*2);
    float *page_base = d_dinvA
                     + (page / pages_per_NB)*NB*NB
                     + (page % pages_per_NB)*(jb*2*NB + jb*2);
    // B12 starts jb columns in (leading dimension NB); each thread zeroes
    // one element in each of the section's 16 columns.
    float *dst = page_base + jb*NB + row0 + col0*NB;
    #pragma unroll
    for( int k = 0; k < 16; k++ ) {
        dst[k*NB] = MAGMA_S_ZERO;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Global kernel entry points: thin wrappers forwarding to the __device__
// implementations above (non-batched versions).
__global__ void
strtri_diag_lower_kernel(
    magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
    strtri_diag_lower_device(diag, n, A, lda, d_dinvA);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part3_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    triple_sgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
strtri_diag_lower_kernel_batched(
magma_diag_t diag, int n, float const * const * dA_array, int lda, float **dinvA_array)
{
int batchid = blockIdx.z;
strtri_diag_lower_device(diag, n, dA_array[batchid], lda, dinvA_array[batchid]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part1_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm16_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part2_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm16_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part1_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm32_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part2_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm32_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part1_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part2_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part1_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm_above64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part2_lower_kernel_batched(
    int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
    // Batched entry point: grid dimension z selects the matrix within the batch,
    // so each z-slice runs the single-matrix device routine on its own pointers.
    triple_sgemm_above64_part2_lower_device( n, Ain_array[blockIdx.z], lda,
                                             dinvA_array[blockIdx.z], jb, npages );
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part3_lower_kernel_batched(
    int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
    // Batched entry point: grid dimension z selects the matrix within the batch,
    // so each z-slice runs the single-matrix device routine on its own pointers.
    triple_sgemm_above64_part3_lower_device( n, Ain_array[blockIdx.z], lda,
                                             dinvA_array[blockIdx.z], jb, npages );
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from ztrtri_lower.cu normal z -> s, Fri Sep 11 18:29:22 2015
@author Peng Du
@author Tingxing Dong
@author Mark Gates
This file implements lower case, and is called by strtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "strtri.cuh"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
static __device__ void
strtri_diag_lower_device(
    magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
    int tx = threadIdx.x;      // each thread owns one row of the IB x IB inner block
    int bx = blockIdx.x;       // each thread block inverts one diagonal inner block
    int blk_ind = bx*IB;
    int ind = blk_ind + tx;    // global row index; used to mask rows beyond n

    A += blk_ind + blk_ind*lda;  // A(blk_ind, blk_ind)

    // TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
    __shared__ float sB[IB*IB];
    float y_tx;

    // load lower triangle of inner block of A; zero upper triangle & outside matrix
    #pragma unroll
    for( int j=0; j < IB; j++ ) {
        if (tx >= j && ind < n) {
            sB[tx + j*IB] = A[tx + j*lda];
        }
        else {
            sB[tx + j*IB] = MAGMA_S_ZERO;
        }
    }
    __syncthreads();

    // invert the diagonal
    if (diag == MagmaUnit) {
        sB[tx + tx*IB] = MAGMA_S_ONE;
    }
    else {
        if ( sB[tx + tx*IB] == MAGMA_S_ZERO ) {    // singular or outside matrix
            sB[tx + tx*IB] = MAGMA_S_ONE;
        }
        else {
            sB[tx + tx*IB] = MAGMA_S_ONE / sB[tx + tx*IB];
        }
    }

    // compute elements j+1:IB-1 of j-th column.
    // Columns are processed right to left; the barrier at the end of each
    // iteration is needed because column j reads columns j+1:IB-1 of sB.
    for( int j=IB-2; j >= 0; j-- ) {
        if ( tx > j ) {
            // trmv: y = sB(j+1:IB-1, j+1:IB-1) * sB(j+1:IB-1, j)
            // each thread sums one element, y[tx]
            y_tx = MAGMA_S_ZERO;
            #pragma unroll
            for( int k=j+1; k < IB; k++ )
                y_tx += sB[tx + k*IB] * sB[k + j*IB];
            // scal: sB(j+1:IB-1, j) = -sB(j,j) * y
            sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
        }
        __syncthreads();   // outside the divergent branch: all threads reach it
    }

    // go to the (bx / ib_per_NB) outer NB*NB block,
    // then the (bx % ib_per_NB) inner IB*IB block inside that.
    int ib_per_NB = NB/IB;
    d_dinvA += (bx / ib_per_NB)*NB*NB
             + (bx % ib_per_NB)*(NB*IB + IB);

    // write result
    #pragma unroll
    for( int j=0; j < IB; j++ ) {
        d_dinvA[tx + j*NB] = sB[tx + j*IB];
    }
}
/*
Let A be an NB*NB lower triangular matrix, and B its inverse.
Then the block decomposition
[ A11 0 ] * [ B11 0 ] = [ I 0 ]
[ A21 A22 ] [ B21 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A21*B11 + A22*B21 = 0 ==> B21 = -A22^{-1}*A21*B11 = -B22*A21*B11.
strtri_diag_kernel inverts A11 and A22.
triple_sgemm16 routines multiply:
part 1: B21 = A21 * B11,
part 2: B21 = -B22 * B21.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 0 ]
which contains [ B21 B22 ].
Outer blocks are NB x NB.
A21 may have < jb rows, but is guaranteed to have jb cols since A22 is on
the right. This makes a single check easy to do.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
/*
* B21 = A21 * B11
*/
static __device__ void
triple_sgemm16_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // emulate 3D grid: NX * (NY*npages)
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);  // first row of this block's C panel
    const int iby = by*16;                                 // first column of this block's C panel
    const int id = tx + ty*blockDim.x;                     // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;             // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;

        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb;                          // B21

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // TODO this won't coalesce, will it? unless NX=32 (or maybe 16 with floats, or 8 with float-real)
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 4 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // TODO instead of writing result, copy it to sB and do part 2.
        // Would only work for jb=16, because only then does rC fit into sB.
        // If sB were [NT][16+], then rC would fit into sB.

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
* B21 = -B22 * B21
*/
static __device__ void
triple_sgemm16_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);  // first row of this block's C panel
    const int iby = by*16;                                 // first column of this block's C panel
    const int id = tx + ty*blockDim.x;                     // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;             // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;   // shadows lda argument
        int ldb = NB;
        int ldc = NB;

        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        C = d_dinvA + jb;          // B21
        B = C;                     // B21, okay to overwrite

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        // TODO factor this out:
        // gemm16<NX, NY> computes NT x 16 block of C:
        // C(1:nt, 1:16) = A(1:nt, 1:jb) * B(1:jb, 1:16)
        // where NT = NX * NY.
        // part 1: gemm16<4,4>( /*NT, 16,*/ jb,  1, A21, lda, B11, NB, /*0*/, B21, NB, n, ind, tx, ty );
        // part 2: gemm16<4,4>( /*NT, 16,*/ jb, -1, B22, NB,  B21, NB, /*0*/, B21, NB, n, ind, tx, ty );  // okay for C to overwrite B

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 4 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        // negated here, giving B21 = -B22 * B21
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
* B21 = A21 * B11
*/
static __device__ void
triple_sgemm32_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);  // first row of this block's C panel
    const int iby = by*16;                                 // first column of this block's C panel
    const int id = tx + ty*blockDim.x;                     // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;             // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;

        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb;                          // B21

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 8 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
* B21 = -B22 * B21
*/
static __device__ void
triple_sgemm32_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x * (blockDim.x*blockDim.y);  // first row of this block's C panel
    const int iby = by*16;                                 // first column of this block's C panel
    const int id = tx + ty*blockDim.x;                     // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;             // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;   // shadows the lda argument
        int ldb = NB;
        int ldc = NB;

        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        C = d_dinvA + jb;          // B21
        B = C;                     // B21, okay to overwrite

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 8 ) {        // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        // negated here, giving B21 = -B22 * B21
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
* B21 = A21 * B11
*/
static __device__ void
triple_sgemm64_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;          // first row of this block's 64-row C panel
    const int iby = by*16;                  // first column of this block's C panel
    const int id = tx + ty*16;              // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;   // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;

        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb;                          // B21

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
* B21 = -B22 * B21
*/
static __device__ void
triple_sgemm64_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;          // first row of this block's 64-row C panel
    const int iby = by*16;                  // first column of this block's C panel
    const int id = tx + ty*16;              // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;   // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;   // shadows the lda argument
        int ldb = NB;
        int ldc = NB;

        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        C = d_dinvA + jb;          // B21
        B = C;                     // B21, okay to overwrite

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        // negated here, giving B21 = -B22 * B21
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
* B21 = A21 * B11
*/
static __device__ void
triple_sgemm_above64_part1_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;          // first row of this block's 64-row C panel
    const int iby = by*16;                  // first column of this block's C panel
    const int id = tx + ty*16;              // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;   // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part one---------------------------//
    {
        // B21 = A21 * B11
        const float *A, *B;
        float *C;
        int ldb = NB;
        int ldc = NB;

        // For jb > 64, we process B21 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
        // Each section needs all of the B matrix, so C cannot overwrite B.
        // Therefore, store B21 temporarily in the previously unused B12 matrix
        // (i.e., above diagonal), then in part 3, zero out B12.
        //
        // Kernels with jb <= 64 don't have this problem, because only the
        // NT x 16 section of C that overwrites the same section of B depends
        // on that section of B.
        //
        // in gemm notation: C = A*B
        A = Ain + page*jb*2*lda + page*jb*2 + jb;  // A21
        B = d_dinvA;                               // B11
        C = d_dinvA + jb*NB;                       // B21; write to B12 temp location

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        for( int i = 0; i < 16; i++ ) {
            C[0] = rC[i];
            C += ldc;
        }
    }
}
/*
* B21 = -B22 * B21
*/
static __device__ void
triple_sgemm_above64_part2_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    const int by   = blockIdx.y / npages;   // which 16-column panel of C
    const int page = blockIdx.y % npages;   // which (jb*2)x(jb*2) page of the workspace
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int ibx = blockIdx.x*64;          // first row of this block's 64-row C panel
    const int iby = by*16;                  // first column of this block's C panel
    const int id = tx + ty*16;              // flat thread id within the block
    const int ind = page*jb*2 + jb + ibx + id;   // global row in A; masks rows >= n

    __shared__ float sB[16][17];   // 16x16 tile of B; inner dim padded to 17

    // go to the (page / pages_per_NB) outer NB*NB block,
    // then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
    int pages_per_NB = NB/(jb*2);
    d_dinvA += (page / pages_per_NB)*NB*NB
             + (page % pages_per_NB)*(jb*2*NB + jb*2);

    //--------------------------part two---------------------------//
    {
        // B21 = -B22 * B21
        const float *A, *B;
        float *C;
        int lda = NB;   // shadows the lda argument
        int ldb = NB;
        int ldc = NB;

        // in gemm notation: C = A*B
        A = d_dinvA + jb*NB + jb;  // B22
        B = d_dinvA + jb*NB;       // B21, read from B12 temp location
        C = d_dinvA + jb;          // B21

        A += ibx + id;
        B += tx + (iby + ty)*ldb;
        C += ibx + id + iby*ldc;

        const float *Blast = B + jb;   // loop sentinel: jb columns of the inner dimension

        // compute NT x 16 block of C
        // each thread computes one 1x16 row, C(id,0:15)
        float rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        float rA[4]  = {0, 0, 0, 0};

        // march across the jb inner (k) dimension, 16 at a time
        do {
            // load 16 x 16 block of B using NX x 4 threads
            #pragma unroll
            for( int i=0; i < 16; i += 16 ) {       // += blockDim.x
                #pragma unroll
                for( int j=0; j < 16; j += 4 ) {    // += blockDim.y
                    sB[tx + i][ty + j] = B[i + j*ldb];
                }
            }
            __syncthreads();

            if ( ind < n ) {
                // load NT x 16 block of A; each thread initially loads 1x4 row,
                // then continues loading more elements as axpys are done.
                rA[0] = A[0*lda];
                rA[1] = A[1*lda];
                rA[2] = A[2*lda];
                rA[3] = A[3*lda];

                // axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
                saxpy16( rA[0], &sB[ 0][0], rC );  rA[0] = A[ 4*lda];
                saxpy16( rA[1], &sB[ 1][0], rC );  rA[1] = A[ 5*lda];
                saxpy16( rA[2], &sB[ 2][0], rC );  rA[2] = A[ 6*lda];
                saxpy16( rA[3], &sB[ 3][0], rC );  rA[3] = A[ 7*lda];

                saxpy16( rA[0], &sB[ 4][0], rC );  rA[0] = A[ 8*lda];
                saxpy16( rA[1], &sB[ 5][0], rC );  rA[1] = A[ 9*lda];
                saxpy16( rA[2], &sB[ 6][0], rC );  rA[2] = A[10*lda];
                saxpy16( rA[3], &sB[ 7][0], rC );  rA[3] = A[11*lda];

                saxpy16( rA[0], &sB[ 8][0], rC );  rA[0] = A[12*lda];
                saxpy16( rA[1], &sB[ 9][0], rC );  rA[1] = A[13*lda];
                saxpy16( rA[2], &sB[10][0], rC );  rA[2] = A[14*lda];
                saxpy16( rA[3], &sB[11][0], rC );  rA[3] = A[15*lda];

                saxpy16( rA[0], &sB[12][0], rC );
                saxpy16( rA[1], &sB[13][0], rC );
                saxpy16( rA[2], &sB[14][0], rC );
                saxpy16( rA[3], &sB[15][0], rC );
            }

            // move to next block of A and B
            A += 16*lda;
            B += 16;
            __syncthreads();   // protect sB before the next tile is loaded
        } while( B < Blast );

        // write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
        // negated here, giving B21 = -B22 * B21
        for( int i = 0; i < 16; i++ ) {
            C[0] = -rC[i];
            C += ldc;
        }
    }
}
/*
* zero out B12 temp location
*/
static __device__ void
triple_sgemm_above64_part3_lower_device(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Clears the B12 scratch area that part 1 used as a temporary for B21,
    // restoring the lower-triangular layout of the inverse workspace.
    const int panel  = blockIdx.y / npages;   // 16-column panel index
    const int page   = blockIdx.y % npages;   // (jb*2) x (jb*2) page index
    const int row0   = blockIdx.x*64;         // first row handled by this block
    const int col0   = panel*16;              // first column handled by this block
    const int thread = threadIdx.x + threadIdx.y*16;   // flat thread id

    // Locate this page: outer NB*NB block, then the (jb*2)*(jb*2) page inside it.
    const int pages_per_NB = NB/(jb*2);
    float *scratch = d_dinvA
                   + (page / pages_per_NB)*NB*NB
                   + (page % pages_per_NB)*(jb*2*NB + jb*2);

    // B12 temp location starts jb columns into the page; each thread zeros
    // one element in each of its 16 columns.
    float *p = scratch + jb*NB + row0 + thread + col0*NB;
    #pragma unroll
    for( int col = 0; col < 16; col++ ) {
        p[col*NB] = MAGMA_S_ZERO;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
strtri_diag_lower_kernel(
    magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA)
{
    // Global entry point: forwards the launch straight to the device routine
    // (one thread block per IB x IB diagonal block).
    strtri_diag_lower_device(diag, n, A, lda, d_dinvA);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part1_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part2_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part3_lower_kernel(
    int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages)
{
    // Global entry point: forwards the launch straight to the device routine.
    triple_sgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
strtri_diag_lower_kernel_batched(
    magma_diag_t diag, int n, float const * const * dA_array, int lda, float **dinvA_array)
{
    // Batched entry point: grid dimension z selects the matrix within the batch,
    // so each z-slice runs the single-matrix device routine on its own pointers.
    strtri_diag_lower_device( diag, n, dA_array[blockIdx.z], lda,
                              dinvA_array[blockIdx.z] );
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part1_lower_kernel_batched(
    int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
    // Batched entry point: grid dimension z selects the matrix within the batch,
    // so each z-slice runs the single-matrix device routine on its own pointers.
    triple_sgemm16_part1_lower_device( n, Ain_array[blockIdx.z], lda,
                                       dinvA_array[blockIdx.z], jb, npages );
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm16_part2_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm16_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part1_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm32_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm32_part2_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm32_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part1_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm64_part2_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part1_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm_above64_part1_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part2_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm_above64_part2_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
triple_sgemm_above64_part3_lower_kernel_batched(
int n, float const * const * Ain_array, int lda, float **dinvA_array, int jb, int npages)
{
int batchid = blockIdx.z;
triple_sgemm_above64_part3_lower_device( n, Ain_array[batchid], lda, dinvA_array[batchid], jb, npages);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
|
7b1016d92d580576e7718a0321f13ef75a7e89ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/device_alternate.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float* A, const float* B, const float beta,
    float* C) {
  // hipBLAS (like cuBLAS) is column-major while caffe stores row-major.
  // Computing B^T * A^T in column-major order yields (A*B)^T, i.e. a
  // row-major C, so the operands are swapped in the call below.
  const bool isTransA = (TransA != CblasNoTrans);
  const bool isTransB = (TransB != CblasNoTrans);
  // Leading dimensions of the row-major inputs.
  const int lda = isTransA ? M : K;
  const int ldb = isTransB ? N : K;  // matches original: N when not transposed
  hipblasOperation_t opA;
  hipblasOperation_t opB;
  if (isTransA) {
    opA = HIPBLAS_OP_T;
  } else {
    opA = HIPBLAS_OP_N;
  }
  if (isTransB) {
    opB = HIPBLAS_OP_T;
  } else {
    opB = HIPBLAS_OP_N;
  }
  CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), opB, opA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const double alpha, const double* A, const double* B, const double beta,
    double* C) {
  // hipBLAS is column-major; swapping the operands (B first, A second)
  // computes B^T * A^T = (A*B)^T, which is exactly a row-major A*B.
  const bool isTransA = (TransA != CblasNoTrans);
  const bool isTransB = (TransB != CblasNoTrans);
  const int lda = isTransA ? M : K;   // leading dim of row-major A
  const int ldb = isTransB ? N : K;   // leading dim of row-major B
  hipblasOperation_t opA = isTransA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  hipblasOperation_t opB = isTransB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), opB, opA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// y = alpha * op(A) * x + beta * y for a row-major M x N matrix A.
// Because hipBLAS is column-major, the row-major A is presented as its
// transpose: note the op flag is inverted (NoTrans -> HIPBLAS_OP_T) and the
// leading dimension is N.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const float alpha, const float* A, const float* x,
    const float beta, float* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}

// Double-precision variant of the above; identical transpose trick.
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
    const int N, const double alpha, const double* A, const double* x,
    const double beta, double* y) {
  hipblasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
  CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
      A, N, x, 1, &beta, y, 1));
}
// Y += alpha * X (single precision), contiguous vectors of length N.
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
    float* Y) {
  CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}

// Y += alpha * X (double precision).
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
    double* Y) {
  CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}

// Copies N *bytes* from X to Y.  hipMemcpyDefault lets the runtime infer the
// direction, so either pointer may be host or device memory.  The X != Y
// guard makes the self-copy a no-op.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
  if (X != Y) {
    CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));  // NOLINT(caffe/alt_fn)
  }
}
// X *= alpha, in place (single precision).
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
  CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}

// X *= alpha, in place (double precision).
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
  CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}

// Y = alpha * X + beta * Y, implemented as scal (Y *= beta) followed by
// axpy (Y += alpha * X).  Two kernel launches, but no extra storage.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
    const float beta, float* Y) {
  caffe_gpu_scal<float>(N, beta, Y);
  caffe_gpu_axpy<float>(N, alpha, X, Y);
}

template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
    const double beta, double* Y) {
  caffe_gpu_scal<double>(N, beta, Y);
  caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Fills y[0..n) with the scalar alpha.  CUDA_KERNEL_LOOP is caffe's
// grid-stride loop macro, so any launch configuration covers all n elements.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = alpha;
  }
}

// Host-side fill of N elements of Y with alpha.
// Fast path: for alpha == 0 an all-zero byte pattern is the correct
// representation for int, float, and double alike, so hipMemset suffices.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
  if (alpha == 0) {
    CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N));  // NOLINT(caffe/alt_fn)
    return;
  }
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      N, alpha, Y);
}

// Explicit instantiations for the element types caffe uses on the GPU.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
// Elementwise sign (-1/0/+1) and sign-bit extraction, generated by caffe's
// unary-function macro (kernel + float/double host wrappers).
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
    - (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));

// Fills r[0..n) with raw 32-bit random integers from the shared generator.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
  CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}

// Uniform floats on the requested interval: draw in hipRAND's unit interval,
// then affinely map via scale-by-(b-a) and shift-by-a.  Both transforms are
// skipped when they would be identity operations.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
                                  float* r) {
  CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
  const float range = b - a;
  if (range != static_cast<float>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<float>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}

template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
                                   double* r) {
  CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
  const double range = b - a;
  if (range != static_cast<double>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<double>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}

// Gaussian samples with mean mu and standard deviation sigma.
// NOTE(review): hipRAND's normal generators require an even n for
// single-precision — TODO confirm callers guarantee this.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
                            float* r) {
  CURAND_CHECK(
      hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}

template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
                            double* r) {
  CURAND_CHECK(
      hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 7b1016d92d580576e7718a0321f13ef75a7e89ab.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/device_alternate.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// C = alpha * op(A) * op(B) + beta * C for row-major M x K, K x N inputs.
// cuBLAS is column-major (fortran order), so the operands are swapped:
// computing op(B)^T * op(A)^T column-major produces (A*B)^T, i.e. a
// row-major C with leading dimension N.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const float alpha, const float* A, const float* B, const float beta,
    float* C) {
  // Leading dimensions of the row-major inputs.
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}

// Double-precision variant; same operand-swap trick as above.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
    const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
    const double alpha, const double* A, const double* B, const double beta,
    double* C) {
  int lda = (TransA == CblasNoTrans) ? K : M;
  int ldb = (TransB == CblasNoTrans) ? N : K;
  cublasOperation_t cuTransA =
      (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  cublasOperation_t cuTransB =
      (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
  CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
      N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
4f2483857a453531f9bb669f3fec1f7ede3ceee1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduce.h"
// Reduction-framework hooks (see reduce.h) specialized for a strided
// double-precision max reduction: both the warp/block merge step and the
// per-element update keep the larger value.
__device__ double merge(double old,double opOutput,double *extraParams) {
       return max(old,opOutput);
}

__device__ double update(double old,double opOutput,double *extraParams) {
       return max(old,opOutput);
}

// Element transform applied before reduction: identity (max of raw values).
__device__ double op(double d1,double *extraParams) {
       return d1;
}

// Final pass over the reduced value: no post-processing for max.
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
       return reduction;
}

extern "C"
// Entry point: delegates to the generic strided transform/reduce driver in
// reduce.h, which invokes the hooks above.
__global__ void max_strided_double(int n, int xOffset,double *dx,int incx,double *extraParams,double *result) {
       transform(n,xOffset,dx,incx,extraParams,result);
}
| 4f2483857a453531f9bb669f3fec1f7ede3ceee1.cu | #include "reduce.h"
__device__ double merge(double old,double opOutput,double *extraParams) {
return max(old,opOutput);
}
__device__ double update(double old,double opOutput,double *extraParams) {
return max(old,opOutput);
}
__device__ double op(double d1,double *extraParams) {
return d1;
}
__device__ double postProcess(double reduction,int n,int xOffset,double *dx,int incx,double *extraParams,double *result) {
return reduction;
}
extern "C"
__global__ void max_strided_double(int n, int xOffset,double *dx,int incx,double *extraParams,double *result) {
transform(n,xOffset,dx,incx,extraParams,result);
}
|
ebfd4cf5562d6fc4719eb892f3c61327ef377baa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "transform.h"
// Elementwise transform hook (see transform.h): natural log, single
// precision (logf avoids a double-precision promotion).
__device__ float op(float d1,float *params) {
       return logf(d1);
}

extern "C"
// Entry point: delegates to the generic strided elementwise driver in
// transform.h, which applies op() to every strided element of dy.
__global__ void log_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
       transform(n,idx,dy,incy,params,result);
}
| ebfd4cf5562d6fc4719eb892f3c61327ef377baa.cu | #include "transform.h"
__device__ float op(float d1,float *params) {
return logf(d1);
}
extern "C"
__global__ void log_strided_float(int n,int idx,float *dy,int incy,float *params,float *result) {
transform(n,idx,dy,incy,params,result);
}
|
97b69594e5f0d6584d629f85e2647230449a4ef6.hip | // !!! This is a file automatically generated by hipify!!!
/* GmpCudaDevice-getGcdKernel.cu -- provides GmpCudaDevice::getGcdKernel method
(includes the gcd kernel code).
Implementation of the modular integer gcd algorithm using L <= 32 bit moduli.
Reference: Weber, Trevisan, Martins 2005. A Modular Integer GCD algorithm
Journal of Algorithms 54, 2 (February, 2005) 152-167.
Note that there is an error in Fig. 2, which shows that the
final result can be recovered as the mixed radix representation
is calculated. In actuality, all the mixed radix digits and moduli
must be computed before the actual GCD can be recovered.
Based on initial work by
Authors: Justin Brew, Anthony Rizzo, Kenneth Weber
Mount Union College
June 25, 2009
Further revisions by
K. Weber University of Mount Union
weberk@mountunion.edu
See GmpCudaDevice.cu for revision history.
*/
// Enforce use of CUDA 9 or higher at compile time.
#if defined(CUDART_VERSION) && CUDART_VERSION >= 9000
#else
#error Requires CUDA 9 or more recent
#endif
#include <cassert>
#include <cstring>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include "GmpCuda.h"
#include "GmpCudaDevice-gcdDevicesQuoRemQuasi.h"
#include "modInv.h"
using namespace GmpCuda;
static constexpr int WARPS_PER_BLOCK = GmpCudaDevice::GCD_BLOCK_SZ / WARP_SZ;
static constexpr unsigned FULL_MASK = 0xFFFFFFFF; // Used in sync functions.
static constexpr uint64_t MODULUS_MASK = uint64_t{0xFFFFFFFF}; // Mask for modulus portion of pair.
static constexpr int32_t MOD_INFINITY = INT32_MIN; // Larger than any modulur value
typedef GmpCudaDevice::pair_t pair_t; // Used to pass back result.
// This type is used to conveniently manipulate the modulus and its inverse.
typedef struct {uint32_t modulus; uint64_t inverse;} modulus_t;
// Elect one lane of the warp whose pair has a nonzero value (and satisfies
// the predicate).  The choice is made with a ballot, so every lane of the
// warp deterministically agrees on the same answer; when no lane qualifies,
// lane 0 is returned as a fallback.
__device__
static
inline
int
findAnyNonZero(pair_t pair, bool predicate = true)
{
  unsigned eligible = __ballot_sync(FULL_MASK, predicate && pair.value != 0);
  int lane = __ffs(eligible) - 1;  // -1 when the ballot came back empty
  return (lane < 0) ? 0 : lane;
}
// Posts to the barrier one of the pair parameters whose value is not 0.
// If no such value is found, a pair with a 0 value is posted.
// Preconditions: all threads in block participate.
__device__
static
void
postAnyPairPriorityNonzero(pair_t pair, GmpCudaBarrier &bar)
{
__shared__ pair_t sharedPair[WARP_SZ];
__syncthreads(); // protect shared memory against last call to this function.
// Each warp elects one lane holding a nonzero value (lane 0 if none) and
// that lane deposits its pair into this warp's slot of sharedPair.
if (findAnyNonZero(pair) == threadIdx.x % WARP_SZ)
sharedPair[threadIdx.x / WARP_SZ] = pair;
__syncthreads();
// Repeat the election across the per-warp slots, then publish the winner
// to the grid-wide barrier as a packed 64-bit value.
pair = sharedPair[findAnyNonZero(sharedPair[threadIdx.x], threadIdx.x < WARPS_PER_BLOCK)];
bar.post(*reinterpret_cast<uint64_t *>(&pair));
}
// Chooses one of the pairs in the barrier that doesn't have a 0 value;
// chosen pair is returned in pair as result.
// If there are no nonzero values, a pair with value 0 is returned.
// Preconditions: all threads in block participate.
// Postcondition: every thread will have the same pair.
__device__
static
void
collectAnyPairPriorityNonzero(pair_t& __restrict__ pair,
GmpCudaBarrier& __restrict__ bar)
{
__shared__ pair_t sharedPair[WARP_SZ];
bar.collect(*reinterpret_cast<uint64_t*>(&pair)); // Only low gridDim.x threads have "good" values.
__syncthreads(); // protect shared memory against last call to this function.
int warpLane = threadIdx.x % WARP_SZ;
// One elected lane per warp (only among the low gridDim.x threads that
// collected real values) deposits its pair into sharedPair.
if (findAnyNonZero(pair, threadIdx.x < gridDim.x) == warpLane && threadIdx.x < gridDim.x)
sharedPair[threadIdx.x / WARP_SZ] = pair;
__syncthreads();
int numWarps = (gridDim.x - 1) / WARP_SZ + 1;
// All warps do this and get common value for winner.
pair = sharedPair[findAnyNonZero(sharedPair[warpLane], warpLane < numWarps)];
}
// Warp-wide minimum reduction via shuffle-down; the result lands in lane 0.
__device__
inline
void
minWarp(uint64_t &x)
{
#pragma unroll
  for (int step = WARP_SZ / 2; step != 0; step >>= 1)
    {
      uint64_t other = __shfl_down_sync(FULL_MASK, x, step);
      x = min(x, other);
    }
}
// Calculates abs(x), except that MOD_INFINITY == INT32_MIN is not changed.
__device__
static
inline
uint64_t
modAbs(int32_t x)
{
// Two's-complement negate for x < 0.  For x == MOD_INFINITY (INT32_MIN) the
// negation wraps back to INT32_MIN, which then sign-extends to a huge
// uint64_t -- so the "infinity" sentinel sorts above every real value.
return (x < 0) ? ~x + 1 : x;
}
// Posts pair which achieves the minimum of the absolute value
// of all pairs in each threadblock to bar.
// Precondition: modulus of each pair is odd and all threads participate.
// Postcondition: bar is ready for collectMinPair to be called.
__device__
static
void
postMinPair(pair_t pair, GmpCudaBarrier& bar)
{
__shared__ uint64_t sharedX[WARP_SZ];
__syncthreads(); // protect shared memory against last call to this function.
// Prepare a long int composed of the absolute value of pair.value in the high bits and pair.modulus in the low bits.
// Store sign of pair.value in the low bit of pair.modulus, which should always be 1 since it's odd.
uint64_t x = (modAbs(pair.value) << 32) | (pair.modulus - (pair.value >= 0));
// Find the smallest in each warp, and store in sharedX.
minWarp(x);
if (threadIdx.x % WARP_SZ == 0)
sharedX[threadIdx.x / WARP_SZ] = x;
__syncthreads();
// Now find the min of the values in sharedX.
// WARPS_PER_BLOCK must be a power of 2 <= WARP_SZ.
// Warp 0 reduces the per-warp minima down to a single block minimum.
if (threadIdx.x < WARP_SZ)
{
x = sharedX[threadIdx.x];
#pragma unroll
for (int i = WARPS_PER_BLOCK/2; i > 0; i /= 2)
x = min(x, __shfl_down_sync(FULL_MASK, x, i));
}
// Only lane 0's x is the true block minimum; bar.post is expected to use it.
bar.post(x);
}
// Returns, in pair, the pair which achieves the global minimum of the absolute value
// of the value over all the pairs that have been posted to bar.
// Precondition: postMinPair was previously called and all threads participate.
__device__
static
void
collectMinPair(pair_t& __restrict__ pair, GmpCudaBarrier& __restrict__ bar)
{
uint64_t x;
bar.collect(x);
__shared__ uint64_t sharedX[WARP_SZ];
__syncthreads(); // protect shared memory against last call to this function.
int numWarps = (gridDim.x - 1) / WARP_SZ + 1;
// Reduce the gridDim.x collected values; threads beyond gridDim.x supply
// UINT64_MAX, the identity for min.
if (threadIdx.x / WARP_SZ < numWarps)
{
if (threadIdx.x >= gridDim.x)
x = UINT64_MAX;
minWarp(x);
if (threadIdx.x % WARP_SZ == 0)
sharedX[threadIdx.x / WARP_SZ] = x;
}
__syncthreads();
// Warp 0 reduces the per-warp minima and leaves the result in sharedX[0].
if (threadIdx.x < WARP_SZ)
{
x = (threadIdx.x < numWarps) ? sharedX[threadIdx.x] : UINT64_MAX;
#pragma unroll
for (int i = WARPS_PER_BLOCK/2; i > 1; i /= 2) // assert(gridDim.x <= blockDim.x);
x = min(x, __shfl_down_sync(FULL_MASK, x, i));
sharedX[threadIdx.x] = min(x, __shfl_down_sync(FULL_MASK, x, 1));
}
__syncthreads();
// Every thread reads the same winner and unpacks it (see postMinPair for
// the packing: |value| in the high 32 bits, modulus & sign in the low 32).
x = sharedX[0];
pair.modulus = static_cast<uint32_t>(x & MODULUS_MASK);
pair.value = static_cast<int32_t>(x >> 32);
// Restore original sign.
if (pair.modulus & 1)
pair.value = ~pair.value + 1; // Should leave MOD_INFINITY unchanged.
pair.modulus |= 1;
}
// True iff m's modulus is exactly x.
__device__
static
inline
bool
equals(uint32_t x, modulus_t m)
{
  return x == m.modulus;
}
// Return a - b (mod m) in the range 0..m-1.
// Precondition: a and b are both already reduced into 0..m-1.
__device__
static
inline
uint32_t
modSub(uint32_t a, uint32_t b, modulus_t m)
{
  uint32_t diff = a - b;      // wraps modulo 2^32 when a < b
  if (a < b)
    diff += m.modulus;        // fold the wrapped value back into range
  return diff;
}
// Calculate x mod m, where x is 64 bits long.
__device__
static
inline
uint32_t
mod(uint64_t x, modulus_t m)
{
// Barrett-style reduction: m.inverse approximates 2^(W+L-1)/m (see
// getModulus), so __umul64hi(m.inverse, x) >> (L-1) yields the quotient
// x/m, and subtracting quotient*m leaves the remainder.
// NOTE(review): assumes x stays within the range for which the precomputed
// inverse gives an exact quotient -- confirm the bound against L and W.
return x - static_cast<uint64_t>(m.modulus) * (__umul64hi(m.inverse, x) >> (L - 1));
}
// Return a * b (mod m) in the range 0..m-1.
// Precondition: a and b are both in 0..m-1, and m is prime.
__device__
static
inline
uint32_t
modMul(uint32_t a, uint32_t b, modulus_t m)
{
  uint64_t product = static_cast<uint64_t>(a) * static_cast<uint64_t>(b);
  return mod(product, m);
}
// Map a signed residue in [-m/2, m/2] back to the canonical range 0..m-1.
__device__
static
inline
uint32_t
fromSigned(int32_t x, modulus_t m)
{
  if (x < 0)
    return static_cast<uint32_t>(x) + m.modulus;
  return static_cast<uint32_t>(x);
}
// Give x mod m as a signed value, mapping the upper half of 0..m-1 onto the
// negative residues.
__device__
static
inline
int32_t
toSigned(uint32_t x, modulus_t m)
{
  if (x >= m.modulus / 2)
    return static_cast<int32_t>(x - m.modulus);  // wraps to the negative half
  return static_cast<int32_t>(x);
}
// Calculate u/v mod m in the range [0, m-1], via the modular inverse of v.
template <QuoRemType QRTYPE>
__device__
static
inline
uint32_t
modDiv(uint32_t u, uint32_t v, modulus_t m)
{
  uint32_t vInverse = modInv<QRTYPE>(v, m.modulus);
  return modMul(u, vInverse, m);
}
// Calculate x mod m for a multiword unsigned integer x.
__device__
static
uint32_t
modMP(uint32_t x[], size_t xSz, modulus_t m)
{
// Reduce the multiword value x (xSz 32-bit words, least-significant first)
// mod m by Horner evaluation from the most-significant word down, staging
// warpSize words at a time in shared memory.
__shared__ uint32_t sharedX[WARP_SZ];
uint64_t result = uint64_t{0};
__syncthreads(); // protect shared memory against last call to this function.
while (xSz > warpSize)
{
xSz -= warpSize;
// Copy a block of x to shared memory for processing.
if (threadIdx.x < warpSize)
sharedX[threadIdx.x] = x[threadIdx.x + xSz];
__syncthreads();
// Process the block in shared memory.
for (size_t i = warpSize; i-- != 0; )
result = mod(result << 32 | sharedX[i], m);
__syncthreads();
}
// Now xSz <= warpSize. Copy remainder of x to shared memory and process.
if (threadIdx.x < xSz)
sharedX[threadIdx.x] = x[threadIdx.x];
__syncthreads();
for (size_t i = xSz; i-- != 0; )
result = mod(result << 32 | sharedX[i], m);
return static_cast<uint32_t>(result);
}
// Initialize modulus for this thread by reading a modulus m from the list
// and computing its "inverse", mInverse == 2^(W + L - 1) / m + 1.
__device__
static
inline
modulus_t
getModulus(uint32_t* moduliList)
{
// One modulus per thread, selected by global thread index.
uint32_t m = moduliList[blockDim.x * blockIdx.x + threadIdx.x];
uint64_t D = static_cast<uint64_t>(m);
// FC_hi = 2^(W-1).  The inverse is assembled from the quotient and
// remainder of 2^(W-1)/m so no 128-bit arithmetic is needed; per the
// comment above, it equals 2^(W+L-1)/m + 1 (up to truncation).
constexpr uint64_t FC_hi = uint64_t{1} << (W - 1);
uint64_t q = FC_hi / D;
uint64_t r = FC_hi % D;
return {m, uint64_t{1} + (q << L) + (r << L) / D};
}
// Device kernel for the GmpCudaDevice::getGcdKernel method.
//
// Modular gcd (Weber/Trevisan/Martins): each thread owns one modulus q taken
// from moduliList and carries u and v reduced mod q.  Each round of the
// reduction loop exchanges a globally-chosen (value, modulus) pair through
// the grid-wide barrier `bar`; afterwards the mixed-radix digits of the gcd
// are emitted into buf for the host to recover the result.
// On entry buf holds u (uSz words) then v (vSz words); on exit it holds a
// pair count followed by the digit pairs, or GCD_KERNEL_ERROR codes.
template <QuoRemType QRTYPE>
__global__
static
void
kernel(uint32_t* __restrict__ buf, size_t uSz, size_t vSz,
       uint32_t* __restrict__ moduliList, GmpCudaBarrier bar)
{
  int totalModuliRemaining = blockDim.x * gridDim.x;
  int ubits = (uSz + 1) * 32;  // somewhat of an overestimate
  int vbits = (vSz + 1) * 32;  // same here
  //MGCD1: [Find suitable moduli]
  modulus_t q = getModulus(moduliList);
  //MGCD2: [Convert to modular representation]
  uint32_t uq, vq;
  uq = modMP(buf, uSz, q);
  vq = modMP(buf + uSz, vSz, q);
  //MGCD3: [reduction loop]
  bool active = true;  // Is the modulus owned by this thread active, or has it been retired?
  pair_t pair, myPair;
  myPair.modulus = q.modulus;
  myPair.value = (vq == 0) ? MOD_INFINITY : toSigned(modDiv<QRTYPE>(uq, vq, q), q);
  postMinPair(myPair, bar);
  collectMinPair(pair, bar);
  do
    {
      uint32_t p, tq;
      int tbits;
      if (equals(pair.modulus, q))  // Deactivate this modulus.
        active = false, myPair.value = MOD_INFINITY;
      if (active)
        {
          p = pair.modulus;
          if (p > q.modulus)  // Bring within range.
            p -= q.modulus;
          tq = modDiv<QRTYPE>(modSub(uq, modMul(fromSigned(pair.value, q), vq, q), q), p, q);
          myPair.value = (tq == 0) ? MOD_INFINITY : toSigned(modDiv<QRTYPE>(vq, tq, q), q);
        }
      postMinPair(myPair, bar);
      if (active)
        uq = vq, vq = tq;
      totalModuliRemaining -= 1;
      tbits = ubits - (L - 1) + __ffs(abs(pair.value));
      ubits = vbits, vbits = tbits;
      if (totalModuliRemaining * (L - 2) <= ubits)  // Ran out of moduli--means initial estimate was wrong.
        {
          // FIX: was (blockIdx.x && threadIdx.x), which let every thread with
          // either index equal to zero fall through and store the error codes
          // (a redundant multi-thread write).  Use || so that only thread 0
          // of block 0 writes, matching the single-thread cleanup guard at
          // the end of MGCD4 below.
          if (blockIdx.x || threadIdx.x)
            return;
          buf[0] = GmpCudaDevice::GCD_KERNEL_ERROR, buf[1] = GmpCudaDevice::GCD_REDUX_ERROR;
          return;
        }
      collectMinPair(pair, bar);
    }
  while (pair.value != MOD_INFINITY);
  //MGCD4: [Find SIGNED mixed-radix representation] Each "digit" is either positive or negative.
  pair_t* pairs = (pair_t *)buf + 1;
  myPair.value = (active) ? toSigned(uq, q) : 0;  // Inactive threads should have low priority.
  postAnyPairPriorityNonzero(myPair, bar);
  collectAnyPairPriorityNonzero(pair, bar);
  do
    {
      if (equals(pair.modulus, q))  // deactivate modulus.
        active = false, myPair.value = 0;
      if (active)
        {
          uint32_t p = pair.modulus;
          if (pair.modulus > q.modulus)  // Bring within range.
            p -= q.modulus;
          uq = modDiv<QRTYPE>(modSub(uq, fromSigned(pair.value, q), q), p, q);
          myPair.value = toSigned(uq, q);
        }
      postAnyPairPriorityNonzero(myPair, bar);
      *pairs++ = pair;
      totalModuliRemaining -= 1;
      if (totalModuliRemaining <= 0)  // Something went wrong.
        break;
      collectAnyPairPriorityNonzero(pair, bar);
    }
  while (pair.value != 0);
  if (blockIdx.x | threadIdx.x)  // Final cleanup by just one thread.
    return;
  // Return a count of all the nonzero pairs, plus one more "pair" that includes buf[0] itself.
  // If there aren't enough moduli to recover the result, return error codes.
  if (pair.value != 0)
    buf[0] = GmpCudaDevice::GCD_KERNEL_ERROR, buf[1] = GmpCudaDevice::GCD_RECOVERY_ERROR;
  else
    buf[0] = pairs - reinterpret_cast<pair_t*>(buf);
}
__global__
static
void
checkFastReciprocal(bool* pass)
{
// Probe whether fastReciprocal() is exact on simple powers of two; the host
// (getGcdKernel) uses the result to pick FAST_EXACT vs SAFE_EXACT kernels.
*pass = (fastReciprocal(1.0f) == 1.0f && fastReciprocal(2.0f) == 0.5f);
}
// Return the appropriate gcd kernel for a device to use, based on
// whether the device supports quoRem<QUASI>, quoRem<FAST_EXACT>, or quoRem<SAFE_EXACT>.
// Devices on the sorted devicesQuoRemQuasi list get kernel<QUASI>; otherwise
// a tiny probe kernel decides between FAST_EXACT and SAFE_EXACT.
const
void*
GmpCudaDevice::getGcdKernel(char* devName)
{
  // Binary search the sorted device-name table for devName.
  void* ptr = bsearch(static_cast<const void*>(devName),
                      static_cast<const void*>(devicesQuoRemQuasi),
                      sizeof(devicesQuoRemQuasi)/sizeof(char*),
                      sizeof(char*),
                      [](const void* s1, const void* s2Ptr) -> int
                      {
                        return strcmp(static_cast<const char*>(s1), *static_cast<char * const *>(s2Ptr));
                      }
                     );
  if (ptr != NULL)
    return reinterpret_cast<const void *>(&kernel<QUASI>);
  bool* globalPass;
  bool pass;
  // FIX: these runtime calls used to live inside assert(...).  With NDEBUG
  // defined, assert() expands to nothing and the calls would never execute;
  // hoist each call out and assert only on its returned status.
  hipError_t status = hipMalloc(&globalPass, sizeof(pass));
  assert(hipSuccess == status);
  hipLaunchKernelGGL(( checkFastReciprocal), dim3(1),dim3(1), 0, 0, globalPass);
  status = hipDeviceSynchronize();
  assert(hipSuccess == status);
  status = hipMemcpy(&pass, globalPass, sizeof(pass), hipMemcpyDeviceToHost);
  assert(hipSuccess == status);
  status = hipFree(globalPass);
  assert(hipSuccess == status);
  (void)status;  // silence unused-variable warning under NDEBUG
  return reinterpret_cast<const void *>((pass) ? &kernel<FAST_EXACT> : &kernel<SAFE_EXACT>);
}
| 97b69594e5f0d6584d629f85e2647230449a4ef6.cu | /* GmpCudaDevice-getGcdKernel.cu -- provides GmpCudaDevice::getGcdKernel method
(includes the gcd kernel code).
Implementation of the modular integer gcd algorithm using L <= 32 bit moduli.
Reference: Weber, Trevisan, Martins 2005. A Modular Integer GCD algorithm
Journal of Algorithms 54, 2 (February, 2005) 152-167.
Note that there is an error in Fig. 2, which shows that the
final result can be recovered as the mixed radix representation
is calculated. In actuality, all the mixed radix digits and moduli
must be computed before the actual GCD can be recovered.
Based on initial work by
Authors: Justin Brew, Anthony Rizzo, Kenneth Weber
Mount Union College
June 25, 2009
Further revisions by
K. Weber University of Mount Union
weberk@mountunion.edu
See GmpCudaDevice.cu for revision history.
*/
// Enforce use of CUDA 9 or higher at compile time.
#if defined(CUDART_VERSION) && CUDART_VERSION >= 9000
#else
#error Requires CUDA 9 or more recent
#endif
#include <cassert>
#include <cstring>
#include <cstdlib>
#include <cuda_runtime.h>
#include "GmpCuda.h"
#include "GmpCudaDevice-gcdDevicesQuoRemQuasi.h"
#include "modInv.h"
using namespace GmpCuda;
static constexpr int WARPS_PER_BLOCK = GmpCudaDevice::GCD_BLOCK_SZ / WARP_SZ;
static constexpr unsigned FULL_MASK = 0xFFFFFFFF; // Used in sync functions.
static constexpr uint64_t MODULUS_MASK = uint64_t{0xFFFFFFFF}; // Mask for modulus portion of pair.
static constexpr int32_t MOD_INFINITY = INT32_MIN; // Larger than any modulur value
typedef GmpCudaDevice::pair_t pair_t; // Used to pass back result.
// This type is used to conveniently manipulate the modulus and its inverse.
typedef struct {uint32_t modulus; uint64_t inverse;} modulus_t;
// Elect one lane of the warp whose pair has a nonzero value (and satisfies
// the predicate).  The choice is made with a ballot, so every lane of the
// warp deterministically agrees on the same answer; when no lane qualifies,
// lane 0 is returned as a fallback.
__device__
static
inline
int
findAnyNonZero(pair_t pair, bool predicate = true)
{
  unsigned eligible = __ballot_sync(FULL_MASK, predicate && pair.value != 0);
  int lane = __ffs(eligible) - 1;  // -1 when the ballot came back empty
  return (lane < 0) ? 0 : lane;
}
// Posts to the barrier one of the pair parameters whose value is not 0.
// If no such value is found, a pair with a 0 value is posted.
// Preconditions: all threads in block participate.
__device__
static
void
postAnyPairPriorityNonzero(pair_t pair, GmpCudaBarrier &bar)
{
__shared__ pair_t sharedPair[WARP_SZ];
__syncthreads(); // protect shared memory against last call to this function.
// Each warp elects one lane holding a nonzero value (lane 0 if none) and
// that lane deposits its pair into this warp's slot of sharedPair.
if (findAnyNonZero(pair) == threadIdx.x % WARP_SZ)
sharedPair[threadIdx.x / WARP_SZ] = pair;
__syncthreads();
// Repeat the election across the per-warp slots, then publish the winner
// to the grid-wide barrier as a packed 64-bit value.
pair = sharedPair[findAnyNonZero(sharedPair[threadIdx.x], threadIdx.x < WARPS_PER_BLOCK)];
bar.post(*reinterpret_cast<uint64_t *>(&pair));
}
// Chooses one of the pairs in the barrier that doesn't have a 0 value;
// chosen pair is returned in pair as result.
// If there are no nonzero values, a pair with value 0 is returned.
// Preconditions: all threads in block participate.
// Postcondition: every thread will have the same pair.
__device__
static
void
collectAnyPairPriorityNonzero(pair_t& __restrict__ pair,
GmpCudaBarrier& __restrict__ bar)
{
__shared__ pair_t sharedPair[WARP_SZ];
bar.collect(*reinterpret_cast<uint64_t*>(&pair)); // Only low gridDim.x threads have "good" values.
__syncthreads(); // protect shared memory against last call to this function.
int warpLane = threadIdx.x % WARP_SZ;
// One elected lane per warp (only among the low gridDim.x threads that
// collected real values) deposits its pair into sharedPair.
if (findAnyNonZero(pair, threadIdx.x < gridDim.x) == warpLane && threadIdx.x < gridDim.x)
sharedPair[threadIdx.x / WARP_SZ] = pair;
__syncthreads();
int numWarps = (gridDim.x - 1) / WARP_SZ + 1;
// All warps do this and get common value for winner.
pair = sharedPair[findAnyNonZero(sharedPair[warpLane], warpLane < numWarps)];
}
// Warp-wide minimum reduction via shuffle-down; the result lands in lane 0.
__device__
inline
void
minWarp(uint64_t &x)
{
#pragma unroll
  for (int step = WARP_SZ / 2; step != 0; step >>= 1)
    {
      uint64_t other = __shfl_down_sync(FULL_MASK, x, step);
      x = min(x, other);
    }
}
// Calculates abs(x), except that MOD_INFINITY == INT32_MIN is not changed.
__device__
static
inline
uint64_t
modAbs(int32_t x)
{
// Two's-complement negate for x < 0.  For x == MOD_INFINITY (INT32_MIN) the
// negation wraps back to INT32_MIN, which then sign-extends to a huge
// uint64_t -- so the "infinity" sentinel sorts above every real value.
return (x < 0) ? ~x + 1 : x;
}
// Posts pair which achieves the minimum of the absolute value
// of all pairs in each threadblock to bar.
// Precondition: modulus of each pair is odd and all threads participate.
// Postcondition: bar is ready for collectMinPair to be called.
__device__
static
void
postMinPair(pair_t pair, GmpCudaBarrier& bar)
{
__shared__ uint64_t sharedX[WARP_SZ];
__syncthreads(); // protect shared memory against last call to this function.
// Prepare a long int composed of the absolute value of pair.value in the high bits and pair.modulus in the low bits.
// Store sign of pair.value in the low bit of pair.modulus, which should always be 1 since it's odd.
uint64_t x = (modAbs(pair.value) << 32) | (pair.modulus - (pair.value >= 0));
// Find the smallest in each warp, and store in sharedX.
minWarp(x);
if (threadIdx.x % WARP_SZ == 0)
sharedX[threadIdx.x / WARP_SZ] = x;
__syncthreads();
// Now find the min of the values in sharedX.
// WARPS_PER_BLOCK must be a power of 2 <= WARP_SZ.
// Warp 0 reduces the per-warp minima down to a single block minimum.
if (threadIdx.x < WARP_SZ)
{
x = sharedX[threadIdx.x];
#pragma unroll
for (int i = WARPS_PER_BLOCK/2; i > 0; i /= 2)
x = min(x, __shfl_down_sync(FULL_MASK, x, i));
}
// Only lane 0's x is the true block minimum; bar.post is expected to use it.
bar.post(x);
}
// Returns, in pair, the pair which achieves the global minimum of the absolute value
// of the value over all the pairs that have been posted to bar.
// Precondition: postMinPair was previously called and all threads participate.
__device__
static
void
collectMinPair(pair_t& __restrict__ pair, GmpCudaBarrier& __restrict__ bar)
{
uint64_t x;
bar.collect(x);
__shared__ uint64_t sharedX[WARP_SZ];
__syncthreads(); // protect shared memory against last call to this function.
int numWarps = (gridDim.x - 1) / WARP_SZ + 1;
// Reduce the gridDim.x collected values; threads beyond gridDim.x supply
// UINT64_MAX, the identity for min.
if (threadIdx.x / WARP_SZ < numWarps)
{
if (threadIdx.x >= gridDim.x)
x = UINT64_MAX;
minWarp(x);
if (threadIdx.x % WARP_SZ == 0)
sharedX[threadIdx.x / WARP_SZ] = x;
}
__syncthreads();
// Warp 0 reduces the per-warp minima and leaves the result in sharedX[0].
if (threadIdx.x < WARP_SZ)
{
x = (threadIdx.x < numWarps) ? sharedX[threadIdx.x] : UINT64_MAX;
#pragma unroll
for (int i = WARPS_PER_BLOCK/2; i > 1; i /= 2) // assert(gridDim.x <= blockDim.x);
x = min(x, __shfl_down_sync(FULL_MASK, x, i));
sharedX[threadIdx.x] = min(x, __shfl_down_sync(FULL_MASK, x, 1));
}
__syncthreads();
// Every thread reads the same winner and unpacks it (see postMinPair for
// the packing: |value| in the high 32 bits, modulus & sign in the low 32).
x = sharedX[0];
pair.modulus = static_cast<uint32_t>(x & MODULUS_MASK);
pair.value = static_cast<int32_t>(x >> 32);
// Restore original sign.
if (pair.modulus & 1)
pair.value = ~pair.value + 1; // Should leave MOD_INFINITY unchanged.
pair.modulus |= 1;
}
// True iff m's modulus is exactly x.
__device__
static
inline
bool
equals(uint32_t x, modulus_t m)
{
  return x == m.modulus;
}
// Return a - b (mod m) in the range 0..m-1.
// Precondition: a and b are both already reduced into 0..m-1.
__device__
static
inline
uint32_t
modSub(uint32_t a, uint32_t b, modulus_t m)
{
  uint32_t diff = a - b;      // wraps modulo 2^32 when a < b
  if (a < b)
    diff += m.modulus;        // fold the wrapped value back into range
  return diff;
}
// Calculate x mod m, where x is 64 bits long.
__device__
static
inline
uint32_t
mod(uint64_t x, modulus_t m)
{
// Barrett-style reduction: m.inverse approximates 2^(W+L-1)/m (see
// getModulus), so __umul64hi(m.inverse, x) >> (L-1) yields the quotient
// x/m, and subtracting quotient*m leaves the remainder.
// NOTE(review): assumes x stays within the range for which the precomputed
// inverse gives an exact quotient -- confirm the bound against L and W.
return x - static_cast<uint64_t>(m.modulus) * (__umul64hi(m.inverse, x) >> (L - 1));
}
// Return a * b (mod m) in the range 0..m-1.
// Precondition: a and b are both in 0..m-1, and m is prime.
__device__
static
inline
uint32_t
modMul(uint32_t a, uint32_t b, modulus_t m)
{
  uint64_t product = static_cast<uint64_t>(a) * static_cast<uint64_t>(b);
  return mod(product, m);
}
// Map a signed residue in [-m/2, m/2] back to the canonical range 0..m-1.
__device__
static
inline
uint32_t
fromSigned(int32_t x, modulus_t m)
{
  if (x < 0)
    return static_cast<uint32_t>(x) + m.modulus;
  return static_cast<uint32_t>(x);
}
// Give x mod m as a signed value, mapping the upper half of 0..m-1 onto the
// negative residues.
__device__
static
inline
int32_t
toSigned(uint32_t x, modulus_t m)
{
  if (x >= m.modulus / 2)
    return static_cast<int32_t>(x - m.modulus);  // wraps to the negative half
  return static_cast<int32_t>(x);
}
// Calculate u/v mod m in the range [0, m-1], via the modular inverse of v.
template <QuoRemType QRTYPE>
__device__
static
inline
uint32_t
modDiv(uint32_t u, uint32_t v, modulus_t m)
{
  uint32_t vInverse = modInv<QRTYPE>(v, m.modulus);
  return modMul(u, vInverse, m);
}
// Calculate x mod m for a multiword unsigned integer x.
__device__
static
uint32_t
modMP(uint32_t x[], size_t xSz, modulus_t m)
{
// Reduce the multiword value x (xSz 32-bit words, least-significant first)
// mod m by Horner evaluation from the most-significant word down, staging
// warpSize words at a time in shared memory.
__shared__ uint32_t sharedX[WARP_SZ];
uint64_t result = uint64_t{0};
__syncthreads(); // protect shared memory against last call to this function.
while (xSz > warpSize)
{
xSz -= warpSize;
// Copy a block of x to shared memory for processing.
if (threadIdx.x < warpSize)
sharedX[threadIdx.x] = x[threadIdx.x + xSz];
__syncthreads();
// Process the block in shared memory.
for (size_t i = warpSize; i-- != 0; )
result = mod(result << 32 | sharedX[i], m);
__syncthreads();
}
// Now xSz <= warpSize. Copy remainder of x to shared memory and process.
if (threadIdx.x < xSz)
sharedX[threadIdx.x] = x[threadIdx.x];
__syncthreads();
for (size_t i = xSz; i-- != 0; )
result = mod(result << 32 | sharedX[i], m);
return static_cast<uint32_t>(result);
}
// Initialize modulus for this thread by reading a modulus m from the list
// and computing its "inverse", mInverse == 2^(W + L - 1) / m + 1.
__device__
static
inline
modulus_t
getModulus(uint32_t* moduliList)
{
// One modulus per thread, selected by global thread index.
uint32_t m = moduliList[blockDim.x * blockIdx.x + threadIdx.x];
uint64_t D = static_cast<uint64_t>(m);
// FC_hi = 2^(W-1).  The inverse is assembled from the quotient and
// remainder of 2^(W-1)/m so no 128-bit arithmetic is needed; per the
// comment above, it equals 2^(W+L-1)/m + 1 (up to truncation).
constexpr uint64_t FC_hi = uint64_t{1} << (W - 1);
uint64_t q = FC_hi / D;
uint64_t r = FC_hi % D;
return {m, uint64_t{1} + (q << L) + (r << L) / D};
}
// Device kernel for the GmpCudaDevice::getGcdKernel method.
//
// Modular gcd (Weber/Trevisan/Martins): each thread owns one modulus q taken
// from moduliList and carries u and v reduced mod q.  Each round of the
// reduction loop exchanges a globally-chosen (value, modulus) pair through
// the grid-wide barrier `bar`; afterwards the mixed-radix digits of the gcd
// are emitted into buf for the host to recover the result.
// On entry buf holds u (uSz words) then v (vSz words); on exit it holds a
// pair count followed by the digit pairs, or GCD_KERNEL_ERROR codes.
template <QuoRemType QRTYPE>
__global__
static
void
kernel(uint32_t* __restrict__ buf, size_t uSz, size_t vSz,
       uint32_t* __restrict__ moduliList, GmpCudaBarrier bar)
{
  int totalModuliRemaining = blockDim.x * gridDim.x;
  int ubits = (uSz + 1) * 32;  // somewhat of an overestimate
  int vbits = (vSz + 1) * 32;  // same here
  //MGCD1: [Find suitable moduli]
  modulus_t q = getModulus(moduliList);
  //MGCD2: [Convert to modular representation]
  uint32_t uq, vq;
  uq = modMP(buf, uSz, q);
  vq = modMP(buf + uSz, vSz, q);
  //MGCD3: [reduction loop]
  bool active = true;  // Is the modulus owned by this thread active, or has it been retired?
  pair_t pair, myPair;
  myPair.modulus = q.modulus;
  myPair.value = (vq == 0) ? MOD_INFINITY : toSigned(modDiv<QRTYPE>(uq, vq, q), q);
  postMinPair(myPair, bar);
  collectMinPair(pair, bar);
  do
    {
      uint32_t p, tq;
      int tbits;
      if (equals(pair.modulus, q))  // Deactivate this modulus.
        active = false, myPair.value = MOD_INFINITY;
      if (active)
        {
          p = pair.modulus;
          if (p > q.modulus)  // Bring within range.
            p -= q.modulus;
          tq = modDiv<QRTYPE>(modSub(uq, modMul(fromSigned(pair.value, q), vq, q), q), p, q);
          myPair.value = (tq == 0) ? MOD_INFINITY : toSigned(modDiv<QRTYPE>(vq, tq, q), q);
        }
      postMinPair(myPair, bar);
      if (active)
        uq = vq, vq = tq;
      totalModuliRemaining -= 1;
      tbits = ubits - (L - 1) + __ffs(abs(pair.value));
      ubits = vbits, vbits = tbits;
      if (totalModuliRemaining * (L - 2) <= ubits)  // Ran out of moduli--means initial estimate was wrong.
        {
          // FIX: was (blockIdx.x && threadIdx.x), which let every thread with
          // either index equal to zero fall through and store the error codes
          // (a redundant multi-thread write).  Use || so that only thread 0
          // of block 0 writes, matching the single-thread cleanup guard at
          // the end of MGCD4 below.
          if (blockIdx.x || threadIdx.x)
            return;
          buf[0] = GmpCudaDevice::GCD_KERNEL_ERROR, buf[1] = GmpCudaDevice::GCD_REDUX_ERROR;
          return;
        }
      collectMinPair(pair, bar);
    }
  while (pair.value != MOD_INFINITY);
  //MGCD4: [Find SIGNED mixed-radix representation] Each "digit" is either positive or negative.
  pair_t* pairs = (pair_t *)buf + 1;
  myPair.value = (active) ? toSigned(uq, q) : 0;  // Inactive threads should have low priority.
  postAnyPairPriorityNonzero(myPair, bar);
  collectAnyPairPriorityNonzero(pair, bar);
  do
    {
      if (equals(pair.modulus, q))  // deactivate modulus.
        active = false, myPair.value = 0;
      if (active)
        {
          uint32_t p = pair.modulus;
          if (pair.modulus > q.modulus)  // Bring within range.
            p -= q.modulus;
          uq = modDiv<QRTYPE>(modSub(uq, fromSigned(pair.value, q), q), p, q);
          myPair.value = toSigned(uq, q);
        }
      postAnyPairPriorityNonzero(myPair, bar);
      *pairs++ = pair;
      totalModuliRemaining -= 1;
      if (totalModuliRemaining <= 0)  // Something went wrong.
        break;
      collectAnyPairPriorityNonzero(pair, bar);
    }
  while (pair.value != 0);
  if (blockIdx.x | threadIdx.x)  // Final cleanup by just one thread.
    return;
  // Return a count of all the nonzero pairs, plus one more "pair" that includes buf[0] itself.
  // If there aren't enough moduli to recover the result, return error codes.
  if (pair.value != 0)
    buf[0] = GmpCudaDevice::GCD_KERNEL_ERROR, buf[1] = GmpCudaDevice::GCD_RECOVERY_ERROR;
  else
    buf[0] = pairs - reinterpret_cast<pair_t*>(buf);
}
__global__
static
void
checkFastReciprocal(bool* pass)
{
// Probe whether fastReciprocal() is exact on simple powers of two; the host
// (getGcdKernel) uses the result to pick FAST_EXACT vs SAFE_EXACT kernels.
*pass = (fastReciprocal(1.0f) == 1.0f && fastReciprocal(2.0f) == 0.5f);
}
// Return the appropriate gcd kernel for a device to use, based on
// whether the device supports quoRem<QUASI>, quoRem<FAST_EXACT>, or quoRem<SAFE_EXACT>.
// Devices on the sorted devicesQuoRemQuasi list get kernel<QUASI>; otherwise
// a tiny probe kernel decides between FAST_EXACT and SAFE_EXACT.
const
void*
GmpCudaDevice::getGcdKernel(char* devName)
{
  // Binary search the sorted device-name table for devName.
  void* ptr = bsearch(static_cast<const void*>(devName),
                      static_cast<const void*>(devicesQuoRemQuasi),
                      sizeof(devicesQuoRemQuasi)/sizeof(char*),
                      sizeof(char*),
                      [](const void* s1, const void* s2Ptr) -> int
                      {
                        return strcmp(static_cast<const char*>(s1), *static_cast<char * const *>(s2Ptr));
                      }
                     );
  if (ptr != NULL)
    return reinterpret_cast<const void *>(&kernel<QUASI>);
  bool* globalPass;
  bool pass;
  // FIX: these runtime calls used to live inside assert(...).  With NDEBUG
  // defined, assert() expands to nothing and the calls would never execute;
  // hoist each call out and assert only on its returned status.
  cudaError_t status = cudaMalloc(&globalPass, sizeof(pass));
  assert(cudaSuccess == status);
  checkFastReciprocal<<<1,1>>>(globalPass);
  status = cudaDeviceSynchronize();
  assert(cudaSuccess == status);
  status = cudaMemcpy(&pass, globalPass, sizeof(pass), cudaMemcpyDeviceToHost);
  assert(cudaSuccess == status);
  status = cudaFree(globalPass);
  assert(cudaSuccess == status);
  (void)status;  // silence unused-variable warning under NDEBUG
  return reinterpret_cast<const void *>((pass) ? &kernel<FAST_EXACT> : &kernel<SAFE_EXACT>);
}
|
de5e914a7a2d48102c551ed59e9cfcb53b7bff6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point fuzz kernel: threads a chain of float math
// through `comp` and prints the final value with full precision.  The file
// header says "Do not modify"; only comments are added here.
// NOTE(review): every nested loop reuses the name `i`, shadowing the outer
// loop variable -- intentional in generated code, but easy to misread.
__global__
void compute(float comp, int var_1,int var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
if (comp > (-0.0f / var_5 + +1.8646E35f + -0.0f - (-1.2792E-42f + -1.1574E-35f))) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp += -0.0f + logf((-1.3791E-42f / var_6 * (var_7 * (+1.8509E-44f * (var_8 * +0.0f)))));
float tmp_1 = +1.3279E35f;
comp += tmp_1 + atanf(ceilf((+1.3833E-36f + var_9)));
for (int i=0; i < var_3; ++i) {
comp += -0.0f + +0.0f + -1.7904E-43f - -1.8219E-43f;
}
if (comp >= (var_10 / +0.0f * (+1.4879E-36f * var_11))) {
comp += floorf(+1.3935E-1f + (+0.0f + floorf(expf(+1.2877E-37f / var_12 / +1.3708E-44f / logf((var_13 * (var_14 / var_15)))))));
comp = cosf((var_16 + sinhf(-1.5834E36f)));
}
for (int i=0; i < var_4; ++i) {
float tmp_2 = (var_17 + fabsf(var_18 - (-1.2421E29f + var_19 + +1.8422E35f * -1.4727E36f)));
comp += tmp_2 / var_20 / atanf(var_21 / (var_22 * (+0.0f * (-1.6922E-14f + -1.3643E-37f))));
}
}
}
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element host array with every slot set to v.
   The caller owns the returned buffer (release with free()).
   Not referenced by the visible main() in this generated harness. */
float* initPointer(float v) {
  const int kCount = 10;
  float* buffer = (float*) malloc(sizeof(float) * kCount);
  int i = 0;
  while (i < kCount) {
    buffer[i] = v;
    ++i;
  }
  return buffer;
}
/* Entry point for the generated test: parses 23 numeric command-line
   arguments and forwards them to the compute kernel on a single thread. */
int main(int argc, char** argv) {
  /* Program variables */
  /* FIX: the original indexed argv[1]..argv[23] without checking argc,
     which is undefined behavior (typically a crash) when arguments are
     missing.  Fail cleanly instead. */
  if (argc < 24) {
    fprintf(stderr, "usage: %s <23 numeric arguments>\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  int tmp_5 = atoi(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  hipDeviceSynchronize();
  return 0;
}
| de5e914a7a2d48102c551ed59e9cfcb53b7bff6f.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point fuzz kernel: threads a chain of float math
// through `comp` and prints the final value with full precision.  The file
// header says "Do not modify"; only comments are added here.
// NOTE(review): every nested loop reuses the name `i`, shadowing the outer
// loop variable -- intentional in generated code, but easy to misread.
__global__
void compute(float comp, int var_1,int var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
if (comp > (-0.0f / var_5 + +1.8646E35f + -0.0f - (-1.2792E-42f + -1.1574E-35f))) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp += -0.0f + logf((-1.3791E-42f / var_6 * (var_7 * (+1.8509E-44f * (var_8 * +0.0f)))));
float tmp_1 = +1.3279E35f;
comp += tmp_1 + atanf(ceilf((+1.3833E-36f + var_9)));
for (int i=0; i < var_3; ++i) {
comp += -0.0f + +0.0f + -1.7904E-43f - -1.8219E-43f;
}
if (comp >= (var_10 / +0.0f * (+1.4879E-36f * var_11))) {
comp += floorf(+1.3935E-1f + (+0.0f + floorf(expf(+1.2877E-37f / var_12 / +1.3708E-44f / logf((var_13 * (var_14 / var_15)))))));
comp = cosf((var_16 + sinhf(-1.5834E36f)));
}
for (int i=0; i < var_4; ++i) {
float tmp_2 = (var_17 + fabsf(var_18 - (-1.2421E29f + var_19 + +1.8422E35f * -1.4727E36f)));
comp += tmp_2 / var_20 / atanf(var_21 / (var_22 * (+0.0f * (-1.6922E-14f + -1.3643E-37f))));
}
}
}
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element host array with every slot set to v.
   The caller owns the returned buffer (release with free()).
   Not referenced by the visible main() in this generated harness. */
float* initPointer(float v) {
  const int kCount = 10;
  float* buffer = (float*) malloc(sizeof(float) * kCount);
  int i = 0;
  while (i < kCount) {
    buffer[i] = v;
    ++i;
  }
  return buffer;
}
/* Entry point for the generated test: parses 23 numeric command-line
   arguments and forwards them to the compute kernel on a single thread. */
int main(int argc, char** argv) {
  /* Program variables */
  /* FIX: the original indexed argv[1]..argv[23] without checking argc,
     which is undefined behavior (typically a crash) when arguments are
     missing.  Fail cleanly instead. */
  if (argc < 24) {
    fprintf(stderr, "usage: %s <23 numeric arguments>\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  int tmp_5 = atoi(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  cudaDeviceSynchronize();
  return 0;
}
|
2a4832ba5b008d676589f95ea6b54d6d732f2f3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sieve_atkin_cuda.cuh"
//#include "../support/cuda_error_output.h"
//CUDA---------------------------------------------------------------------------------------------
// Sieve-of-Atkin marking kernel: each thread handles candidate number
// x = in_start + global thread id, toggling in_device_memory[z-1] for the
// quadratic-form solutions z it generates, then delisting square multiples.
__global__ void AtkinKernel(size_t in_start, size_t in_n, bool* in_device_memory) {
//Get the thread's index
size_t x = blockIdx.x*blockDim.x + threadIdx.x;
//The first cuda thread has id 0
//-> It computes number x
//-> It should set array index x-1
x += in_start;
//Sieve of Atkins
//> For (x^2 <= n) and (y^2 <= n), x = 1,2,..., y = 1,2,...
//> A number is prime if any of the following is true:
//>> (z = 4*x*x + y*y) has odd number of solutions AND (z % 12 = 1) or (z % 12 = 5)
//>> (z = 3*x*x + y*y) has odd number of solutions AND (z % 12 = 7)
//>> (z = 3*x*x - y*y) has odd number of solutions AND (x > y) AND (z % 12 = 11)
//> Multiples of squares might have been marked, delist:
//>> (z = x*x*y), x = 1,2,..., y = 1,2,...
//NTS: An terrible amount of if-statements for a GPGPU kernel.
//- Path divergence will cause slowdown.
//- Could one rewrite them as an assignment ?
//- (ergo: 'if(x) set array = true' -> set array (x))
//- Might overwrite already set correct result in some cases
//- Since we only set 'true' maybe we can make it so it does not overwrite already true entries?
//NTS: "Odd number of solutions", does that mean we should flip the state to the inverse?
// Two hits (even number of solutions) would then flip false->true->false
//Ans: Yes, apparently.
//NOTE(review): the flips below are non-atomic read-modify-writes; different
//threads (different x) can reach the same z concurrently, so a lost toggle
//looks possible -- verify whether the moduli conditions exclude that.
if (x*x <= in_n) {
for (size_t y = 1; y*y <= in_n; y++) {
size_t z = (4*x*x) + (y*y);
//if (z <= in_n && (z % 12 == 1 || z % 12 == 5)) { in_device_memory[z - 1] = true; }
if (z <= in_n && (z % 12 == 1 || z % 12 == 5)) { in_device_memory[z - 1] = !in_device_memory[z - 1]; }
z = (3*x*x) + (y*y);
//if (z <= in_n && (z % 12 == 7)) { in_device_memory[z - 1] = true; }
if (z <= in_n && (z % 12 == 7)) { in_device_memory[z - 1] = !in_device_memory[z - 1]; }
z = (3*x*x) - (y*y);
//if (z <= in_n && (x > y) && (z % 12 == 11)) { in_device_memory[z - 1] = true; }
if (z <= in_n && (x > y) && (z % 12 == 11)) { in_device_memory[z - 1] = !in_device_memory[z - 1]; }
}
}
//Wait for other threads to avoid race-condition
//NOTE(review): __syncthreads() is a *block-level* barrier only; the pass
//below reads flags that threads in other blocks may still be toggling, so a
//cross-block race remains possible.  The host-side AtkinSquareCleanUp()
//exists as a sequential fallback for exactly this step.
__syncthreads();
if (x >= 5 && x*x <= in_n) {
if (in_device_memory[x - 1]) {
for (size_t y = x*x; y <= in_n; y += x*x) {
in_device_memory[y - 1] = false;
}
}
}
}
//Private------------------------------------------------------------------------------------------
//Launches AtkinKernel on the default stream with no dynamic shared memory.
//in_start: first number handled by the grid; in_end: inclusive sieve limit
//(forwarded to the kernel as in_n); in_mem_ptr: device-side flag array.
void SieveAtkinCUDA::SieveKernel(unsigned int in_blocks, unsigned int in_threads, size_t in_start, size_t in_end, bool* in_mem_ptr) {
	hipLaunchKernelGGL(( AtkinKernel) , dim3(in_blocks), dim3(in_threads), 0, 0, in_start, in_end, in_mem_ptr);
}
//Host-side clean-up pass of the Atkin sieve: for every number p >= 5 that is
//still flagged prime, clear all multiples of p^2 (index = number - 1).
//Only needed when the kernel itself does not perform the square clean-up.
void SieveAtkinCUDA::AtkinSquareCleanUp() {
	const size_t limit = this->end_;
	for (size_t base = 5; base * base <= limit; ++base) {
		//Skip numbers not currently flagged prime.
		if (!this->mem_class_ptr_->CheckIndex(base - 1)) { continue; }
		const size_t square = base * base;
		for (size_t multiple = square; multiple <= limit; multiple += square) {
			this->mem_class_ptr_->SetNonPrime(multiple - 1);
		}
	}
}
//Runs the full GPU sieve pipeline in fixed order: allocate device memory,
//upload the host flags, launch the kernel work-groups, download the results,
//and free device memory. A timer sample is taken after each stage.
void SieveAtkinCUDA::DoSieve() {
	//Allocate
	this->AllocateGPUMemory();
	this->timer_.SaveTime();
	//Upload
	this->UploadMemory();
	this->timer_.SaveTime();
	//Launch work-groups
	this->LaunchKernel(this->start_);
	this->timer_.SaveTime();
	//Download
	this->DownloadMemory();
	this->timer_.SaveTime();
	//Deallocate
	this->DeallocateGPUMemory();
	//Optional: If __syncthreads() isn't used in kernel
	//this->AtkinSquareCleanUp();
}
//Maps a storage index back to the number it represents
//(index 0 corresponds to start_).
size_t SieveAtkinCUDA::IndexToNumber(size_t in_i) {
	size_t number = this->start_;
	number += in_i;
	return number;
}
//Public-------------------------------------------------------------------------------------------
//Builds an Atkin sieve over [1, in_n] with internally owned bool storage and
//immediately runs the GPU sieve. Storage index k represents the number k+1,
//so SetPrime(1)/SetPrime(2) mark the numbers 2 and 3.
SieveAtkinCUDA::SieveAtkinCUDA(size_t in_n)
	: SieveBase(1, in_n), SieveCUDA() {
	//NTS: Atkins excluding limit? ( [1, n[ )
	//Determine memory capacity needed
	size_t mem_size = in_n;
	this->mem_class_ptr_ = new PrimeMemoryBool(mem_size);
	//this->mem_class_ptr_ = new PrimeMemoryBit(mem_size);
	this->LinkMemory(this->mem_class_ptr_);
	//Atkin starts all as non-primes
	this->mem_class_ptr_->SetAllNonPrime();
	this->timer_.SaveTime();
	//Set 2 and 3 manually as sieving process starts at 5
	if (in_n >= 2) { this->mem_class_ptr_->SetPrime(1); }
	if (in_n >= 3) { this->mem_class_ptr_->SetPrime(2); }
	this->DoSieve();
	this->timer_.SaveTime();
}
//Builds an Atkin sieve over [1, in_n] using caller-owned fragmentation-safe
//memory (in_ptr) instead of allocating its own; the destructor detects this
//case and does not delete the pointer. Otherwise identical to the other ctor.
SieveAtkinCUDA::SieveAtkinCUDA(size_t in_n, PrimeMemoryFragsafe * in_ptr)
	: SieveBase(1, in_n), SieveCUDA() {
	//NTS: Atkins excluding limit? ( [1, n[ )
	//Determine memory capacity needed
	size_t mem_size = in_n;
	//Set fragsafe memory
	in_ptr->AllocateSubMemory(mem_size);
	this->mem_class_ptr_ = in_ptr;
	this->LinkMemory(this->mem_class_ptr_);
	//Atkin starts all as non-primes
	this->mem_class_ptr_->SetAllNonPrime();
	this->timer_.SaveTime();
	//Set 2 and 3 manually as sieving process starts at 5
	if (in_n >= 2) { this->mem_class_ptr_->SetPrime(1); }
	if (in_n >= 3) { this->mem_class_ptr_->SetPrime(2); }
	this->DoSieve();
	this->timer_.SaveTime();
}
//Frees the prime-flag storage, unless it is caller-owned fragsafe memory.
SieveAtkinCUDA::~SieveAtkinCUDA() {
	//Fragsafe memory is owned by the caller; never free it here.
	bool owns_memory = (dynamic_cast<PrimeMemoryFragsafe*>(this->mem_class_ptr_) == nullptr);
	if (owns_memory && this->mem_class_ptr_ != nullptr) {
		delete this->mem_class_ptr_;
		this->mem_class_ptr_ = nullptr;
	}
}
//Returns the sieved primality flag for in_num.
//Numbers outside [start_, end_] are reported as non-prime.
bool SieveAtkinCUDA::IsPrime(size_t in_num) {
	bool in_range = (in_num >= this->start_) && (in_num <= this->end_);
	if (!in_range) { return false; }
	//Map the number onto its storage slot and return the stored flag.
	return this->mem_class_ptr_->CheckIndex(in_num - this->start_);
}
| 2a4832ba5b008d676589f95ea6b54d6d732f2f3a.cu | #include "sieve_atkin_cuda.cuh"
//#include "../support/cuda_error_output.h"
//CUDA---------------------------------------------------------------------------------------------
//Sieve of Atkin marking kernel: one thread per candidate number.
//x = global thread id + in_start is the candidate; number x maps to array
//slot x-1 of in_device_memory. in_n is the inclusive sieve limit.
//NOTE(review): the toggle writes below are non-atomic read-modify-writes and
//different threads can reach the same z, so flips may be lost - confirm the
//output is validated against a reference sieve.
__global__ void AtkinKernel(size_t in_start, size_t in_n, bool* in_device_memory) {
	//Get the thread's index
	size_t x = blockIdx.x*blockDim.x + threadIdx.x;
	//The first cuda thread has id 0
	//-> It computes number x
	//-> It should set array index x-1
	x += in_start;
	//Sieve of Atkins
	//> For (x^2 <= n) and (y^2 <= n), x = 1,2,..., y = 1,2,...
	//> A number is prime if any of the following is true:
	//>> (z = 4*x*x + y*y) has odd number of solutions AND (z % 12 = 1) or (z % 12 = 5)
	//>> (z = 3*x*x + y*y) has odd number of solutions AND (z % 12 = 7)
	//>> (z = 3*x*x - y*y) has odd number of solutions AND (x > y) AND (z % 12 = 11)
	//> Multiples of squares might have been marked, delist:
	//>> (z = x*x*y), x = 1,2,..., y = 1,2,...
	//NTS: An terrible amount of if-statements for a GPGPU kernel.
	//- Path divergence will cause slowdown.
	//- Could one rewrite them as an assignment ?
	//- (ergo: 'if(x) set array = true' -> set array (x))
	//- Might overwrite already set correct result in some cases
	//- Since we only set 'true' maybe we can make it so it does not overwrite already true entries?
	//NTS: "Odd number of solutions", does that mean we should flip the state to the inverse?
	// Two hits (even number of solutions) would then flip false->true->false
	//Ans: Yes, apparently.
	if (x*x <= in_n) {
		for (size_t y = 1; y*y <= in_n; y++) {
			//Quadratic 1: z = 4x^2 + y^2, prime candidate when z % 12 is 1 or 5.
			size_t z = (4*x*x) + (y*y);
			//if (z <= in_n && (z % 12 == 1 || z % 12 == 5)) { in_device_memory[z - 1] = true; }
			if (z <= in_n && (z % 12 == 1 || z % 12 == 5)) { in_device_memory[z - 1] = !in_device_memory[z - 1]; }
			//Quadratic 2: z = 3x^2 + y^2, prime candidate when z % 12 == 7.
			z = (3*x*x) + (y*y);
			//if (z <= in_n && (z % 12 == 7)) { in_device_memory[z - 1] = true; }
			if (z <= in_n && (z % 12 == 7)) { in_device_memory[z - 1] = !in_device_memory[z - 1]; }
			//Quadratic 3: z = 3x^2 - y^2 (x > y), prime candidate when z % 12 == 11.
			//(size_t underflow when y^2 > 3x^2 produces a huge z rejected by z <= in_n)
			z = (3*x*x) - (y*y);
			//if (z <= in_n && (x > y) && (z % 12 == 11)) { in_device_memory[z - 1] = true; }
			if (z <= in_n && (x > y) && (z % 12 == 11)) { in_device_memory[z - 1] = !in_device_memory[z - 1]; }
		}
	}
	//Wait for other threads to avoid race-condition
	//NOTE(review): __syncthreads() only orders threads within one block; the
	//square clean-up below also reads flags produced by other blocks, so a
	//grid-wide ordering is not guaranteed here - verify the launch shape, or
	//use the host-side AtkinSquareCleanUp instead.
	__syncthreads();
	if (x >= 5 && x*x <= in_n) {
		if (in_device_memory[x - 1]) {
			//Clear every multiple of x^2 (delist mis-marked square multiples).
			for (size_t y = x*x; y <= in_n; y += x*x) {
				in_device_memory[y - 1] = false;
			}
		}
	}
}
//Private------------------------------------------------------------------------------------------
//Launches AtkinKernel on the default stream with no dynamic shared memory.
//in_start: first number handled by the grid; in_end: inclusive sieve limit
//(forwarded to the kernel as in_n); in_mem_ptr: device-side flag array.
void SieveAtkinCUDA::SieveKernel(unsigned int in_blocks, unsigned int in_threads, size_t in_start, size_t in_end, bool* in_mem_ptr) {
	AtkinKernel <<<in_blocks, in_threads, 0>>> (in_start, in_end, in_mem_ptr);
}
//Host-side clean-up pass of the Atkin sieve: for every number p >= 5 that is
//still flagged prime, clear all multiples of p^2 (index = number - 1).
//Only needed when the kernel itself does not perform the square clean-up.
void SieveAtkinCUDA::AtkinSquareCleanUp() {
	const size_t limit = this->end_;
	for (size_t base = 5; base * base <= limit; ++base) {
		//Skip numbers not currently flagged prime.
		if (!this->mem_class_ptr_->CheckIndex(base - 1)) { continue; }
		const size_t square = base * base;
		for (size_t multiple = square; multiple <= limit; multiple += square) {
			this->mem_class_ptr_->SetNonPrime(multiple - 1);
		}
	}
}
//Runs the full GPU sieve pipeline in fixed order: allocate device memory,
//upload the host flags, launch the kernel work-groups, download the results,
//and free device memory. A timer sample is taken after each stage.
void SieveAtkinCUDA::DoSieve() {
	//Allocate
	this->AllocateGPUMemory();
	this->timer_.SaveTime();
	//Upload
	this->UploadMemory();
	this->timer_.SaveTime();
	//Launch work-groups
	this->LaunchKernel(this->start_);
	this->timer_.SaveTime();
	//Download
	this->DownloadMemory();
	this->timer_.SaveTime();
	//Deallocate
	this->DeallocateGPUMemory();
	//Optional: If __syncthreads() isn't used in kernel
	//this->AtkinSquareCleanUp();
}
//Maps a storage index back to the number it represents
//(index 0 corresponds to start_).
size_t SieveAtkinCUDA::IndexToNumber(size_t in_i) {
	size_t number = this->start_;
	number += in_i;
	return number;
}
//Public-------------------------------------------------------------------------------------------
//Builds an Atkin sieve over [1, in_n] with internally owned bool storage and
//immediately runs the GPU sieve. Storage index k represents the number k+1,
//so SetPrime(1)/SetPrime(2) mark the numbers 2 and 3.
SieveAtkinCUDA::SieveAtkinCUDA(size_t in_n)
	: SieveBase(1, in_n), SieveCUDA() {
	//NTS: Atkins excluding limit? ( [1, n[ )
	//Determine memory capacity needed
	size_t mem_size = in_n;
	this->mem_class_ptr_ = new PrimeMemoryBool(mem_size);
	//this->mem_class_ptr_ = new PrimeMemoryBit(mem_size);
	this->LinkMemory(this->mem_class_ptr_);
	//Atkin starts all as non-primes
	this->mem_class_ptr_->SetAllNonPrime();
	this->timer_.SaveTime();
	//Set 2 and 3 manually as sieving process starts at 5
	if (in_n >= 2) { this->mem_class_ptr_->SetPrime(1); }
	if (in_n >= 3) { this->mem_class_ptr_->SetPrime(2); }
	this->DoSieve();
	this->timer_.SaveTime();
}
//Builds an Atkin sieve over [1, in_n] using caller-owned fragmentation-safe
//memory (in_ptr) instead of allocating its own; the destructor detects this
//case and does not delete the pointer. Otherwise identical to the other ctor.
SieveAtkinCUDA::SieveAtkinCUDA(size_t in_n, PrimeMemoryFragsafe * in_ptr)
	: SieveBase(1, in_n), SieveCUDA() {
	//NTS: Atkins excluding limit? ( [1, n[ )
	//Determine memory capacity needed
	size_t mem_size = in_n;
	//Set fragsafe memory
	in_ptr->AllocateSubMemory(mem_size);
	this->mem_class_ptr_ = in_ptr;
	this->LinkMemory(this->mem_class_ptr_);
	//Atkin starts all as non-primes
	this->mem_class_ptr_->SetAllNonPrime();
	this->timer_.SaveTime();
	//Set 2 and 3 manually as sieving process starts at 5
	if (in_n >= 2) { this->mem_class_ptr_->SetPrime(1); }
	if (in_n >= 3) { this->mem_class_ptr_->SetPrime(2); }
	this->DoSieve();
	this->timer_.SaveTime();
}
//Frees the prime-flag storage, unless it is caller-owned fragsafe memory.
SieveAtkinCUDA::~SieveAtkinCUDA() {
	//Fragsafe memory is owned by the caller; never free it here.
	bool owns_memory = (dynamic_cast<PrimeMemoryFragsafe*>(this->mem_class_ptr_) == nullptr);
	if (owns_memory && this->mem_class_ptr_ != nullptr) {
		delete this->mem_class_ptr_;
		this->mem_class_ptr_ = nullptr;
	}
}
//Returns the sieved primality flag for in_num.
//Numbers outside [start_, end_] are reported as non-prime.
bool SieveAtkinCUDA::IsPrime(size_t in_num) {
	bool in_range = (in_num >= this->start_) && (in_num <= this->end_);
	if (!in_range) { return false; }
	//Map the number onto its storage slot and return the stored flag.
	return this->mem_class_ptr_->CheckIndex(in_num - this->start_);
}
|
a8a90d866e034058f962b598ab195dda2052e068.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"
#include "Functions.cuh"
//Progressive-accumulation resolve: blends the new frame into the surface as a
//running average, new = frame/(n+1) + previous*n/(n+1) with n = frameNumber.
//One thread per pixel; expects a 1D launch covering screenWidth*screenHeight.
__global__ void DrawToTexture(hipSurfaceObject_t texture, float4* frameBuffer, int screenWidth, int screenHeight, uint frameNumber) {
	uint i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= screenWidth * screenHeight) return;
	uint x = i % screenWidth;
	uint y = i / screenWidth;
	// IMPORTANT: Surface functions use bytes for addressing memory; x-coordinate is in bytes.
	// Y-coordinate does not need to be multiplied as the byte offset of the corresponding y-coordinate is internally calculated.
	float4 color = frameBuffer[i];
	float4 previousColor;
	surf2Dread(&previousColor, texture, x * sizeof(float4), y);
	float reciprocal = 1.0f / (frameNumber + 1);
	surf2Dwrite(color * reciprocal + previousColor * frameNumber * reciprocal, texture, x * sizeof(float4), y);
}
//Device-side reset of a compaction array between passes.
//NOTE(review): every launched thread calls Reset() on the (by-value) struct -
//presumably intended for a single-thread launch; confirm at the call site.
__global__ void ResetCompactionArray(CompactionArray compactionArray) {
	compactionArray.Reset();
}
//Seeds one RNG state per ray/pixel (seed 1337 + i, subsequence i/32 -
//presumably one subsequence per warp; confirm against the RNG_INIT macro).
__global__ void InitializeRng(RngState* rngStates, int count) {
	uint i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= count) return;
	RNG_INIT(1337 + i, 0, i / 32, &rngStates[i]);
}
//Generates one jittered primary camera ray per pixel and resets per-ray state
//for a new sample: intersections to "none", path throughput (stepBuffer) to 1,
//accumulated color (frameBuffer) to 0. The camera is described by its origin
//and three image-plane corners; the jitter comes from the per-ray RNG.
//Compaction layout: slot 0 holds the ray count, slots 1..N the ray indices.
__global__ void InitializeRays(Ray* rays, RngState* rngStates, int screenWidth, int screenHeight, float4 origin, float4 topLeft, float4 bottomLeft, float4 bottomRight, Intersection* intersections, float4* stepBuffer, float4* frameBuffer, CompactionArray traverseSceneCompaction) {
	uint i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= screenWidth * screenHeight) return;
	uint x = i % screenWidth;
	uint y = i / screenWidth;
	//Jittered normalized screen coordinates in [0,1) for anti-aliasing.
	float xScreen = ((float)x + RNG_GET_UNIFORM(&rngStates[i])) / screenWidth;
	float yScreen = ((float)y + RNG_GET_UNIFORM(&rngStates[i])) / screenHeight;
	Ray* rayPtr = &rays[i];
	rayPtr->origin = origin;
	rayPtr->direction = normalize(bottomLeft + (bottomRight - bottomLeft) * xScreen + (topLeft - bottomLeft) * yScreen);
	intersections[i] = NO_INTERSECTION;
	stepBuffer[i] = float4 { 1.0f, 1.0f, 1.0f, 1.0f };
	frameBuffer[i] = float4 { 0.0f, 0.0f, 0.0f, 1.0f };
	if (i == 0) {
		//All rays are initially active.
		traverseSceneCompaction.data[0] = screenWidth * screenHeight;
	}
	traverseSceneCompaction.data[i + 1] = i;
}
//Brute-force scene traversal: each compacted ray is tested against every
//triangle, keeping the nearest hit with t > EPSILON.
//NOTE(review): intersectCompaction.Add(rayIdx) fires each time a closer hit
//is found, so the same ray index can be appended more than once - confirm the
//Intersect pass tolerates duplicates.
__global__ void TraverseScene(Ray* rays, Triangle* triangles, int triangleCount, Vertex* vertices, Intersection* intersections, CompactionArray intersectCompaction, CompactionArray traverseSceneCompaction) {
	uint laneIdx = threadIdx.x + blockIdx.x * blockDim.x;
	if (laneIdx >= traverseSceneCompaction.GetCount()) return;
	uint rayIdx = traverseSceneCompaction.Get(laneIdx);
	for (int triangleIdx = 0; triangleIdx < triangleCount; triangleIdx++) {
		Intersection intersection = RayIntersectsTriangle(&rays[rayIdx], &triangles[triangleIdx], vertices);
		if (intersection.t > EPSILON && intersection.t < intersections[rayIdx].t) {
			intersections[rayIdx] = intersection;
			intersectCompaction.Add(rayIdx);
		}
	}
}
//Shades each ray that recorded a hit this bounce:
//- DIFFUSE: spawns a bounce ray via GetDiffuseReflection, moves the ray
//  origin to the hit point (nudged by EPSILON along the new direction),
//  scales the path throughput by 2*cos(theta)*albedo, and re-queues the ray
//  for the next TraverseScene pass.
//- EMISSIVE: terminates the path, writing throughput * emitted color.
//- otherwise: kills the path throughput (sets RGB to 0).
__global__ void Intersect(Ray* rays, Intersection* intersections, Material* materials, RngState* rngStates, float4* stepBuffer, float4* frameBuffer, CompactionArray intersectCompaction, CompactionArray traverseSceneCompaction) {
	uint laneIdx = threadIdx.x + blockIdx.x * blockDim.x;
	if (laneIdx >= intersectCompaction.GetCount()) return;
	uint rayIdx = intersectCompaction.Get(laneIdx);
	Material* materialPtr = &materials[intersections[rayIdx].materialIdx];
	float4 materialColor = materialPtr->color;
	if (materialPtr->type == Material::MaterialType::DIFFUSE) {
		float4 reflection = GetDiffuseReflection(intersections[rayIdx].normal, &rngStates[rayIdx]);
		rays[rayIdx].origin += rays[rayIdx].direction * intersections[rayIdx].t + reflection * EPSILON;
		rays[rayIdx].direction = reflection;
		stepBuffer[rayIdx] *= dot(intersections[rayIdx].normal, reflection) * 2.0f * materialColor;
		traverseSceneCompaction.Add(rayIdx);
	}
	else if (materialPtr->type == Material::MaterialType::EMISSIVE) {
		frameBuffer[rayIdx] = stepBuffer[rayIdx] * materialColor;
	}
	else {
		stepBuffer[rayIdx] *= make_float4(0.0f, 0.0f, 0.0f, 1.0f);
	}
}
| a8a90d866e034058f962b598ab195dda2052e068.cu | #include "kernels.cuh"
#include "Functions.cuh"
//Progressive-accumulation resolve: blends the new frame into the surface as a
//running average, new = frame/(n+1) + previous*n/(n+1) with n = frameNumber.
//One thread per pixel; expects a 1D launch covering screenWidth*screenHeight.
__global__ void DrawToTexture(cudaSurfaceObject_t texture, float4* frameBuffer, int screenWidth, int screenHeight, uint frameNumber) {
	uint i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= screenWidth * screenHeight) return;
	uint x = i % screenWidth;
	uint y = i / screenWidth;
	// IMPORTANT: Surface functions use bytes for addressing memory; x-coordinate is in bytes.
	// Y-coordinate does not need to be multiplied as the byte offset of the corresponding y-coordinate is internally calculated.
	float4 color = frameBuffer[i];
	float4 previousColor;
	surf2Dread(&previousColor, texture, x * sizeof(float4), y);
	float reciprocal = 1.0f / (frameNumber + 1);
	surf2Dwrite(color * reciprocal + previousColor * frameNumber * reciprocal, texture, x * sizeof(float4), y);
}
//Device-side reset of a compaction array between passes.
//NOTE(review): every launched thread calls Reset() on the (by-value) struct -
//presumably intended for a single-thread launch; confirm at the call site.
__global__ void ResetCompactionArray(CompactionArray compactionArray) {
	compactionArray.Reset();
}
//Seeds one RNG state per ray/pixel (seed 1337 + i, subsequence i/32 -
//presumably one subsequence per warp; confirm against the RNG_INIT macro).
__global__ void InitializeRng(RngState* rngStates, int count) {
	uint i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= count) return;
	RNG_INIT(1337 + i, 0, i / 32, &rngStates[i]);
}
//Generates one jittered primary camera ray per pixel and resets per-ray state
//for a new sample: intersections to "none", path throughput (stepBuffer) to 1,
//accumulated color (frameBuffer) to 0. The camera is described by its origin
//and three image-plane corners; the jitter comes from the per-ray RNG.
//Compaction layout: slot 0 holds the ray count, slots 1..N the ray indices.
__global__ void InitializeRays(Ray* rays, RngState* rngStates, int screenWidth, int screenHeight, float4 origin, float4 topLeft, float4 bottomLeft, float4 bottomRight, Intersection* intersections, float4* stepBuffer, float4* frameBuffer, CompactionArray traverseSceneCompaction) {
	uint i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= screenWidth * screenHeight) return;
	uint x = i % screenWidth;
	uint y = i / screenWidth;
	//Jittered normalized screen coordinates in [0,1) for anti-aliasing.
	float xScreen = ((float)x + RNG_GET_UNIFORM(&rngStates[i])) / screenWidth;
	float yScreen = ((float)y + RNG_GET_UNIFORM(&rngStates[i])) / screenHeight;
	Ray* rayPtr = &rays[i];
	rayPtr->origin = origin;
	rayPtr->direction = normalize(bottomLeft + (bottomRight - bottomLeft) * xScreen + (topLeft - bottomLeft) * yScreen);
	intersections[i] = NO_INTERSECTION;
	stepBuffer[i] = float4 { 1.0f, 1.0f, 1.0f, 1.0f };
	frameBuffer[i] = float4 { 0.0f, 0.0f, 0.0f, 1.0f };
	if (i == 0) {
		//All rays are initially active.
		traverseSceneCompaction.data[0] = screenWidth * screenHeight;
	}
	traverseSceneCompaction.data[i + 1] = i;
}
//Brute-force scene traversal: each compacted ray is tested against every
//triangle, keeping the nearest hit with t > EPSILON.
//NOTE(review): intersectCompaction.Add(rayIdx) fires each time a closer hit
//is found, so the same ray index can be appended more than once - confirm the
//Intersect pass tolerates duplicates.
__global__ void TraverseScene(Ray* rays, Triangle* triangles, int triangleCount, Vertex* vertices, Intersection* intersections, CompactionArray intersectCompaction, CompactionArray traverseSceneCompaction) {
	uint laneIdx = threadIdx.x + blockIdx.x * blockDim.x;
	if (laneIdx >= traverseSceneCompaction.GetCount()) return;
	uint rayIdx = traverseSceneCompaction.Get(laneIdx);
	for (int triangleIdx = 0; triangleIdx < triangleCount; triangleIdx++) {
		Intersection intersection = RayIntersectsTriangle(&rays[rayIdx], &triangles[triangleIdx], vertices);
		if (intersection.t > EPSILON && intersection.t < intersections[rayIdx].t) {
			intersections[rayIdx] = intersection;
			intersectCompaction.Add(rayIdx);
		}
	}
}
//Shades each ray that recorded a hit this bounce:
//- DIFFUSE: spawns a bounce ray via GetDiffuseReflection, moves the ray
//  origin to the hit point (nudged by EPSILON along the new direction),
//  scales the path throughput by 2*cos(theta)*albedo, and re-queues the ray
//  for the next TraverseScene pass.
//- EMISSIVE: terminates the path, writing throughput * emitted color.
//- otherwise: kills the path throughput (sets RGB to 0).
__global__ void Intersect(Ray* rays, Intersection* intersections, Material* materials, RngState* rngStates, float4* stepBuffer, float4* frameBuffer, CompactionArray intersectCompaction, CompactionArray traverseSceneCompaction) {
	uint laneIdx = threadIdx.x + blockIdx.x * blockDim.x;
	if (laneIdx >= intersectCompaction.GetCount()) return;
	uint rayIdx = intersectCompaction.Get(laneIdx);
	Material* materialPtr = &materials[intersections[rayIdx].materialIdx];
	float4 materialColor = materialPtr->color;
	if (materialPtr->type == Material::MaterialType::DIFFUSE) {
		float4 reflection = GetDiffuseReflection(intersections[rayIdx].normal, &rngStates[rayIdx]);
		rays[rayIdx].origin += rays[rayIdx].direction * intersections[rayIdx].t + reflection * EPSILON;
		rays[rayIdx].direction = reflection;
		stepBuffer[rayIdx] *= dot(intersections[rayIdx].normal, reflection) * 2.0f * materialColor;
		traverseSceneCompaction.Add(rayIdx);
	}
	else if (materialPtr->type == Material::MaterialType::EMISSIVE) {
		frameBuffer[rayIdx] = stepBuffer[rayIdx] * materialColor;
	}
	else {
		stepBuffer[rayIdx] *= make_float4(0.0f, 0.0f, 0.0f, 1.0f);
	}
}
|
d843da285f1e4e13f959b0334bd0efada7236dc4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
/* Device-side fill: each thread writes `value` into one element of `input`.
   NOTE(review): there is no bounds guard, and initialize_array launches
   FORMA_CEIL(size, blockdim) blocks, so the tail block writes past `size`
   when size is not a multiple of the block dimension - verify allocations
   are padded accordingly. */
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
  int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
  input[loc] = value;
}
/* Host helper: fills `size` elements of device array d_input with `value` by
   launching __kernel_init__ over ceil(size / FORMA_MAX_BLOCKDIM_0) blocks.
   NOTE(review): the grid is rounded up and the kernel has no bounds check, so
   for sizes that are not a multiple of FORMA_MAX_BLOCKDIM_0 the last block
   writes past `size` - confirm the buffers are padded. */
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
  dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
  dim3 init_block(FORMA_MAX_BLOCKDIM_0);
  hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float t2_0=0.0f, t2_1=0.0f, t3_0=0.0f, t3_1=0.0f, t4_0=0.0f, t4_1=0.0f, t5_0=0.0f, t5_1=0.0f, out=0.0f;
float b2_0=0.0f, b2_1=0.0f, b3_0=0.0f, b3_1=0.0f, b4_0=0.0f, b4_1=0.0f, b5_0=0.0f, b5_1=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-16);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
//Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-8); __iter_1__ <= __iter_y__+7; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
// Rest of the computation
for (int __iter_1__ = __iter_y__+8; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+7); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-8,0)] = out;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
/*Device code End */
/* Host Code Begin */
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = M;
int __size_1___kernel___forma_kernel__0__ = N;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/32);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
unsigned int power1, power2;
rsmi_status_t result;
uint32_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(RSMI_STATUS_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<1000; x++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
hipDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| d843da285f1e4e13f959b0334bd0efada7236dc4.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float t2_0=0.0f, t2_1=0.0f, t3_0=0.0f, t3_1=0.0f, t4_0=0.0f, t4_1=0.0f, t5_0=0.0f, t5_1=0.0f, out=0.0f;
float b2_0=0.0f, b2_1=0.0f, b3_0=0.0f, b3_1=0.0f, b4_0=0.0f, b4_1=0.0f, b5_0=0.0f, b5_1=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X-16);
int __iter_y__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y));
//Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_1__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = 0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = 0.0f;
}
// Initial loop
for (int __iter_1__ = FORMA_MAX(0,__iter_y__-8); __iter_1__ <= __iter_y__+7; __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
// Rest of the computation
for (int __iter_1__ = __iter_y__+8; __iter_1__ <= FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+7); __iter_1__++) {
if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
__tilevar_0__[__iter_3__-__iter_0__] = input[__iter_3__+M*(__iter_1__)];
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t2_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t2_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b2_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b2_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_0__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_0__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_0__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_0__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_0__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_1__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads ();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t3_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t3_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b3_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b3_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_1__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_1__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_1__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_1__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_1__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_2__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+6),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-7),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t4_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t4_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b4_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b4_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_2__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_2__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
__tilevar_3__[__iter_3__-__iter_0__] += __temp_98__;
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+8),2) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-9),(M-3))) {
// Bottom -2
float __temp_2__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_5__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (2 * __temp_2__ + 4 * __temp_5__);
float __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 5 * __temp_9__);
float __temp_13__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 4 * __temp_13__);
float __temp_17__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_18__ = (__temp_14__ + 2 * __temp_17__) / 159;
t5_1 += __temp_18__;
// Bottom -1
float __temp_21__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_25__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_26__ = (4 * __temp_21__ + 9 * __temp_25__);
float __temp_29__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_30__ = (__temp_26__ + 12 * __temp_29__);
float __temp_33__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_34__ = (__temp_30__ + 9 * __temp_33__);
float __temp_37__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_38__ = (__temp_34__ + 4 * __temp_37__) / 159;
t5_0 += __temp_38__;
// Mid
float __temp_41__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_45__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_46__ = (5 * __temp_41__ + 12 * __temp_45__);
float __temp_49__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_50__ = (__temp_46__ + 15 * __temp_49__);
float __temp_53__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_54__ = (__temp_50__ + 12 * __temp_53__);
float __temp_57__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_58__ = (__temp_54__ + 5 * __temp_57__) / 159;
b5_0 += __temp_58__;
// Top +1
float __temp_61__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_65__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_66__ = (4 * __temp_61__ + 9 * __temp_65__);
float __temp_69__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_70__ = (__temp_66__ + 12 * __temp_69__);
float __temp_73__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_74__ = (__temp_70__ + 9 * __temp_73__);
float __temp_77__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_78__ = (__temp_74__ + 4 * __temp_77__) / 159;
b5_1 += __temp_78__;
// Top +2
float __temp_81__ = (__tilevar_3__[__iter_3__-2-__iter_0__]);
float __temp_85__ = (__tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_86__ = (2 * __temp_81__ + 4 * __temp_85__);
float __temp_89__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_90__ = (__temp_86__ + 5 * __temp_89__);
float __temp_93__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_94__ = (__temp_90__ + 4 * __temp_93__);
float __temp_97__ = (__tilevar_3__[__iter_3__+2-__iter_0__]);
float __temp_98__ = (__temp_94__ + 2 * __temp_97__) / 159;
out += __temp_98__;
__var_1__[__iter_3__+M*FORMA_MAX(__iter_1__-8,0)] = out;
}
__syncthreads();
// Now rotate
__tilevar_1__[__iter_3__-__iter_0__] = b2_1; b2_1 = b2_0; b2_0=t2_0; t2_0=t2_1; t2_1=0.0f;
__tilevar_2__[__iter_3__-__iter_0__] = b3_1; b3_1 = b3_0; b3_0=t3_0; t3_0=t3_1; t3_1=0.0f;
__tilevar_3__[__iter_3__-__iter_0__] = b4_1; b4_1 = b4_0; b4_0=t4_0; t4_0=t4_1; t4_1=0.0f;
out=b5_1; b5_1=b5_0; b5_0=t5_0; t5_0=t5_1; t5_1=0.0f;
}
}
/* Returns the dynamic shared-memory requirement (in bytes) of
 * __kernel___forma_kernel__0__ for the given block configuration:
 * four float tile buffers of blockDim.x elements each. */
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  return (int) (sizeof(float) * (4 * (int) blockDim.x));
}
/*Device code End */
/* Host Code Begin */
/* Host-side driver for the FORMA-generated multi-stage Gaussian stencil
 * over an N x M row-major float image.
 *   h_input   : input image (host OR device pointer, detected below)
 *   N, M      : rows / columns of the image
 *   __var_0__ : output buffer of the same size (host or device pointer)
 * Also samples GPU power via NVML around 1000 repeated kernel launches
 * and prints the delta (benchmarking instrumentation). */
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
  /* Host allocation Begin */
  float * input;
  cudaMalloc(&input,sizeof(float)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : input\n");
  /* Detect whether h_input already lives on the device so the right
   * memcpy kind is chosen.
   * NOTE(review): `memoryType` is the legacy pre-CUDA-10 attribute field
   * -- confirm the targeted toolkit still provides it. */
  cudaPointerAttributes ptrAttrib_h_input;
  cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
  if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
    if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
      memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
  /* clear the error that cudaPointerGetAttributes may leave behind for
   * plain host pointers */
  cudaGetLastError();
  /* NOTE(review): when h_input is a device pointer the copy below is
   * skipped entirely, so `input` is used uninitialized by the kernel --
   * looks like a generator quirk; confirm intended behavior. */
  if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
    cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
  }
  /* device buffer receiving the stencil output */
  float * __var_1__;
  cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : __var_1__\n");
  /*Host Allocation End */
  /* Kernel Launch Begin */
  int __FORMA_MAX_SHARED_MEM__;
  cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
  cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
  cudaEventCreate(&_forma_timer_start_);
  cudaEventCreate(&_forma_timer_stop_);
  cudaEventRecord(_forma_timer_start_,0);
#endif
  int __size_0___kernel___forma_kernel__0__ = M;
  int __size_1___kernel___forma_kernel__0__ = N;
  int __block_0___kernel___forma_kernel__0__ = 128;
  int __block_1___kernel___forma_kernel__0__ = 1;
  dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
  int __SMemSize___kernel___forma_kernel__0__ = 0;
  __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
  /* x grid: M divided by (blockDim.x - 16); presumably 16 columns per
   * block are halo/overlap for the cascaded stencil -- TODO confirm */
  int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-16);
  /* y grid: each block covers M/32 rows; this must match the per-block
   * row count passed as the 5th kernel argument below */
  int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__size_0___kernel___forma_kernel__0__/32);
  dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
  /* NVML power instrumentation: read power before and after the
   * benchmark loop and report the difference (NVML reports milliwatts) */
  unsigned int power1, power2;
  nvmlReturn_t result;
  nvmlDevice_t device;
  nvmlEnableState_t mode;
  result=nvmlInit();
  result = nvmlDeviceGetHandleByIndex(0, &device);
  assert(NVML_SUCCESS == result);
  result=nvmlDeviceGetPowerManagementMode(device, &mode);
  printf("enabled = %d\n", mode);
  result=nvmlDeviceGetPowerUsage(device,&power1);
  assert(NVML_SUCCESS == result);
  /* drain pending work so the power/time window covers only the loop */
  cudaDeviceSynchronize();
  for (int x=0; x<1000; x++) {
    __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_0___kernel___forma_kernel__0__/32, __var_1__);
    Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
  }
  cudaDeviceSynchronize();
  result=nvmlDeviceGetPowerUsage(device,&power2);
  assert(NVML_SUCCESS == result);
  power2 -= power1;
  printf("%u\n", power2);
  nvmlShutdown();
  /* copy the result back, again detecting device-resident destinations */
  cudaPointerAttributes ptrAttrib___var_0__;
  cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
  if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
    if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
      memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
  cudaGetLastError();
  cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
  cudaEventRecord(_forma_timer_stop_,0);
  cudaEventSynchronize(_forma_timer_stop_);
  float elapsedTime;
  cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
  printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
  cudaEventDestroy(_forma_timer_start_);
  cudaEventDestroy(_forma_timer_stop_);
#endif
  /*Kernel Launch End */
  /* Host Free Begin */
  cudaFree(input);
  cudaFree(__var_1__);
}
/*Host Free End*/
|
287cb21958dc37ff726f0268199afdc1fe4f8ef9.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __GPU_ARRAY__
#define __GPU_ARRAY__
#include <unordered_map>
#include "BasicGPUArray.h"
#include "utils.h"
#include <vector>
/**
* template for a array shared on the cpu and gpu
* with some helper functions for easy copying
*/
namespace SNN {
/**
 * Template for an array shared between the cpu and the gpu,
 * with helper functions for easy copying in both directions.
 *
 * One contiguous device buffer backs hostArrays.size() host sub-arrays
 * of `length` elements each; sub-array i starts at device + i * length.
 */
template <typename T> class GPUArray: public BasicGPUArray {

    private:

        /* the host values of this (one pointer per sub-array) */
        std::vector<T *> hostArrays;

        /* the device value of this */
        T *device;

        /* the number of elements in each sub-array */
        int length;

        /* indicates that we own the host pointer(s) */
        bool ownHost;

        /* frees the device memory and, if owned, the host memory */
        void freeMemory() {
            if (length > 0) {
                if (hipFree(device))
                    log_err("hipFree failed", LOG_EE);

                if (ownHost)
                    for (auto &host: hostArrays)
                        delete host;

                device = NULL;
                hostArrays.clear();
                length = 0;
            }
        }

        /* delete copy constructor (this owns raw device memory) */
        GPUArray(const GPUArray &other) = delete;

        /* delete assignment operator */
        GPUArray &operator = (const GPUArray &other) = delete;

    public:

        /* constructor for a single value; allocates and owns the host copy */
        GPUArray(T host) : device(NULL), length(1), ownHost(true) {
            this->hostArrays.push_back(new T);
            this->hostArrays[0][0] = host;

            if (hipMalloc((void **) &device, sizeof(T) * length))
                log_err("hipMalloc malloc failed", LOG_EE);
        }

        /* constructor wrapping one caller-owned host array of `length`
         * elements */
        GPUArray(T *host, int length) :
            device(NULL), length(length), ownHost(false) {

            if (length > 0) {
                hostArrays.push_back(host);

                if (hipMalloc((void **) &device, sizeof(T) * length))
                    log_err("hipMalloc malloc failed", LOG_EE);
            }
        }

        /* constructor wrapping several caller-owned host arrays that share
         * one device buffer of length * hostArrays.size() elements */
        GPUArray(std::vector<T *> hostArrays, int length) :
            hostArrays(hostArrays), device(NULL), length(length), ownHost(false) {

            if (length > 0) {
                if (hipMalloc((void **) &device, sizeof(T) * length * hostArrays.size()))
                    log_err("hipMalloc malloc failed", LOG_EE);
            }
        }

        /* destructor */
        ~GPUArray() {
            this->freeMemory();
        }

        /* copies all host sub-arrays to the gpu */
        void copyToDevice() {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    hipError_t error = hipMemcpy(
                        device + (size_t) i * length,
                        hostArrays[i],
                        sizeof(T) * length,
                        hipMemcpyHostToDevice
                    );
                    if (error)
                        log_err("hipMemcpyHostToDevice failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* copies all device sub-arrays back to the cpu */
        void copyToHost() {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    hipError_t error = hipMemcpy(
                        hostArrays[i],
                        device + (size_t) i * length,
                        sizeof(T) * length,
                        hipMemcpyDeviceToHost
                    );
                    if (error)
                        log_err("hipMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* copies the sub-array with the given index to the cpu */
        void copyToHost(int i) {
            if ((unsigned) i < hostArrays.size()) {
                hipError_t error = hipMemcpy(
                    hostArrays[i],
                    device + (size_t) i * length,
                    sizeof(T) * length,
                    hipMemcpyDeviceToHost
                );
                if (error)
                    log_err("hipMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
            }
        }

        /* copies elements [start, start + size) of sub-array i to the cpu */
        void copyToHost(int i, int start, int size) {
            if ((unsigned) i < hostArrays.size()) {
                hipError_t error = hipMemcpy(
                    hostArrays[i] + start,
                    device + (size_t) i * length + start,
                    sizeof(T) * size,
                    hipMemcpyDeviceToHost
                );
                if (error)
                    log_err("hipMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
            }
        }

        /* copies elements [start, start + size) of every sub-array to the cpu */
        void copyToHost(int start, int size) {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    hipError_t error = hipMemcpy(
                        hostArrays[i] + start,
                        device + (size_t) i * length + start,
                        sizeof(T) * size,
                        hipMemcpyDeviceToHost
                    );
                    if (error)
                        log_err("hipMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* copies elements [start, start + size) of every sub-array to the gpu */
        void copyToDevice(int start, int size) {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    hipError_t error = hipMemcpy(
                        device + (size_t) i * length + start,
                        hostArrays[i] + start,
                        sizeof(T) * size,
                        hipMemcpyHostToDevice
                    );
                    if (error)
                        log_err("hipMemcpyHostToDevice failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* array access operator; i is a global element index across all
         * sub-arrays (0 <= i < hostArrays.size() * length) */
        T &at(int i) {
            /* bug fix: the old check compared i against the number of
             * sub-arrays instead of the total number of elements, allowing
             * out-of-range reads for valid-looking indices */
            if (i >= 0 && (size_t) i < hostArrays.size() * (size_t) length)
                return hostArrays[i / length][i % length];

            log_err("index out of range in GPUArray", LOG_E);
            return hostArrays[0][0];
        }

        /* returns the device pointer */
        void *d_ptr() const { return (void *) device; }

        /* returns the number of elements per sub-array */
        int size() {
            return this->length;
        }

        /* returns the global gpu memory consumption of this in bytes */
        int globalMemoryConsumption() {
            /* bug fix: account for every sub-array backed by the device
             * buffer, not just the first one */
            return (int) (sizeof(T) * this->length * hostArrays.size());
        }

        /* checks the host values against the device values: both are scaled
         * into [-1, 1] and compared with tolerance 1e-3; mismatches are
         * logged as warnings */
        void check() {
            T *tmp = (T *) malloc(sizeof(T) * length);
            if (tmp == NULL) {
                log_err("malloc failed in GPUArray::check", LOG_EE);
                return;
            }
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    hipError_t error = hipMemcpy(
                        tmp,
                        device + (size_t) i * length,
                        sizeof(T) * length,
                        hipMemcpyDeviceToHost
                    );
                    if (error)
                        log_err("hipMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);

                    for (int n = 0; n < length; n++) {
                        /* halve both values until they fit in [-1, 1] so the
                         * comparison is relative rather than absolute */
                        FloatType v1 = tmp[n], v2 = hostArrays[i][n];
                        while (fabs(v1) > 1 || fabs(v2) > 1) {
                            v1 /= 2;
                            v2 /= 2;
                        }
                        if (fabs(v1 - v2) > 1e-3) {
                            log_err(
                                "GPUArray value " + itoa(n) + " differs: host: " +
                                ftoa(hostArrays[i][n], 20) + ", device: " +
                                ftoa(tmp[n], 20) + ", error: " +
                                ftoa(fabs(tmp[n] - hostArrays[i][n]), 20), LOG_W);
                        }
                    }
                }
            }
            free(tmp);
        }
};
}
#endif /* __GPU_ARRAY__ */
| 287cb21958dc37ff726f0268199afdc1fe4f8ef9.cu | #ifndef __GPU_ARRAY__
#define __GPU_ARRAY__
#include <unordered_map>
#include "BasicGPUArray.h"
#include "utils.h"
#include <vector>
/**
* template for a array shared on the cpu and gpu
* with some helper functions for easy copying
*/
namespace SNN {
/**
 * Template for an array shared between the cpu and the gpu,
 * with helper functions for easy copying in both directions.
 *
 * One contiguous device buffer backs hostArrays.size() host sub-arrays
 * of `length` elements each; sub-array i starts at device + i * length.
 */
template <typename T> class GPUArray: public BasicGPUArray {

    private:

        /* the host values of this (one pointer per sub-array) */
        std::vector<T *> hostArrays;

        /* the device value of this */
        T *device;

        /* the number of elements in each sub-array */
        int length;

        /* indicates that we own the host pointer(s) */
        bool ownHost;

        /* frees the device memory and, if owned, the host memory */
        void freeMemory() {
            if (length > 0) {
                if (cudaFree(device))
                    log_err("cudaFree failed", LOG_EE);

                if (ownHost)
                    for (auto &host: hostArrays)
                        delete host;

                device = NULL;
                hostArrays.clear();
                length = 0;
            }
        }

        /* delete copy constructor (this owns raw device memory) */
        GPUArray(const GPUArray &other) = delete;

        /* delete assignment operator */
        GPUArray &operator = (const GPUArray &other) = delete;

    public:

        /* constructor for a single value; allocates and owns the host copy */
        GPUArray(T host) : device(NULL), length(1), ownHost(true) {
            this->hostArrays.push_back(new T);
            this->hostArrays[0][0] = host;

            if (cudaMalloc((void **) &device, sizeof(T) * length))
                log_err("cudaMalloc malloc failed", LOG_EE);
        }

        /* constructor wrapping one caller-owned host array of `length`
         * elements */
        GPUArray(T *host, int length) :
            device(NULL), length(length), ownHost(false) {

            if (length > 0) {
                hostArrays.push_back(host);

                if (cudaMalloc((void **) &device, sizeof(T) * length))
                    log_err("cudaMalloc malloc failed", LOG_EE);
            }
        }

        /* constructor wrapping several caller-owned host arrays that share
         * one device buffer of length * hostArrays.size() elements */
        GPUArray(std::vector<T *> hostArrays, int length) :
            hostArrays(hostArrays), device(NULL), length(length), ownHost(false) {

            if (length > 0) {
                if (cudaMalloc((void **) &device, sizeof(T) * length * hostArrays.size()))
                    log_err("cudaMalloc malloc failed", LOG_EE);
            }
        }

        /* destructor */
        ~GPUArray() {
            this->freeMemory();
        }

        /* copies all host sub-arrays to the gpu */
        void copyToDevice() {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    cudaError_t error = cudaMemcpy(
                        device + (size_t) i * length,
                        hostArrays[i],
                        sizeof(T) * length,
                        cudaMemcpyHostToDevice
                    );
                    if (error)
                        log_err("cudaMemcpyHostToDevice failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* copies all device sub-arrays back to the cpu */
        void copyToHost() {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    cudaError_t error = cudaMemcpy(
                        hostArrays[i],
                        device + (size_t) i * length,
                        sizeof(T) * length,
                        cudaMemcpyDeviceToHost
                    );
                    if (error)
                        log_err("cudaMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* copies the sub-array with the given index to the cpu */
        void copyToHost(int i) {
            if ((unsigned) i < hostArrays.size()) {
                cudaError_t error = cudaMemcpy(
                    hostArrays[i],
                    device + (size_t) i * length,
                    sizeof(T) * length,
                    cudaMemcpyDeviceToHost
                );
                if (error)
                    log_err("cudaMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
            }
        }

        /* copies elements [start, start + size) of sub-array i to the cpu */
        void copyToHost(int i, int start, int size) {
            if ((unsigned) i < hostArrays.size()) {
                cudaError_t error = cudaMemcpy(
                    hostArrays[i] + start,
                    device + (size_t) i * length + start,
                    sizeof(T) * size,
                    cudaMemcpyDeviceToHost
                );
                if (error)
                    log_err("cudaMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
            }
        }

        /* copies elements [start, start + size) of every sub-array to the cpu */
        void copyToHost(int start, int size) {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    cudaError_t error = cudaMemcpy(
                        hostArrays[i] + start,
                        device + (size_t) i * length + start,
                        sizeof(T) * size,
                        cudaMemcpyDeviceToHost
                    );
                    if (error)
                        log_err("cudaMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* copies elements [start, start + size) of every sub-array to the gpu */
        void copyToDevice(int start, int size) {
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    cudaError_t error = cudaMemcpy(
                        device + (size_t) i * length + start,
                        hostArrays[i] + start,
                        sizeof(T) * size,
                        cudaMemcpyHostToDevice
                    );
                    if (error)
                        log_err("cudaMemcpyHostToDevice failed: " + itoa(error), LOG_EE);
                }
            }
        }

        /* array access operator; i is a global element index across all
         * sub-arrays (0 <= i < hostArrays.size() * length) */
        T &at(int i) {
            /* bug fix: the old check compared i against the number of
             * sub-arrays instead of the total number of elements, allowing
             * out-of-range reads for valid-looking indices */
            if (i >= 0 && (size_t) i < hostArrays.size() * (size_t) length)
                return hostArrays[i / length][i % length];

            log_err("index out of range in GPUArray", LOG_E);
            return hostArrays[0][0];
        }

        /* returns the device pointer */
        void *d_ptr() const { return (void *) device; }

        /* returns the number of elements per sub-array */
        int size() {
            return this->length;
        }

        /* returns the global cuda memory consumption of this in bytes */
        int globalMemoryConsumption() {
            /* bug fix: account for every sub-array backed by the device
             * buffer, not just the first one */
            return (int) (sizeof(T) * this->length * hostArrays.size());
        }

        /* checks the host values against the device values: both are scaled
         * into [-1, 1] and compared with tolerance 1e-3; mismatches are
         * logged as warnings */
        void check() {
            T *tmp = (T *) malloc(sizeof(T) * length);
            if (tmp == NULL) {
                log_err("malloc failed in GPUArray::check", LOG_EE);
                return;
            }
            for (unsigned i = 0; i < hostArrays.size(); i++) {
                if (hostArrays[i] != NULL) {
                    cudaError_t error = cudaMemcpy(
                        tmp,
                        device + (size_t) i * length,
                        sizeof(T) * length,
                        cudaMemcpyDeviceToHost
                    );
                    if (error)
                        log_err("cudaMemcpyDeviceToHost failed: " + itoa(error), LOG_EE);

                    for (int n = 0; n < length; n++) {
                        /* halve both values until they fit in [-1, 1] so the
                         * comparison is relative rather than absolute */
                        FloatType v1 = tmp[n], v2 = hostArrays[i][n];
                        while (fabs(v1) > 1 || fabs(v2) > 1) {
                            v1 /= 2;
                            v2 /= 2;
                        }
                        if (fabs(v1 - v2) > 1e-3) {
                            log_err(
                                "GPUArray value " + itoa(n) + " differs: host: " +
                                ftoa(hostArrays[i][n], 20) + ", device: " +
                                ftoa(tmp[n], 20) + ", error: " +
                                ftoa(fabs(tmp[n] - hostArrays[i][n]), 20), LOG_W);
                        }
                    }
                }
            }
            free(tmp);
        }
};
}
#endif /* __GPU_ARRAY__ */
|
a5ed9bf01326a5b2a8a8ba64e5d333ff71fb7dfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "time.h"
#include "math.h"
#include "house_med.h"
// inverts a matrix A by turning first N columns of A|I into RREF
// # threads = 2N
// each thread corresponds to a particular column
// perform division on row to turn leading nonzero into a 1
// perform elimination on all other rows to make pivot column 0s
// O(Ax^2) time
// O(Ay) work
// Inverts an N x N matrix stored as the left half of an N x 2N augmented
// matrix A|I (row major) by reducing the first N columns to RREF with
// Gauss-Jordan elimination.
// Launch: a single block with Ay (= 2 * Ax) threads; each thread owns one
// column of the augmented matrix.
// NOTE(review): there is no pivoting beyond the zero-pivot row swap below,
// so the reduction is numerically fragile for ill-conditioned inputs.
// O(Ax^2) time, O(Ax * Ay) work.
__global__ void MatrixInverse(double *A, int Ax, int Ay) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    for (int i = 0; i < Ax; i++) {
        // SWAP CODE: if the pivot is 0, the thread owning the pivot column
        // swaps in the first lower row with a nonzero entry in that column.
        if (i == col && A[i * Ay + col] == 0) {
            for (int k = i; k < Ax; k++) {
                if (A[k * Ay + col] != 0) {
                    for (int x = 0; x < Ay; x++) {
                        // bug fix: was `int tmp`, which truncated the
                        // swapped matrix entries to integers
                        double tmp = A[i * Ay + x];
                        A[i * Ay + x] = A[k * Ay + x];
                        A[k * Ay + x] = tmp;
                    }
                    break;
                }
            }
        }
        __syncthreads();

        // Read the pivot into a register before any thread overwrites it,
        // then normalize the pivot row so the pivot becomes 1.
        double pivot = A[i * Ay + i];
        __syncthreads();
        A[i * Ay + col] = A[i * Ay + col] / pivot;
        __syncthreads();

        // Eliminate the pivot column from every other row.  The barriers
        // separate the reads from the writes so threads in different warps
        // cannot observe partially updated rows; they are outside the
        // divergent `j != i` branch, so all threads reach them.
        for (int j = 0; j < Ax; j++) {
            double factor   = A[j * Ay + i];   // current row, pivot column
            double pivotVal = A[i * Ay + col]; // pivot row, current column
            double oldVal   = A[j * Ay + col]; // current row, current column
            __syncthreads();
            if (j != i) {
                A[j * Ay + col] = oldVal - factor * pivotVal;
            }
            __syncthreads();
        }
    }
}
// Function that appends an identity matrix to the right of the current matrix
// keeping new matrix in row major form
// constant time in parallel
// assume that dst has 2*N*N = 2*len(src) allocated
// O(1) time
// O(Ax * Ay) work
// Builds the augmented matrix dst = [src | I] in row-major form:
// src is num_row x num_col, dst is num_row x (2 * num_col).
// Launched with one thread per element of dst.
// NOTE(review): the index arithmetic below uses num_row and num_col
// interchangeably, so it is only correct for square matrices
// (num_row == num_col) -- confirm all callers pass square inputs.
// O(1) time, O(num_row * num_col) work.
__global__ void MatrixAppendIdentity(double* src, double* dst, int num_row, int num_col) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i % (2 * num_col) < num_col) {
        // left half: copy the corresponding element of src
        dst[i] = src[(num_row*(i / (2 * num_row))) + (i % (2 * num_row))];
    }
    else if ((i % (2 * num_row) - num_row == i / (2 * num_row))) {
        // right half, on the diagonal: identity 1
        dst[i] = 1;
    }
    else {
        // right half, off the diagonal: 0
        dst[i] = 0;
    }
}
// Extracts the inverse matrix from the identity matrix
// O(1) time
// O(Ax * Ay) work
// Copies the right half of the augmented row-major matrix src (the
// inverse, after Gauss-Jordan reduction of [A | I]) into dst.
// Launched with one thread per element of src; only threads landing in
// the right half write anything.
// O(1) time, O(num_row * num_col) work.
__global__ void ExtractInverse(double *src, double* dst, int num_row, int num_col) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // threads in the left half (the reduced identity) have nothing to do
    if (idx % (2 * num_col) < num_col)
        return;

    int row = idx / (2 * num_row);
    int col = idx % (2 * num_row) - num_row;
    dst[num_row * row + col] = src[idx];
}
// adds arrays A and B and stores the result in C
// assume all arrays have the same dimensions
// O(1) time
// O(Ax * Ay) work
// Element-wise matrix addition: C = A + B.
// All three arrays have identical dimensions; launch exactly one thread
// per element.
// O(1) time, O(n) work.
__global__ void MatrixAdd(double * A, double * B, double * C) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
// performs scalar multiplication on matrix A and scalar X
// stores result in B
// O(1) time
// O(Ax * Ay) work
// Scalar multiplication: B = scalar * A.
// Launch exactly one thread per element of A.
// O(1) time, O(n) work.
__global__ void MatrixSMul(double * A, double * B, double scalar) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    B[idx] = scalar * A[idx];
}
// Transpose function, A is input, B is output, Ax and Ay are the dimensions of A
// O(1) time
// O(Ax * Ay)
// Transposes A into B.
// A is row major with rows of length Ax; B is row major with rows of
// length Ay.  Element (x / Ax, x % Ax) of A becomes element
// (x % Ax, x / Ax) of B.  Launch one thread per element (Ax * Ay threads).
// O(1) time, O(Ax * Ay) work.
__global__ void MatrixTranspose(double * A, double * B, int Ax, int Ay) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // The original special-cased x == 0, but the general formula already
    // maps index 0 to 0 (0 % Ax == 0 and 0 / Ax == 0), so the redundant
    // branch has been removed.
    B[(x % Ax) * Ay + (x / Ax)] = A[x];
}
// Multiplies the matrices A and B and stores them into C
// Ax, Ay, Bx, By are the dimensions
// Use a thread for each element of the final C array
// O(Ax) time
// O(Bx * Ay) work
// Multiplies row-major matrices A (Ay x Ax) and B (By x Bx) and stores
// the product in C; each thread computes exactly one element of C.
// Does nothing unless the inner dimensions match (Ax == By).
// O(Ax) time per thread, O(Ax * Ay * Bx) total work.
__global__ void MatrixMul(double * A, double * B, double * C, int Ax, int Ay, int Bx, int By) {
    // Ax == By is a kernel argument, hence uniform across the block, so
    // the __syncthreads() below is reached by all threads or by none.
    if (Ax == By) {
        // total array position; each thread exclusively owns C[x]
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        // reset C array
        C[x] = 0;
        __syncthreads();
        int count;
        int Aindex, Bindex;
        double prod;
        // dot product of row (x / Bx) of A with column (x % Bx) of B
        for (count = 0; count < Ax; count++) {
            // row of C matrix
            Aindex = (x / Bx) * Ax + count;
            // column of C matrix
            Bindex = (x % Bx) + Bx * count;
            prod = A[Aindex] * B[Bindex];
            C[x] += prod;
        }
    }
}
// Adds the value lambda to the diagonal of the input matrix
// O(1) time
// O(Ax) work
// Adds lambda to the diagonal of the row-major Ax x Ay matrix A
// (the ridge-regularization term), EXCEPT element [0][0].
// With one thread per diagonal element (x < Ay), idx = Ay * x + x % Ay
// equals x * (Ay + 1), i.e. the x-th diagonal entry.
// NOTE(review): skipping idx == 0 presumably leaves the intercept/bias
// coefficient unregularized -- confirm this is intended rather than a bug.
// O(1) time, O(Ax) work.
__global__ void AddLambdaToDiagonal(double * A, double lambda, int Ax, int Ay) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = Ay * x + x % Ay;
    if (idx != 0) {
        A[idx] = A[idx] + lambda;
    }
}
// Function that appends a column of 1s to a matrix
// keeping new matrix in row major form
// constant time in parallel
// assume that dst has M x (N + 1)
// O(1) time
// O(Ax * Ay) work
// Prepends a column of ones to a row-major matrix:
// src is num_row x num_col, dst is num_row x (num_col + 1) with
// dst[r][0] = 1 and dst[r][c + 1] = src[r][c].
// Launched with one thread per source element; dst must be allocated
// with num_row * (num_col + 1) elements.
// O(1) time, O(num_row * num_col) work.
__global__ void AppendOne(double* src, double* dst, int num_row, int num_col) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // for x = r * num_col + c this is r * (num_col + 1) + (c + 1):
    // the same element shifted one slot right within its (wider) row
    int new_index = x + (x / num_col) + 1;
    dst[new_index] = src[x];
    // the thread that fills column 1 of a row also writes that row's
    // leading 1
    if (new_index % (num_col + 1) == 1) {
        dst[new_index - 1] = 1;
    }
}
// takes an array of doubles and its dimensions as input
// sets the array to (((A^t)(A))^-1)(A^t)B
// where A is an array of doubles size Ay (observations) * Ax (features)
// and B is a vector containing Ay elements
// C is the output array of doubles size with Ax + 1. must be allocated before.
// Lambda is the value used for regularization
// O(Ay) time
// O(Ax * Ay) work
// "fit"
extern "C" {
void get_beta(double * A, double * B, double * C, int Ax, int Ay, double lambda) {
    // Ridge-regression fit: Beta = (A'^T A' + lambda*I)^-1 A'^T b, where A'
    // is A with a leading intercept column of 1s appended on the device.
    //   A      - host input, Ay observations x Ax features (row-major)
    //   B      - host input, the Ay known target values
    //   C      - host output, Ax + 1 coefficients (caller-allocated)
    //   lambda - ridge regularization strength
    // Device workspaces only: the original also malloc'd five host buffers
    // (MatA..MatD) that were never used; they have been removed, along with
    // the unused local 'x'.
    double * MatA_d;   // uploaded design matrix A (Ay x Ax)
    double * MatA1_d;  // A with intercept column; later reused for C*B
    double * MatB_d;   // transpose of the augmented A
    double * MatC_d;   // A^T*A (+ lambda*I), then its inverse
    double * MatD_d;   // augmented [C | I] workspace for inversion
    double * MatE_d;   // known target vector (Ay values)
    double * Beta_d;   // resulting coefficients (Ax + 1 values)
    hipMalloc((void **)&MatA_d, Ax * Ay * sizeof(double));
    hipMalloc((void **)&MatA1_d, (Ax + 1) * Ay * sizeof(double));
    hipMalloc((void **)&MatB_d, (Ax + 1) * Ay * sizeof(double));
    hipMalloc((void **)&MatC_d, (Ax + 1) * (Ax + 1) * sizeof(double));
    hipMalloc((void **)&MatD_d, 2 * (Ax + 1) * (Ax + 1) * sizeof(double));
    hipMalloc((void **)&MatE_d, Ay * sizeof(double));
    hipMalloc((void **)&Beta_d, (Ax + 1) * sizeof(double));
    hipMemcpy(MatA_d, A, Ax * Ay * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(MatE_d, B, Ay * sizeof(double), hipMemcpyHostToDevice);
    // Prepend the intercept column of 1s to A
    AppendOne << < Ax, Ay >> > (MatA_d, MatA1_d, Ay, Ax); // O(1)
    // Account for the new column
    Ax++;
    // B = Transpose(A)
    MatrixTranspose << < Ax, Ay >> > (MatA1_d, MatB_d, Ax, Ay); // O(1)
    // C = B*A, i.e. A^T * A
    MatrixMul << <Ax, Ax >> > (MatB_d, MatA1_d, MatC_d, Ay, Ax, Ax, Ay); // O(Ay)
    // Ridge regularization: C = C + lambda*I (the (0,0) intercept entry is
    // skipped inside the kernel)
    AddLambdaToDiagonal << <Ax, 1 >> > (MatC_d, lambda, Ax, Ax); // O(1)
    // Invert C via Gauss-Jordan on the augmented matrix [C | I]
    MatrixAppendIdentity << <Ax, 2 * Ax >> > (MatC_d, MatD_d, Ax, Ax); // O(1)
    MatrixInverse << <1, 2 * Ax >> > (MatD_d, Ax, 2 * Ax); // O(Ax)
    ExtractInverse << <Ax, 2 * Ax >> > (MatD_d, MatC_d, Ax, Ax); // O(1)
    // A = C*B, i.e. (A^T A + lambda I)^-1 * A^T
    MatrixMul << <Ax, Ay >> > (MatC_d, MatB_d, MatA1_d, Ax, Ax, Ay, Ax); // O(Ax)
    // Beta = A*E, where E is the known target vector
    MatrixMul << <1, Ax >> > (MatA1_d, MatE_d, Beta_d, Ay, Ax, 1, Ay); // O(Ay)
    // copy the coefficients back to the caller
    hipMemcpy(C, Beta_d, Ax * sizeof(double), hipMemcpyDeviceToHost);
    // free device resources
    hipFree(MatA_d);
    hipFree(MatA1_d);
    hipFree(MatB_d);
    hipFree(MatC_d);
    hipFree(MatD_d);
    hipFree(MatE_d);
    hipFree(Beta_d);
}
// Performs matrix multiplication on A and B
// A is an array of known values of doubles with Ay rows and Ax columns
// B is an array with Ax doubles. Beta vector
// C is the output array of Ay doubles. must be allocated before
// O(Ax) time
// O(Ax * Ay) work
// "predict"
void linreg(double * A, double * B, double * C, int Ax, int Ay) {
    // Prediction step: C = A' * B, where A' is A with a leading intercept
    // column of 1s and B is the beta vector produced by get_beta.
    //   A  - host input, Ay observations x Ax features (row-major)
    //   B  - host input, Ax + 1 beta coefficients
    //   C  - host output, Ay predictions (caller-allocated)
    // The original malloc'd four host buffers (MatA..MatC) that were never
    // used and declared an unused 'int x'; both removed.
    double * MatA_d;   // uploaded feature matrix (Ay x Ax)
    double * MatA1_d;  // feature matrix with intercept column
    double * MatB_d;   // beta vector (Ax + 1 values)
    double * MatC_d;   // predictions (Ay values)
    hipMalloc((void **)&MatA_d, Ax * Ay * sizeof(double));
    hipMalloc((void **)&MatA1_d, (Ax + 1) * Ay * sizeof(double));
    hipMalloc((void **)&MatB_d, (Ax + 1) * sizeof(double));
    hipMalloc((void **)&MatC_d, Ay * sizeof(double));
    hipMemcpy(MatA_d, A, Ax * Ay * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(MatB_d, B, (Ax + 1) * sizeof(double), hipMemcpyHostToDevice);
    // Prepend the intercept column of 1s to A
    AppendOne << <Ax, Ay >> > (MatA_d, MatA1_d, Ay, Ax); // O(1)
    // Account for the new column
    Ax++;
    // C = A * Beta
    MatrixMul << <1, Ay >> > (MatA1_d, MatB_d, MatC_d, Ax, Ay, 1, Ax); // O(Ax)
    // copy the predictions back to the caller
    hipMemcpy(C, MatC_d, Ay * sizeof(double), hipMemcpyDeviceToHost);
    // free device resources
    hipFree(MatA_d);
    hipFree(MatA1_d);
    hipFree(MatB_d);
    hipFree(MatC_d);
}
// Training set is the data the line will be fit to
// Known values corrospond to the training set
// Test set will be used to test the line of best fit
// Test values are the actual values of the test set
// X is the number of features in the dataset.
// Y is the number of elements in the training set. should be less than 1024
// Yt is the number of elements in the test set
// End-to-end benchmark of the regression pipeline: fits the model three
// times — on 1/100, 1/10, and all of the training observations — then
// predicts the test set with each fit and prints the beta vectors,
// per-sample predictions, fit times, mean absolute error, and RMSE.
//   training_set / known_values : X features x Y observations and targets
//   test_set / test_values      : Yt held-out observations and targets
//   X  - number of features
//   Y  - number of training observations (should be less than 1024)
//   Yt - number of test observations
// Fixes over the original: the 1/10 and 1/100 copies and the secondary
// output buffers were leaked; unused AarrSize/BarrSize/CarrSize removed.
void linreg_test(double * training_set, double * known_values, double * test_set, double * test_values, int X, int Y, int Yt)
{
    int AX = X;
    int AY = Y;
    int BX = 1;
    int BY = Y;
    // Training data arrays (full, 1/10-size and 1/100-size copies)
    int Asize = AX * AY * sizeof(double);
    int A2size = AX * (AY / 10) * sizeof(double);
    int A3size = AX * (AY / 100) * sizeof(double);
    double * MatA = (double *)malloc(Asize);
    double * MatA2 = (double *)malloc(A2size);
    double * MatA3 = (double *)malloc(A3size);
    memcpy(MatA, training_set, Asize);
    memcpy(MatA2, training_set, A2size);
    memcpy(MatA3, training_set, A3size);
    // Known target arrays (the 1/10 and 1/100 copies are allocated at full
    // size but only partially filled, as in the original)
    int Bsize = BX * BY * sizeof(double);
    int B2size = BX * (BY / 10) * sizeof(double);
    int B3size = BX * (BY / 100) * sizeof(double);
    double * MatB = (double *)malloc(Bsize);
    double * MatB2 = (double *)malloc(Bsize);
    double * MatB3 = (double *)malloc(Bsize);
    memcpy(MatB, known_values, Bsize);
    memcpy(MatB2, known_values, B2size);
    memcpy(MatB3, known_values, B3size);
    // Output arrays: beta vectors (AX + 1 each) and predictions (Yt each)
    int Csize = (AX + 1) * sizeof(double);
    double * MatC = (double *)malloc(Csize);
    double * MatC2 = (double *)malloc(Csize);
    double * MatC3 = (double *)malloc(Csize);
    double * MatD = (double *)malloc(Yt * AX * sizeof(double));
    double * MatE = (double *)malloc(Yt * sizeof(double));
    double * MatE2 = (double *)malloc(Yt * sizeof(double));
    double * MatE3 = (double *)malloc(Yt * sizeof(double));
    memcpy(MatD, test_set, Yt * AX * sizeof(double));
    // Timing variables
    clock_t start, end;
    double time3, time2, time;
    int x;
    // --- Fit + predict with 1/100 of the training observations ---
    start = clock();
    get_beta(MatA3, MatB3, MatC3, AX, (AY / 100), 75);
    end = clock();
    time3 = ((double)(end - start)) / CLOCKS_PER_SEC;
    linreg(MatD, MatC3, MatE3, AX, Yt);
    // --- Fit + predict with 1/10 of the training observations ---
    start = clock();
    get_beta(MatA2, MatB2, MatC2, AX, (AY / 10), 75);
    end = clock();
    time2 = ((double)(end - start)) / CLOCKS_PER_SEC;
    linreg(MatD, MatC2, MatE2, AX, Yt);
    // --- Fit + predict with all training observations ---
    start = clock();
    get_beta(MatA, MatB, MatC, AX, AY, 75);
    end = clock();
    time = ((double)(end - start)) / CLOCKS_PER_SEC;
    linreg(MatD, MatC, MatE, AX, Yt);
    // Error statistics for the three fits
    double to_add, to_add2, to_add3;
    double sum = 0;
    double sum2 = 0;
    double sum3 = 0;
    double sum_s = 0;
    double sum_s2 = 0;
    double sum_s3 = 0;
    for (x = 0; x < Yt; x++) {
        // signed error per test sample
        to_add = (MatE[x] - test_values[x]);
        to_add2 = (MatE2[x] - test_values[x]);
        to_add3 = (MatE3[x] - test_values[x]);
        // absolute value
        if (to_add < 0) {
            to_add = to_add * -1;
        }
        if (to_add2 < 0) {
            to_add2 = to_add2 * -1;
        }
        if (to_add3 < 0) {
            to_add3 = to_add3 * -1;
        }
        // accumulate the sum and sum of squares
        sum += to_add;
        sum2 += to_add2;
        sum3 += to_add3;
        sum_s += (to_add * to_add);
        sum_s2 += (to_add2 * to_add2);
        sum_s3 += (to_add3 * to_add3);
    }
    // mean absolute error
    sum = sum / Yt;
    sum2 = sum2 / Yt;
    sum3 = sum3 / Yt;
    // root mean squared error
    sum_s = sqrt(sum_s / Yt);
    sum_s2 = sqrt(sum_s2 / Yt);
    sum_s3 = sqrt(sum_s3 / Yt);
    // Report for the 1/100 run
    printf("Results for %d element test:\n\n", (AY / 100));
    printf("Beta = \n");
    for (x = 0; x < (AX + 1); x++) {
        printf("%f\n", MatC3[x]);
    }
    printf("\n");
    for (x = 0; x < Yt; x++) {
        if (x % 2 == 0 && x != 0) {
            printf("\n");
            printf("Predicted: %f \tActual: %f\t\t", MatE3[x], test_values[x]);
        }
        else {
            printf("Predicted: %f \tActual: %f\t\t", MatE3[x], test_values[x]);
        }
    }
    printf("\n");
    printf("Best fit calculation time: %f\n", time3);
    printf("Average error: %f\n", sum3);
    printf("RMSE: %f\n\n\n", sum_s3);
    // Report for the 1/10 run
    printf("Results for %d element test:\n\n", (AY / 10));
    printf("Beta = \n");
    for (x = 0; x < (AX + 1); x++) {
        printf("%f\n", MatC2[x]);
    }
    printf("\n");
    for (x = 0; x < Yt; x++) {
        if (x % 2 == 0 && x != 0) {
            printf("\n");
            printf("Predicted: %f \tActual: %f\t\t", MatE2[x], test_values[x]);
        }
        else {
            printf("Predicted: %f \tActual: %f\t\t", MatE2[x], test_values[x]);
        }
    }
    printf("\n");
    printf("Best fit calculation time: %f\n", time2);
    printf("Average error: %f\n", sum2);
    printf("RMSE: %f\n\n\n", sum_s2);
    // Report for the full run
    printf("Results for %d element test:\n\n", AY);
    printf("Beta = \n");
    for (x = 0; x < (AX + 1); x++) {
        printf("%f\n", MatC[x]);
    }
    printf("\n");
    for (x = 0; x < Yt; x++) {
        if (x % 2 == 0 && x != 0) {
            printf("\n");
            printf("Predicted: %f \tActual: %f\t\t", MatE[x], test_values[x]);
        }
        else {
            printf("Predicted: %f \tActual: %f\t\t", MatE[x], test_values[x]);
        }
    }
    printf("\n");
    printf("Best fit calculation time: %f\n", time);
    printf("Average error: %f\n", sum);
    printf("RMSE: %f\n\n\n", sum_s);
    // free every host buffer (the original leaked the *2/*3 copies)
    free(MatA);
    free(MatA2);
    free(MatA3);
    free(MatB);
    free(MatB2);
    free(MatB3);
    free(MatC);
    free(MatC2);
    free(MatC3);
    free(MatD);
    free(MatE);
    free(MatE2);
    free(MatE3);
}
}
int main() {
linreg_test(houses_m, prices_m, test_houses_m, real_prices_m, features, training_size, test_size);
return 0;
} | a5ed9bf01326a5b2a8a8ba64e5d333ff71fb7dfa.cu | #include "cuda_runtime.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "time.h"
#include "math.h"
#include "house_med.h"
// inverts a matrix A by turning first N columns of A|I into RREF
// # threads = 2N
// each thread corresponds to a particular column
// perform division on row to turn leading nonzero into a 1
// perform elimination on all other rows to make pivot column 0s
// O(Ax^2) time
// O(Ay) work
// In-place Gauss-Jordan elimination on the augmented matrix A (Ax rows,
// Ay columns, row-major). Reducing the first Ax columns of [M | I] to RREF
// leaves M^-1 in the right half.
// Expected launch: a single block with Ay threads (one per column) — the
// __syncthreads() barriers only synchronize within one block.
// NOTE(review): the elimination loop reads pivot-column entries that the
// thread owning column i concurrently zeroes; this is only safe while all
// 2*Ax threads run in one warp in lockstep — confirm for larger matrices.
// O(Ax^2) time per thread.
__global__ void MatrixInverse(double *A, int Ax, int Ay) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    double mult;
    double to_mult;
    double old_val;
    int i = 0;
    for (i = 0; i < Ax; i++) {
        // SWAP CODE: if the pivot is zero, the thread whose column equals i
        // swaps row i with the first lower row holding a nonzero entry.
        if (i == col && A[i*Ay + col] == 0) {
            for (int k = i; k < Ax; k++) {
                if (A[k*Ay + col] != 0) {
                    for (int x = 0; x < Ay; x++) {
                        // BUG FIX: tmp was declared int, silently truncating
                        // the swapped double values to their integer parts.
                        double tmp = A[i*Ay + x];
                        A[i*Ay + x] = A[k*Ay + x];
                        A[k*Ay + x] = tmp;
                    }
                    break;
                }
            }
        }
        // normalize the pivot row so the pivot element becomes 1
        __syncthreads();
        A[i*Ay + col] = A[i*Ay + col] / A[i*Ay + i];
        __syncthreads();
        for (int j = 0; j < Ax; j++) {
            mult = A[j*Ay + i]; // current row, pivot column
            to_mult = A[i*Ay + col]; // pivot row, current column
            old_val = A[j*Ay + col]; // current row, current column
            if ((j != i)) {
                // eliminate the pivot column from every other row
                A[j*Ay + col] = old_val - mult * to_mult;
            }
        }
        __syncthreads();
    }
}
// Builds the augmented matrix [src | I] in dst (row-major): the left
// num_col columns of each dst row copy src, the right half holds the
// identity. Expected launch: one thread per dst element (num_row rows,
// 2*num_col columns). The index math mixes num_row and num_col and is
// only consistent for square matrices (num_row == num_col), which is how
// every call site in this file uses it.
// O(1) time per thread, O(num_row * num_col) total work.
__global__ void MatrixAppendIdentity(double* src, double* dst, int num_row, int num_col) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i % (2 * num_col) < num_col) {
        // left half: copy the corresponding src element
        dst[i] = src[(num_row*(i / (2 * num_row))) + (i % (2 * num_row))];
    }
    else if ((i % (2 * num_row) - num_row == i / (2 * num_row))) {
        // right half, on the diagonal of the identity block
        dst[i] = 1;
    }
    else {
        // right half, off the diagonal
        dst[i] = 0;
    }
}
// Copies the right half of the augmented [M | M^-1] matrix src (num_row
// rows, 2*num_col columns, row-major) into dst, yielding the inverse.
// One thread per src element; threads over the left half do nothing.
// The destination index math assumes a square matrix (num_row == num_col),
// matching how it is launched in this file.
// O(1) time per thread, O(num_row * num_col) total work.
__global__ void ExtractInverse(double *src, double* dst, int num_row, int num_col) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int augWidth = 2 * num_row;            // width of the augmented matrix
    bool inRightHalf = (idx % (2 * num_col)) >= num_col;
    if (inRightHalf) {
        int r = idx / augWidth;            // row in the augmented matrix
        int c = idx % augWidth - num_row;  // column within the right half
        dst[num_row * r + c] = src[idx];
    }
}
// Element-wise sum C = A + B; all three arrays share the same shape.
// One thread per element, no bounds guard — launch exactly N threads.
// O(1) time per thread, O(N) total work.
__global__ void MatrixAdd(double * A, double * B, double * C) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    double lhs = A[idx];
    double rhs = B[idx];
    C[idx] = lhs + rhs;
}
// Scalar multiply B = A * scalar, element-wise.
// One thread per element, no bounds guard — launch exactly N threads.
// O(1) time per thread, O(N) total work.
__global__ void MatrixSMul(double * A, double * B, double scalar) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    B[idx] = scalar * A[idx];
}
// Out-of-place transpose: A (Ay rows x Ax columns, row-major) is written
// into B as its Ax x Ay transpose. One thread per element, no bounds guard.
// O(1) time per thread, O(Ax * Ay) total work.
__global__ void MatrixTranspose(double * A, double * B, int Ax, int Ay) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // element (r, c) of A lands at (c, r) of B; index 0 maps straight to 0
    int dstIdx;
    if (idx == 0) {
        dstIdx = 0;
    } else {
        int dstRowStart = (idx % Ax) * Ay;  // source column becomes dest row
        dstIdx = dstRowStart + (idx / Ax);  // source row becomes dest column
    }
    B[dstIdx] = A[idx];
}
// Matrix multiply C = A * B (row-major), one thread per element of C.
// A is Ay x Ax, B is By x Bx, C is Ay x Bx; the body only runs when the
// inner dimensions agree (Ax == By), otherwise C is left untouched.
// Expected launch: exactly Ay*Bx threads — there is no bounds guard, so
// surplus threads would write out of range.
// O(Ax) time per thread, O(Bx * Ay) total work.
__global__ void MatrixMul(double * A, double * B, double * C, int Ax, int Ay, int Bx, int By) {
    if (Ax == By) {
        // flat index of this thread's output element
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        // clear the accumulator slot before summing into it
        C[x] = 0;
        // NOTE(review): this barrier only spans one block and is not needed
        // for correctness here, since each thread only touches C[x].
        __syncthreads();
        int count;
        int Aindex, Bindex;
        double prod;
        for (count = 0; count < Ax; count++) {
            // element 'count' of the A row feeding C[x] (row = x / Bx)
            Aindex = (x / Bx) * Ax + count;
            // element 'count' of the B column feeding C[x] (col = x % Bx)
            Bindex = (x % Bx) + Bx * count;
            prod = A[Aindex] * B[Bindex];
            C[x] += prod;
        }
    }
}
// Adds lambda to every diagonal element of the Ay x Ax row-major matrix A
// except element (0,0). Expected launch: one thread per row (Ax threads).
// NOTE(review): the idx != 0 guard skips A[0][0] — presumably so the
// intercept term introduced by AppendOne is not regularized; confirm.
// O(1) time per thread, O(Ax) total work.
__global__ void AddLambdaToDiagonal(double * A, double lambda, int Ax, int Ay) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // diagonal element of row x: row offset Ay*x plus column x
    // (x % Ay keeps the column in range if x >= Ay)
    int idx = Ay * x + x % Ay;
    if (idx != 0) {
        A[idx] = A[idx] + lambda;
    }
}
// Copies the num_row x num_col row-major matrix src into dst while
// inserting a leading column of 1s; dst must hold num_row x (num_col + 1)
// elements. Expected launch: one thread per src element (no bounds guard).
// O(1) time per thread, O(num_row * num_col) total work.
__global__ void AppendOne(double* src, double* dst, int num_row, int num_col) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // shift each element right one slot per completed row, plus one for
    // the new leading column
    int new_index = x + (x / num_col) + 1;
    dst[new_index] = src[x];
    // the thread landing just after a row start fills in that row's 1
    if (new_index % (num_col + 1) == 1) {
        dst[new_index - 1] = 1;
    }
}
// takes an array of doubles and its dimensions as input
// sets the array to (((A^t)(A))^-1)(A^t)B
// where A is an array of doubles size Ay (observations) * Ax (features)
// and B is a vector containing Ay elements
// C is the output array of doubles size with Ax + 1. must be allocated before.
// Lambda is the value used for regularization
// O(Ay) time
// O(Ax * Ay) work
// "fit"
extern "C" {
void get_beta(double * A, double * B, double * C, int Ax, int Ay, double lambda) {
    // Ridge-regression fit: Beta = (A'^T A' + lambda*I)^-1 A'^T b, where A'
    // is A with a leading intercept column of 1s appended on the device.
    //   A      - host input, Ay observations x Ax features (row-major)
    //   B      - host input, the Ay known target values
    //   C      - host output, Ax + 1 coefficients (caller-allocated)
    //   lambda - ridge regularization strength
    // Device workspaces only: the original also malloc'd five host buffers
    // (MatA..MatD) that were never used; they have been removed, along with
    // the unused local 'x'.
    double * MatA_d;   // uploaded design matrix A (Ay x Ax)
    double * MatA1_d;  // A with intercept column; later reused for C*B
    double * MatB_d;   // transpose of the augmented A
    double * MatC_d;   // A^T*A (+ lambda*I), then its inverse
    double * MatD_d;   // augmented [C | I] workspace for inversion
    double * MatE_d;   // known target vector (Ay values)
    double * Beta_d;   // resulting coefficients (Ax + 1 values)
    cudaMalloc((void **)&MatA_d, Ax * Ay * sizeof(double));
    cudaMalloc((void **)&MatA1_d, (Ax + 1) * Ay * sizeof(double));
    cudaMalloc((void **)&MatB_d, (Ax + 1) * Ay * sizeof(double));
    cudaMalloc((void **)&MatC_d, (Ax + 1) * (Ax + 1) * sizeof(double));
    cudaMalloc((void **)&MatD_d, 2 * (Ax + 1) * (Ax + 1) * sizeof(double));
    cudaMalloc((void **)&MatE_d, Ay * sizeof(double));
    cudaMalloc((void **)&Beta_d, (Ax + 1) * sizeof(double));
    cudaMemcpy(MatA_d, A, Ax * Ay * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(MatE_d, B, Ay * sizeof(double), cudaMemcpyHostToDevice);
    // Prepend the intercept column of 1s to A
    AppendOne << < Ax, Ay >> > (MatA_d, MatA1_d, Ay, Ax); // O(1)
    // Account for the new column
    Ax++;
    // B = Transpose(A)
    MatrixTranspose << < Ax, Ay >> > (MatA1_d, MatB_d, Ax, Ay); // O(1)
    // C = B*A, i.e. A^T * A
    MatrixMul << <Ax, Ax >> > (MatB_d, MatA1_d, MatC_d, Ay, Ax, Ax, Ay); // O(Ay)
    // Ridge regularization: C = C + lambda*I (the (0,0) intercept entry is
    // skipped inside the kernel)
    AddLambdaToDiagonal << <Ax, 1 >> > (MatC_d, lambda, Ax, Ax); // O(1)
    // Invert C via Gauss-Jordan on the augmented matrix [C | I]
    MatrixAppendIdentity << <Ax, 2 * Ax >> > (MatC_d, MatD_d, Ax, Ax); // O(1)
    MatrixInverse << <1, 2 * Ax >> > (MatD_d, Ax, 2 * Ax); // O(Ax)
    ExtractInverse << <Ax, 2 * Ax >> > (MatD_d, MatC_d, Ax, Ax); // O(1)
    // A = C*B, i.e. (A^T A + lambda I)^-1 * A^T
    MatrixMul << <Ax, Ay >> > (MatC_d, MatB_d, MatA1_d, Ax, Ax, Ay, Ax); // O(Ax)
    // Beta = A*E, where E is the known target vector
    MatrixMul << <1, Ax >> > (MatA1_d, MatE_d, Beta_d, Ay, Ax, 1, Ay); // O(Ay)
    // copy the coefficients back to the caller
    cudaMemcpy(C, Beta_d, Ax * sizeof(double), cudaMemcpyDeviceToHost);
    // free device resources
    cudaFree(MatA_d);
    cudaFree(MatA1_d);
    cudaFree(MatB_d);
    cudaFree(MatC_d);
    cudaFree(MatD_d);
    cudaFree(MatE_d);
    cudaFree(Beta_d);
}
// Performs matrix multiplication on A and B
// A is an array of known values of doubles with Ay rows and Ax columns
// B is an array with Ax doubles. Beta vector
// C is the output array of Ay doubles. must be allocated before
// O(Ax) time
// O(Ax * Ay) work
// "predict"
void linreg(double * A, double * B, double * C, int Ax, int Ay) {
    // Prediction step: C = A' * B, where A' is A with a leading intercept
    // column of 1s and B is the beta vector produced by get_beta.
    //   A  - host input, Ay observations x Ax features (row-major)
    //   B  - host input, Ax + 1 beta coefficients
    //   C  - host output, Ay predictions (caller-allocated)
    // The original malloc'd four host buffers (MatA..MatC) that were never
    // used and declared an unused 'int x'; both removed.
    double * MatA_d;   // uploaded feature matrix (Ay x Ax)
    double * MatA1_d;  // feature matrix with intercept column
    double * MatB_d;   // beta vector (Ax + 1 values)
    double * MatC_d;   // predictions (Ay values)
    cudaMalloc((void **)&MatA_d, Ax * Ay * sizeof(double));
    cudaMalloc((void **)&MatA1_d, (Ax + 1) * Ay * sizeof(double));
    cudaMalloc((void **)&MatB_d, (Ax + 1) * sizeof(double));
    cudaMalloc((void **)&MatC_d, Ay * sizeof(double));
    cudaMemcpy(MatA_d, A, Ax * Ay * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(MatB_d, B, (Ax + 1) * sizeof(double), cudaMemcpyHostToDevice);
    // Prepend the intercept column of 1s to A
    AppendOne << <Ax, Ay >> > (MatA_d, MatA1_d, Ay, Ax); // O(1)
    // Account for the new column
    Ax++;
    // C = A * Beta
    MatrixMul << <1, Ay >> > (MatA1_d, MatB_d, MatC_d, Ax, Ay, 1, Ax); // O(Ax)
    // copy the predictions back to the caller
    cudaMemcpy(C, MatC_d, Ay * sizeof(double), cudaMemcpyDeviceToHost);
    // free device resources
    cudaFree(MatA_d);
    cudaFree(MatA1_d);
    cudaFree(MatB_d);
    cudaFree(MatC_d);
}
// Training set is the data the line will be fit to
// Known values corrospond to the training set
// Test set will be used to test the line of best fit
// Test values are the actual values of the test set
// X is the number of features in the dataset.
// Y is the number of elements in the training set. should be less than 1024
// Yt is the number of elements in the test set
// End-to-end benchmark of the regression pipeline: fits the model three
// times — on 1/100, 1/10, and all of the training observations — then
// predicts the test set with each fit and prints the beta vectors,
// per-sample predictions, fit times, mean absolute error, and RMSE.
//   training_set / known_values : X features x Y observations and targets
//   test_set / test_values      : Yt held-out observations and targets
//   X  - number of features
//   Y  - number of training observations (should be less than 1024)
//   Yt - number of test observations
// Fixes over the original: the 1/10 and 1/100 copies and the secondary
// output buffers were leaked; unused AarrSize/BarrSize/CarrSize removed.
void linreg_test(double * training_set, double * known_values, double * test_set, double * test_values, int X, int Y, int Yt)
{
    int AX = X;
    int AY = Y;
    int BX = 1;
    int BY = Y;
    // Training data arrays (full, 1/10-size and 1/100-size copies)
    int Asize = AX * AY * sizeof(double);
    int A2size = AX * (AY / 10) * sizeof(double);
    int A3size = AX * (AY / 100) * sizeof(double);
    double * MatA = (double *)malloc(Asize);
    double * MatA2 = (double *)malloc(A2size);
    double * MatA3 = (double *)malloc(A3size);
    memcpy(MatA, training_set, Asize);
    memcpy(MatA2, training_set, A2size);
    memcpy(MatA3, training_set, A3size);
    // Known target arrays (the 1/10 and 1/100 copies are allocated at full
    // size but only partially filled, as in the original)
    int Bsize = BX * BY * sizeof(double);
    int B2size = BX * (BY / 10) * sizeof(double);
    int B3size = BX * (BY / 100) * sizeof(double);
    double * MatB = (double *)malloc(Bsize);
    double * MatB2 = (double *)malloc(Bsize);
    double * MatB3 = (double *)malloc(Bsize);
    memcpy(MatB, known_values, Bsize);
    memcpy(MatB2, known_values, B2size);
    memcpy(MatB3, known_values, B3size);
    // Output arrays: beta vectors (AX + 1 each) and predictions (Yt each)
    int Csize = (AX + 1) * sizeof(double);
    double * MatC = (double *)malloc(Csize);
    double * MatC2 = (double *)malloc(Csize);
    double * MatC3 = (double *)malloc(Csize);
    double * MatD = (double *)malloc(Yt * AX * sizeof(double));
    double * MatE = (double *)malloc(Yt * sizeof(double));
    double * MatE2 = (double *)malloc(Yt * sizeof(double));
    double * MatE3 = (double *)malloc(Yt * sizeof(double));
    memcpy(MatD, test_set, Yt * AX * sizeof(double));
    // Timing variables
    clock_t start, end;
    double time3, time2, time;
    int x;
    // --- Fit + predict with 1/100 of the training observations ---
    start = clock();
    get_beta(MatA3, MatB3, MatC3, AX, (AY / 100), 75);
    end = clock();
    time3 = ((double)(end - start)) / CLOCKS_PER_SEC;
    linreg(MatD, MatC3, MatE3, AX, Yt);
    // --- Fit + predict with 1/10 of the training observations ---
    start = clock();
    get_beta(MatA2, MatB2, MatC2, AX, (AY / 10), 75);
    end = clock();
    time2 = ((double)(end - start)) / CLOCKS_PER_SEC;
    linreg(MatD, MatC2, MatE2, AX, Yt);
    // --- Fit + predict with all training observations ---
    start = clock();
    get_beta(MatA, MatB, MatC, AX, AY, 75);
    end = clock();
    time = ((double)(end - start)) / CLOCKS_PER_SEC;
    linreg(MatD, MatC, MatE, AX, Yt);
    // Error statistics for the three fits
    double to_add, to_add2, to_add3;
    double sum = 0;
    double sum2 = 0;
    double sum3 = 0;
    double sum_s = 0;
    double sum_s2 = 0;
    double sum_s3 = 0;
    for (x = 0; x < Yt; x++) {
        // signed error per test sample
        to_add = (MatE[x] - test_values[x]);
        to_add2 = (MatE2[x] - test_values[x]);
        to_add3 = (MatE3[x] - test_values[x]);
        // absolute value
        if (to_add < 0) {
            to_add = to_add * -1;
        }
        if (to_add2 < 0) {
            to_add2 = to_add2 * -1;
        }
        if (to_add3 < 0) {
            to_add3 = to_add3 * -1;
        }
        // accumulate the sum and sum of squares
        sum += to_add;
        sum2 += to_add2;
        sum3 += to_add3;
        sum_s += (to_add * to_add);
        sum_s2 += (to_add2 * to_add2);
        sum_s3 += (to_add3 * to_add3);
    }
    // mean absolute error
    sum = sum / Yt;
    sum2 = sum2 / Yt;
    sum3 = sum3 / Yt;
    // root mean squared error
    sum_s = sqrt(sum_s / Yt);
    sum_s2 = sqrt(sum_s2 / Yt);
    sum_s3 = sqrt(sum_s3 / Yt);
    // Report for the 1/100 run
    printf("Results for %d element test:\n\n", (AY / 100));
    printf("Beta = \n");
    for (x = 0; x < (AX + 1); x++) {
        printf("%f\n", MatC3[x]);
    }
    printf("\n");
    for (x = 0; x < Yt; x++) {
        if (x % 2 == 0 && x != 0) {
            printf("\n");
            printf("Predicted: %f \tActual: %f\t\t", MatE3[x], test_values[x]);
        }
        else {
            printf("Predicted: %f \tActual: %f\t\t", MatE3[x], test_values[x]);
        }
    }
    printf("\n");
    printf("Best fit calculation time: %f\n", time3);
    printf("Average error: %f\n", sum3);
    printf("RMSE: %f\n\n\n", sum_s3);
    // Report for the 1/10 run
    printf("Results for %d element test:\n\n", (AY / 10));
    printf("Beta = \n");
    for (x = 0; x < (AX + 1); x++) {
        printf("%f\n", MatC2[x]);
    }
    printf("\n");
    for (x = 0; x < Yt; x++) {
        if (x % 2 == 0 && x != 0) {
            printf("\n");
            printf("Predicted: %f \tActual: %f\t\t", MatE2[x], test_values[x]);
        }
        else {
            printf("Predicted: %f \tActual: %f\t\t", MatE2[x], test_values[x]);
        }
    }
    printf("\n");
    printf("Best fit calculation time: %f\n", time2);
    printf("Average error: %f\n", sum2);
    printf("RMSE: %f\n\n\n", sum_s2);
    // Report for the full run
    printf("Results for %d element test:\n\n", AY);
    printf("Beta = \n");
    for (x = 0; x < (AX + 1); x++) {
        printf("%f\n", MatC[x]);
    }
    printf("\n");
    for (x = 0; x < Yt; x++) {
        if (x % 2 == 0 && x != 0) {
            printf("\n");
            printf("Predicted: %f \tActual: %f\t\t", MatE[x], test_values[x]);
        }
        else {
            printf("Predicted: %f \tActual: %f\t\t", MatE[x], test_values[x]);
        }
    }
    printf("\n");
    printf("Best fit calculation time: %f\n", time);
    printf("Average error: %f\n", sum);
    printf("RMSE: %f\n\n\n", sum_s);
    // free every host buffer (the original leaked the *2/*3 copies)
    free(MatA);
    free(MatA2);
    free(MatA3);
    free(MatB);
    free(MatB2);
    free(MatB3);
    free(MatC);
    free(MatC2);
    free(MatC3);
    free(MatD);
    free(MatE);
    free(MatE2);
    free(MatE3);
}
}
// Entry point: benchmarks the GPU regression pipeline on the housing data
// compiled in from house_med.h.
int main() {
    linreg_test(houses_m, prices_m, test_houses_m, real_prices_m, features, training_size, test_size);
    return 0;
}
e99781bad96b2893e7a17fd6f38ade022107dc7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__device__ double norm_calc_device;
// One Jacobi sweep over the N x N grid u: each interior cell is replaced in
// u_new by the average of its four neighbours plus the source term h_sq
// (right-hand side f == 1, so the update is (h^2 + neighbour sum) / 4).
// The guard leaves all boundary cells untouched.
// Expected launch: at least N*N threads in 1D.
__global__ void JacobiKernel(double *u, double *u_new, int N, double h_sq) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // interior test: past row 0, not in column 0 or N-1, not in the last row
    if( (idx>N) && (idx%N!=0) && (idx%N!=N-1) && (idx/N < N-1))
    *(u_new + idx) = 0.25 * ((h_sq)+ *(u + idx - N) + *(u + idx - 1) + *(u + idx + N) + *(u + idx + 1));
}
// L2 norm of the residual of the discretized Poisson problem -lap(u) = 1:
// for every interior cell of the N x N grid, accumulates
// ((4*u[i][j] - neighbours) / h^2 - 1)^2 and returns the square root.
double norm (double *u , int N, double h_sq)
{
    double accum = 0.0;
    for (int row = 1; row < N - 1; row++) {
        for (int col = 1; col < N - 1; col++) {
            // 5-point Laplacian stencil centred on (row, col)
            double stencil = 4.0 * u[row * N + col]
                           - u[(row - 1) * N + col]
                           - u[row * N + (col - 1)]
                           - u[(row + 1) * N + col]
                           - u[row * N + (col + 1)];
            double residual = stencil / h_sq - 1.0;
            accum += pow(residual, 2);
        }
    }
    return sqrt(accum);
}
// GPU residual norm: each thread computes the squared 5-point stencil
// residual of one interior cell and atomically accumulates it into the
// device-global norm_calc_device. The host must zero that symbol before
// launch and take the square root afterwards.
// NOTE: atomicAdd on double requires compute capability 6.0+.
// Threads outside the interior still issue an atomicAdd of 0.0 (harmless).
__global__ void normKernel(double *u , int N , double h_sq){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    double temp = 0.0;
    if( (idx>N) && (idx%N!=0) && (idx%N!=N-1) && (idx/N < N-1)){
        temp+=4.0 * *(u + idx);
        temp-= *(u + idx - N);
        temp-= *(u + idx - 1);
        temp-= *(u + idx + N);
        temp-= *(u + idx + 1);
        temp/=h_sq;
        // squared residual against the constant right-hand side f == 1
        temp = pow((temp-1.0),2);
    }
    atomicAdd(&norm_calc_device, temp);
}
// CPU Jacobi solver: sweeps until the residual norm drops below one
// millionth of the initial norm, or max_iter iterations are reached.
// NOTE(review): u and u_new are swapped *locally* each iteration, so after
// an odd number of iterations the final solution lives in the caller's
// u_new buffer rather than u; callers reading u afterwards may see a
// one-sweep-stale grid — confirm intended.
void jacobi(double *u, double * u_new, int N, double h, double h_sq, double norm_init)
{
    printf("Jacobi Method:\n");
    int iter = 1;
    double norm_calc = norm_init;
    int max_iter = 1000;
    printf("\nInitial Norm:%f\n", norm_init);
    while(norm_calc * 1000000 > norm_init && iter <= max_iter)
    {
        // update every interior cell from the previous sweep's values
        for(int i = 1; i < N-1 ; i++)
        for(int j = 1 ; j < N-1 ; j++)
        *(u_new + i*N + j) = 0.25 * ((h_sq)+ *(u + (i-1)*N + j) + *(u + i*N + (j-1)) + *(u + (i+1)*N + j) + *(u + i*N + (j+1)));
        // ping-pong the buffers and re-evaluate convergence
        swap(u,u_new);
        norm_calc = norm( u, N,h_sq);
        iter++;
    }
    printf("\nFinal Norm:%f\n", norm_calc);
}
// GPU Jacobi solver: same convergence loop as jacobi(), but the sweep and
// the residual reduction both run on the device. The converged grid is
// copied back into the caller's u buffer.
// NOTE(review): the device-to-device copy each iteration could be replaced
// by swapping the two device pointers, and the explicit
// hipDeviceSynchronize() is redundant given the blocking memcpy that
// follows — left as-is here.
void jacobiGPU(double *u, double * u_new, int N, double h, double h_sq, double norm_init, double *u_device, double *u_new_device)
{
    printf("Jacobi Method:\n");
    int iter = 1;
    double norm_calc = norm_init;
    int max_iter = 1000;
    printf("\nInitial Norm:%f\n", norm_init);
    // upload the initial grid once; all iterations stay on the device
    hipMemcpy(u_device, u, N*N*sizeof(double), hipMemcpyHostToDevice);
    while(norm_calc * 1000000 > norm_init && iter <= max_iter)
    {
        // one Jacobi sweep: u_new_device <- stencil(u_device)
        hipLaunchKernelGGL(( JacobiKernel), dim3(N*N/1024+1),dim3(1024), 0, 0, u_device, u_new_device, N,h_sq);
        hipDeviceSynchronize();
        hipMemcpy(u_device, u_new_device, N*N*sizeof(double), hipMemcpyDeviceToDevice);
        // zero the device-global accumulator, reduce, and pull it back
        norm_calc = 0.0;
        hipMemcpyToSymbol(norm_calc_device, &norm_calc, sizeof(double) );
        hipLaunchKernelGGL(( normKernel), dim3(N*N/1024+1),dim3(1024), 0, 0, u_device, N,h_sq);
        hipMemcpyFromSymbol(&norm_calc, norm_calc_device, sizeof(double), 0, hipMemcpyDeviceToHost);
        norm_calc = sqrt(norm_calc);
        iter++;
    }
    // return the final grid to the host
    hipMemcpy(u, u_new_device, N*N*sizeof(double), hipMemcpyDeviceToHost);
    printf("\nFinal Norm:%f\n", norm_calc);
}
// Driver: solves the N x N Poisson problem (N from argv[1], default 1000)
// with the CPU and the GPU Jacobi solvers, times both, and reports the
// accumulated difference between the two solutions.
int main(int argc, char **argv)
{
    int N;
    double h, h_sq;
    N = 1000;
    if(argc == 2)
    N = atoi(argv[1]);
    // grid spacing for an (N+1)-interval domain
    h = (double)1/(double)(N+1);
    h_sq = h*h;
    // pinned host buffers for the CPU run
    double *u, *u_new;
    hipHostMalloc((void**)&u, N * N * sizeof(double));
    hipHostMalloc((void**)&u_new, N * N * sizeof(double));
    for(int i = 0; i<N*N; i++)
    *(u+i) = 0.0;
    double norm_init = norm(u,N,h_sq);
    // time the CPU solver
    struct timeval start, end;
    gettimeofday(&start, NULL);
    jacobi(u, u_new, N, h , h_sq,norm_init);
    gettimeofday(&end, NULL);
    double time_taken = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
    printf("\nThe program took %f seconds to execute\n", time_taken);
    hipHostFree(u_new);
    // pinned host buffers plus device buffers for the GPU run
    double *u_GPU, *u_new_GPU;
    hipHostMalloc((void**)&u_GPU, N * N * sizeof(double));
    hipHostMalloc((void**)&u_new_GPU, N * N * sizeof(double));
    for(int i = 0; i<N*N; i++)
    *(u_GPU+i) = 0.0;
    double *u_device;
    double *u_new_device;
    hipMalloc(&u_device, N*N*sizeof(double));
    hipMalloc(&u_new_device, N*N*sizeof(double));
    // time the GPU solver
    gettimeofday(&start, NULL);
    jacobiGPU(u_GPU, u_new_GPU, N, h ,h_sq,norm_init,u_device,u_new_device);
    gettimeofday(&end, NULL);
    time_taken = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
    printf("\nThe program took %f seconds to execute\n", time_taken);
    hipFree(u_device);
    hipFree(u_new_device);
    /*-----Calculating errors----*/
    // NOTE(review): this sums *signed* differences, so CPU/GPU deviations of
    // opposite sign cancel out; fabs() of each term would give a meaningful
    // discrepancy metric — confirm intended.
    double error = 0.0;
    for(int i=0; i<N;i++)
    for(int j=0; j<N; j++)
    {
    error+= *(u + i*N + j) - *(u_GPU + i*N + j);
    }
    printf("Calculated error between GPU and CPU code: %f", error);
    hipHostFree(u);
    hipHostFree(u_GPU);
    hipHostFree(u_new_GPU);
    return 0;
}
| e99781bad96b2893e7a17fd6f38ade022107dc7f.cu | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
__device__ double norm_calc_device;
// One Jacobi sweep over the N x N grid u: each interior cell is replaced in
// u_new by the average of its four neighbours plus the source term h_sq
// (right-hand side f == 1, so the update is (h^2 + neighbour sum) / 4).
// The guard leaves all boundary cells untouched.
// Expected launch: at least N*N threads in 1D.
__global__ void JacobiKernel(double *u, double *u_new, int N, double h_sq) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // interior test: past row 0, not in column 0 or N-1, not in the last row
    if( (idx>N) && (idx%N!=0) && (idx%N!=N-1) && (idx/N < N-1))
    *(u_new + idx) = 0.25 * ((h_sq)+ *(u + idx - N) + *(u + idx - 1) + *(u + idx + N) + *(u + idx + 1));
}
// L2 norm of the residual of the discretized Poisson problem -lap(u) = 1:
// for every interior cell of the N x N grid, accumulates
// ((4*u[i][j] - neighbours) / h^2 - 1)^2 and returns the square root.
//   u    - row-major N x N grid
//   h_sq - squared grid spacing h^2
double norm (double *u , int N, double h_sq)
{
    double norm_2d = 0.0;
    for(int i = 1 ; i < N-1 ; i++)
        for(int j = 1; j < N-1 ; j++){
            // 5-point Laplacian stencil centred on (i, j)
            double temp = 0.0;
            temp += 4.0 * u[i*N + j];
            temp -= u[(i-1)*N + j];
            temp -= u[i*N + (j-1)];
            temp -= u[(i+1)*N + j];
            temp -= u[i*N + (j+1)];
            temp /= h_sq;
            double r = temp - 1.0;
            // direct multiply instead of the original pow(r, 2) call
            norm_2d += r * r;
        }
    return sqrt(norm_2d);
}
// GPU residual norm: each thread computes the squared 5-point stencil
// residual of one interior cell and atomically accumulates it into the
// device-global norm_calc_device. The host must zero that symbol before
// launch and take the square root afterwards.
// NOTE: atomicAdd on double requires compute capability 6.0+.
// Threads outside the interior still issue an atomicAdd of 0.0 (harmless).
__global__ void normKernel(double *u , int N , double h_sq){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    double temp = 0.0;
    if( (idx>N) && (idx%N!=0) && (idx%N!=N-1) && (idx/N < N-1)){
        temp+=4.0 * *(u + idx);
        temp-= *(u + idx - N);
        temp-= *(u + idx - 1);
        temp-= *(u + idx + N);
        temp-= *(u + idx + 1);
        temp/=h_sq;
        // squared residual against the constant right-hand side f == 1
        temp = pow((temp-1.0),2);
    }
    atomicAdd(&norm_calc_device, temp);
}
// CPU Jacobi solver: sweeps until the residual norm drops below one
// millionth of the initial norm, or max_iter iterations are reached.
// NOTE(review): u and u_new are swapped *locally* each iteration, so after
// an odd number of iterations the final solution lives in the caller's
// u_new buffer rather than u; callers reading u afterwards may see a
// one-sweep-stale grid — confirm intended.
void jacobi(double *u, double * u_new, int N, double h, double h_sq, double norm_init)
{
    printf("Jacobi Method:\n");
    int iter = 1;
    double norm_calc = norm_init;
    int max_iter = 1000;
    printf("\nInitial Norm:%f\n", norm_init);
    while(norm_calc * 1000000 > norm_init && iter <= max_iter)
    {
        // update every interior cell from the previous sweep's values
        for(int i = 1; i < N-1 ; i++)
        for(int j = 1 ; j < N-1 ; j++)
        *(u_new + i*N + j) = 0.25 * ((h_sq)+ *(u + (i-1)*N + j) + *(u + i*N + (j-1)) + *(u + (i+1)*N + j) + *(u + i*N + (j+1)));
        // ping-pong the buffers and re-evaluate convergence
        swap(u,u_new);
        norm_calc = norm( u, N,h_sq);
        iter++;
    }
    printf("\nFinal Norm:%f\n", norm_calc);
}
// GPU Jacobi solver: same convergence loop as jacobi(), but the sweep and
// the residual reduction both run on the device. The converged grid is
// copied back into the caller's u buffer.
// NOTE(review): the device-to-device copy each iteration could be replaced
// by swapping the two device pointers, and the explicit
// cudaDeviceSynchronize() is redundant given the blocking memcpy that
// follows — left as-is here.
void jacobiGPU(double *u, double * u_new, int N, double h, double h_sq, double norm_init, double *u_device, double *u_new_device)
{
    printf("Jacobi Method:\n");
    int iter = 1;
    double norm_calc = norm_init;
    int max_iter = 1000;
    printf("\nInitial Norm:%f\n", norm_init);
    // upload the initial grid once; all iterations stay on the device
    cudaMemcpy(u_device, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
    while(norm_calc * 1000000 > norm_init && iter <= max_iter)
    {
        // one Jacobi sweep: u_new_device <- stencil(u_device)
        JacobiKernel<<<N*N/1024+1,1024>>>(u_device, u_new_device, N,h_sq);
        cudaDeviceSynchronize();
        cudaMemcpy(u_device, u_new_device, N*N*sizeof(double), cudaMemcpyDeviceToDevice);
        // zero the device-global accumulator, reduce, and pull it back
        norm_calc = 0.0;
        cudaMemcpyToSymbol(norm_calc_device, &norm_calc, sizeof(double) );
        normKernel<<<N*N/1024+1,1024>>>(u_device, N,h_sq);
        cudaMemcpyFromSymbol(&norm_calc, norm_calc_device, sizeof(double), 0, cudaMemcpyDeviceToHost);
        norm_calc = sqrt(norm_calc);
        iter++;
    }
    // return the final grid to the host
    cudaMemcpy(u, u_new_device, N*N*sizeof(double), cudaMemcpyDeviceToHost);
    printf("\nFinal Norm:%f\n", norm_calc);
}
// Driver: solves the N x N Poisson problem (N from argv[1], default 1000)
// with the CPU and the GPU Jacobi solvers, times both, and reports the
// accumulated difference between the two solutions.
int main(int argc, char **argv)
{
    int N;
    double h, h_sq;
    N = 1000;
    if(argc == 2)
    N = atoi(argv[1]);
    // grid spacing for an (N+1)-interval domain
    h = (double)1/(double)(N+1);
    h_sq = h*h;
    // pinned host buffers for the CPU run
    double *u, *u_new;
    cudaMallocHost((void**)&u, N * N * sizeof(double));
    cudaMallocHost((void**)&u_new, N * N * sizeof(double));
    for(int i = 0; i<N*N; i++)
    *(u+i) = 0.0;
    double norm_init = norm(u,N,h_sq);
    // time the CPU solver
    struct timeval start, end;
    gettimeofday(&start, NULL);
    jacobi(u, u_new, N, h , h_sq,norm_init);
    gettimeofday(&end, NULL);
    double time_taken = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
    printf("\nThe program took %f seconds to execute\n", time_taken);
    cudaFreeHost(u_new);
    // pinned host buffers plus device buffers for the GPU run
    double *u_GPU, *u_new_GPU;
    cudaMallocHost((void**)&u_GPU, N * N * sizeof(double));
    cudaMallocHost((void**)&u_new_GPU, N * N * sizeof(double));
    for(int i = 0; i<N*N; i++)
    *(u_GPU+i) = 0.0;
    double *u_device;
    double *u_new_device;
    cudaMalloc(&u_device, N*N*sizeof(double));
    cudaMalloc(&u_new_device, N*N*sizeof(double));
    // time the GPU solver
    gettimeofday(&start, NULL);
    jacobiGPU(u_GPU, u_new_GPU, N, h ,h_sq,norm_init,u_device,u_new_device);
    gettimeofday(&end, NULL);
    time_taken = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
    printf("\nThe program took %f seconds to execute\n", time_taken);
    cudaFree(u_device);
    cudaFree(u_new_device);
    /*-----Calculating errors----*/
    // NOTE(review): this sums *signed* differences, so CPU/GPU deviations of
    // opposite sign cancel out; fabs() of each term would give a meaningful
    // discrepancy metric — confirm intended.
    double error = 0.0;
    for(int i=0; i<N;i++)
    for(int j=0; j<N; j++)
    {
    error+= *(u + i*N + j) - *(u_GPU + i*N + j);
    }
    printf("Calculated error between GPU and CPU code: %f", error);
    cudaFreeHost(u);
    cudaFreeHost(u_GPU);
    cudaFreeHost(u_new_GPU);
    return 0;
}
|
c425243b117eb209021fdc5f6f13f0548a7dcb0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <omp.h>
#include <stdlib.h> // rand, srand for cpu
#include <hiprand/hiprand_kernel.h>
#define BLOCK_SIZE 33// shared memory size
// Initializes one curand RNG state per thread, all sharing the same seed
// with the thread's global index used as both the subsequence and the
// offset into that subsequence.
// NOTE(review): parameter N is unused — presumably a bounds guard
// (col < N) was intended before indexing state[col]; confirm the launch
// never exceeds the state array.
__global__ void createRandomNumbers(int N, hiprandState_t * state, unsigned long seed)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    hiprand_init (seed, col, col, &state[col]);
}
// Cellular-automaton forest-fire simulation run entirely in shared memory for
// fireSpreadPhases iterations.  Cell states: 0 = empty, 1 = tree, 2 = burning.
// Each thread owns one cell of a blockDim.y x blockDim.x tile; only the final
// phase's result is written back to global memory b.
// NOTE(review): tx/ty are shifted by +1 (range 1..blockDim), so with 32x32
// blocks the neighbour reads at [ty + 1][tx + 1] touch index 33 while the
// tiles are BLOCK_SIZE (= 33) wide (valid indices 0..32) -- out of bounds.
// The halo cells (row/column 0) are also never loaded from `a`, and cells on
// a block edge never see neighbours in adjacent blocks.  Looks like the tile
// should be blockDim + 2 wide with explicit halo loads -- TODO confirm.
__global__ void spreadFireGPU(int *a, int *b, int N, int fireSpreadPhases, int fireSpreadProbability, hiprandState_t *states)
{
int tx = threadIdx.x + 1; // avoid out of bounds
int ty = threadIdx.y + 1; // avoid out of bounds
// calculate the row and column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int index = row * N + col;
//__shared__ hiprandState_t s_rand[SHARED_MEM_SIZE][SHARED_MEM_SIZE];
__shared__ int s_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int s_b[BLOCK_SIZE][BLOCK_SIZE];
// write to shared memory
s_a[ty][tx] = a[index];
// wait until all threads have written to shared memory
__syncthreads();
float fireProbability = 0;
// iterate through the process of spreading forest fires for allocated amount of times
for (int i = 0; i < fireSpreadPhases; i++)
{
switch(s_a[ty][tx])
{
case 0:
{
s_b[ty][tx] = 0;
break;
}
case 1:
{
// check the neighbourhood - cellular automata
// if a neighbouring element has a tree that is on fire
// then randomly see if the element itself is going to be ignited
// Please note: the commented out parts of the if statement are those
// that create memory leaks s_a[ty minus any amount] - trying to stipulate the threadidx.y did not work
// as apparently is always has a value of 0...
// NOTE(review): tx >= 1 always holds here, so this guard is a no-op.
if(tx > 0)
if(
// Top left and top right
s_a[ty + 1][tx + 1] == 2 || s_a[ty + 1][tx - 1] == 2 ||
// bottom left and bottom right
s_a[ty - 1][tx - 1] == 2 || s_a[ty - 1][tx + 1] == 2 ||
// top and bottom
s_a[ty + 1][tx] == 2 || s_a[ty - 1][tx] == 2 ||
// left and right
s_a[ty][tx + 1] == 2 || s_a[ty][tx - 1] == 2 )
{
// NOTE(review): states[tx] is shared by every thread with the same
// threadIdx.x, so concurrent hiprand_uniform calls mutate one RNG state
// from many threads; presumably states[index] was intended. TODO confirm.
fireProbability = hiprand_uniform(&states[tx]);
// to match the users probability chance (or to be near enough to it)
// multiply randomised number by 100
if( fireProbability * 100 < fireSpreadProbability )
s_b[ty][tx] = 2;
}
else
s_b[ty][tx] = 1;
break;
}
case 2:
{
// same shared-RNG-state concern as in case 1 above
fireProbability = hiprand_uniform(&states[tx]);
// if a tree is on fire, there is a 30% chance that it will be burnt down completely
// will not use *100, as that would be a waste of resources
if(fireProbability < 0.3)
s_b[ty][tx] = 0;
else
s_b[ty][tx] = 2;
break;
}
}
__syncthreads();
// copy over results from this computation to the next
s_a[ty][tx] = s_b[ty][tx];
// wait until each thread has written output
__syncthreads();
}
__syncthreads();
b[index] = s_b[ty][tx];
}
// Abort the program with a diagnostic if the most recent runtime API call or
// kernel launch left an error behind (hipGetLastError also clears it).
void checkForCudaError()
{
    const hipError_t status = hipGetLastError();
    if (status == hipSuccess)
        return;
    printf(" CUDA error at setup_kernel: %s \n", hipGetErrorString(status));
    exit(-1);
}
// Round x up to the next power of two: smear the highest set bit of (x - 1)
// into every lower position, then add one.  nextPow2(8) == 8, nextPow2(9) == 16.
int nextPow2(int x)
{
    int v = x - 1;
    for (int shift = 1; shift <= 16; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}
// Interactive driver: builds an N x N forest grid on the host, uploads it,
// seeds one RNG state per simulation column, runs the fire-spreading kernel
// for the requested number of phases, and reports GPU timing plus
// (optionally) the resulting grid.
int main()
{
// Number of elements (NxN)
int N = 1024;
// number of phases, and probability of igniting
int fireSpreadPhases = 0;
int fireSpreadProbability = 0;
// Displaying results toggle
int displayResults = 0;
// host memory pointers
int *a_h = NULL;
int *b_h = NULL;
// optimal number of blocks and threads
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
// number of byes in the array
// NOTE(review): memSize is fixed from the default N (1024) before the user's
// N is read below; the buffers stay sized for the 1024x1024 worst case, which
// only works because the prompt restricts N to <= 1024. TODO confirm intent.
size_t memSize = (N*N) * sizeof(int);
// allocate memory on the host
a_h = (int*) malloc(memSize);
b_h = (int*) malloc(memSize);
// device memory pointers
int *a_d = NULL;
int *b_d = NULL;
// allocate memory on the device
hipMalloc((void**)&a_d, memSize);
hipMalloc((void**)&b_d, memSize);
// STARTUP - number of simultations and the probability of fire spreading
printf("\nPlease define the number of elements that will be used for this simulation (between 32 and 1024): ");
scanf("%i", &N);
while(N < 32 || N > 1024)
{
printf("\nThe amount defined is too low, please make it between 0 and 1024: ");
scanf("%i", &N);
}
// declare threads and blocks; dim3 variables
int maxThreads = 32;
int maxBlocks = N/32;//deviceProp.maxThreadsPerMultiProcessor/maxThreads*deviceProp.multiProcessorCount;
dim3 grid(maxBlocks, maxBlocks, 1);
dim3 block(maxThreads, maxThreads , 1);
printf("\nThe total number of threads are %i", maxThreads);
printf("\nThe total number of blocks are %i", maxBlocks);
printf("\nThe total amount of elements are: %i x %i = %i.", N,N,N*N);
printf("\n\nFires in forests spread over time; please determine the amount of phases (or turns) "
"that there will be for this session: ");
scanf("%i", &fireSpreadPhases);
printf("\nEnter the probability of fire spreading from an ignited tree to those adjacent to it (0 - 100): ");
scanf("%i", &fireSpreadProbability);
// The central point will be given a value of 2 -- tree is on fire
// This is to determine the starting point for the simulation.
for (int i = 0; i < N; i++)
{
// first row is all empty; as is last
if(i == 0 || i == N - 1)
for (int j = 0; j < N; j++)
a_h[i * N + j] = 0;
else
{
for (int j = 0; j < N; j++)
{
if(j % 16 == 0)
a_h[i * N + j] = 2; // once for each 32 - so that all blocks access and use neighbours
else
a_h[i * N + j] = 1;
}
}
}
// Copy initialisation data above to the device
hipMemcpy(a_d, a_h, memSize, hipMemcpyHostToDevice);
// NOTE(review): b_h is copied to the device without ever being initialised,
// so b_d starts as uninitialised heap contents. TODO confirm this is benign
// (the kernel overwrites b before it is read back).
hipMemcpy(b_d, b_h, memSize, hipMemcpyHostToDevice);
// INITIALISATION of seeds for hiprand (cuda version of rand() )
// create time variables so that we have a random unsigned long
// that will generate a different set of random numbers
time_t seedTime;
time(&seedTime);
unsigned int memFloatSize = (N*N) * sizeof(float);
// create random states
hiprandState_t* states_d;
// NOTE(review): memFloatSize is already a byte count, so multiplying by
// sizeof(hiprandState_t) over-allocates by a factor of sizeof(float);
// (N*N) * sizeof(hiprandState_t) was presumably intended. TODO confirm.
hipMalloc(&states_d, memFloatSize*sizeof(hiprandState_t));
dim3 randomGrid(maxBlocks,1,1);
dim3 randomBlock(maxThreads, 1,1);
// setup the seeds
hipLaunchKernelGGL(( createRandomNumbers), dim3(randomGrid), dim3(randomBlock), 0, 0, N, states_d, (unsigned long) seedTime);
// error detection
hipDeviceSynchronize();
// check for error
checkForCudaError();
//hipFuncSetCacheConfig(spreadFireGPU, hipFuncCachePreferShared);
// STARTING GPU Implementation
printf("\nMeasuring GPU execution time...");
// start timer and recording
float timeGPU;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// execute kernal
hipLaunchKernelGGL(( spreadFireGPU), dim3(grid), dim3(block), 0, 0, a_d, b_d, N, fireSpreadPhases, fireSpreadProbability, states_d);
// stop timer and display results
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeGPU, start, stop);
printf("\n\nThe GPU implementation speed is: %f ms \n", timeGPU);
// error detection
hipDeviceSynchronize();
// check for error
checkForCudaError();
// collect results from GPU
hipMemcpy(b_h, b_d, memSize, hipMemcpyDeviceToHost);
printf("\n\nWould you like to display the results? (1 for Y or 2 for N): ");
scanf("%i", &displayResults);
if(displayResults == 1)
for(int i = 0; i < N; i ++)
{
for (int j = 0; j < N; j++)
{
printf(" %i ", b_h[i * N + j]);
}
printf("\n");
}
// free device memory
hipFree(a_d);
hipFree(b_d);
hipFree(states_d);
// free up some memory
free(a_h);
free(b_h);
return 0;
} | c425243b117eb209021fdc5f6f13f0548a7dcb0b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <omp.h>
#include <stdlib.h> // rand, srand for cpu
#include <curand_kernel.h>
#define BLOCK_SIZE 33// shared memory size
// Kernel: seed one curand RNG state per thread; both the sequence number and
// the offset are the thread's global x index, so each state is independent.
// FIX: added the bounds guard on N -- the parameter was previously accepted
// but never used, so a launch whose thread count is not an exact multiple of
// N would initialise states past the intended range.
__global__ void createRandomNumbers(int N, curandState * state, unsigned long seed)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < N)
        curand_init(seed, col, col, &state[col]);
}
// Cellular-automaton forest-fire simulation run entirely in shared memory for
// fireSpreadPhases iterations.  Cell states: 0 = empty, 1 = tree, 2 = burning.
// Each thread owns one cell of a blockDim.y x blockDim.x tile; only the final
// phase's result is written back to global memory b.
// NOTE(review): tx/ty are shifted by +1 (range 1..blockDim), so with 32x32
// blocks the neighbour reads at [ty + 1][tx + 1] touch index 33 while the
// tiles are BLOCK_SIZE (= 33) wide (valid indices 0..32) -- out of bounds.
// The halo cells (row/column 0) are also never loaded from `a`, and cells on
// a block edge never see neighbours in adjacent blocks.  Looks like the tile
// should be blockDim + 2 wide with explicit halo loads -- TODO confirm.
__global__ void spreadFireGPU(int *a, int *b, int N, int fireSpreadPhases, int fireSpreadProbability, curandState *states)
{
int tx = threadIdx.x + 1; // avoid out of bounds
int ty = threadIdx.y + 1; // avoid out of bounds
// calculate the row and column index of the element
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int index = row * N + col;
//__shared__ curandState s_rand[SHARED_MEM_SIZE][SHARED_MEM_SIZE];
__shared__ int s_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int s_b[BLOCK_SIZE][BLOCK_SIZE];
// write to shared memory
s_a[ty][tx] = a[index];
// wait until all threads have written to shared memory
__syncthreads();
float fireProbability = 0;
// iterate through the process of spreading forest fires for allocated amount of times
for (int i = 0; i < fireSpreadPhases; i++)
{
switch(s_a[ty][tx])
{
case 0:
{
s_b[ty][tx] = 0;
break;
}
case 1:
{
// check the neighbourhood - cellular automata
// if a neighbouring element has a tree that is on fire
// then randomly see if the element itself is going to be ignited
// Please note: the commented out parts of the if statement are those
// that create memory leaks s_a[ty minus any amount] - trying to stipulate the threadidx.y did not work
// as apparently is always has a value of 0...
// NOTE(review): tx >= 1 always holds here, so this guard is a no-op.
if(tx > 0)
if(
// Top left and top right
s_a[ty + 1][tx + 1] == 2 || s_a[ty + 1][tx - 1] == 2 ||
// bottom left and bottom right
s_a[ty - 1][tx - 1] == 2 || s_a[ty - 1][tx + 1] == 2 ||
// top and bottom
s_a[ty + 1][tx] == 2 || s_a[ty - 1][tx] == 2 ||
// left and right
s_a[ty][tx + 1] == 2 || s_a[ty][tx - 1] == 2 )
{
// NOTE(review): states[tx] is shared by every thread with the same
// threadIdx.x, so concurrent curand_uniform calls mutate one RNG state
// from many threads; presumably states[index] was intended. TODO confirm.
fireProbability = curand_uniform(&states[tx]);
// to match the users probability chance (or to be near enough to it)
// multiply randomised number by 100
if( fireProbability * 100 < fireSpreadProbability )
s_b[ty][tx] = 2;
}
else
s_b[ty][tx] = 1;
break;
}
case 2:
{
// same shared-RNG-state concern as in case 1 above
fireProbability = curand_uniform(&states[tx]);
// if a tree is on fire, there is a 30% chance that it will be burnt down completely
// will not use *100, as that would be a waste of resources
if(fireProbability < 0.3)
s_b[ty][tx] = 0;
else
s_b[ty][tx] = 2;
break;
}
}
__syncthreads();
// copy over results from this computation to the next
s_a[ty][tx] = s_b[ty][tx];
// wait until each thread has written output
__syncthreads();
}
__syncthreads();
b[index] = s_b[ty][tx];
}
// Abort the program with a diagnostic if the most recent runtime API call or
// kernel launch left an error behind (cudaGetLastError also clears it).
void checkForCudaError()
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    printf(" CUDA error at setup_kernel: %s \n", cudaGetErrorString(status));
    exit(-1);
}
// Smallest power of two that is >= x, computed by propagating the top set bit
// of (x - 1) down through all lower bit positions and incrementing.
int nextPow2(int x)
{
    int bits = x - 1;
    bits |= bits >> 1;
    bits |= bits >> 2;
    bits |= bits >> 4;
    bits |= bits >> 8;
    bits |= bits >> 16;
    return bits + 1;
}
// Interactive driver: builds an N x N forest grid on the host, uploads it,
// seeds one RNG state per simulation column, runs the fire-spreading kernel
// for the requested number of phases, and reports GPU timing plus
// (optionally) the resulting grid.
int main()
{
// Number of elements (NxN)
int N = 1024;
// number of phases, and probability of igniting
int fireSpreadPhases = 0;
int fireSpreadProbability = 0;
// Displaying results toggle
int displayResults = 0;
// host memory pointers
int *a_h = NULL;
int *b_h = NULL;
// optimal number of blocks and threads
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
// number of byes in the array
// NOTE(review): memSize is fixed from the default N (1024) before the user's
// N is read below; the buffers stay sized for the 1024x1024 worst case, which
// only works because the prompt restricts N to <= 1024. TODO confirm intent.
size_t memSize = (N*N) * sizeof(int);
// allocate memory on the host
a_h = (int*) malloc(memSize);
b_h = (int*) malloc(memSize);
// device memory pointers
int *a_d = NULL;
int *b_d = NULL;
// allocate memory on the device
cudaMalloc((void**)&a_d, memSize);
cudaMalloc((void**)&b_d, memSize);
// STARTUP - number of simultations and the probability of fire spreading
printf("\nPlease define the number of elements that will be used for this simulation (between 32 and 1024): ");
scanf("%i", &N);
while(N < 32 || N > 1024)
{
printf("\nThe amount defined is too low, please make it between 0 and 1024: ");
scanf("%i", &N);
}
// declare threads and blocks; dim3 variables
int maxThreads = 32;
int maxBlocks = N/32;//deviceProp.maxThreadsPerMultiProcessor/maxThreads*deviceProp.multiProcessorCount;
dim3 grid(maxBlocks, maxBlocks, 1);
dim3 block(maxThreads, maxThreads , 1);
printf("\nThe total number of threads are %i", maxThreads);
printf("\nThe total number of blocks are %i", maxBlocks);
printf("\nThe total amount of elements are: %i x %i = %i.", N,N,N*N);
printf("\n\nFires in forests spread over time; please determine the amount of phases (or turns) "
"that there will be for this session: ");
scanf("%i", &fireSpreadPhases);
printf("\nEnter the probability of fire spreading from an ignited tree to those adjacent to it (0 - 100): ");
scanf("%i", &fireSpreadProbability);
// The central point will be given a value of 2 -- tree is on fire
// This is to determine the starting point for the simulation.
for (int i = 0; i < N; i++)
{
// first row is all empty; as is last
if(i == 0 || i == N - 1)
for (int j = 0; j < N; j++)
a_h[i * N + j] = 0;
else
{
for (int j = 0; j < N; j++)
{
if(j % 16 == 0)
a_h[i * N + j] = 2; // once for each 32 - so that all blocks access and use neighbours
else
a_h[i * N + j] = 1;
}
}
}
// Copy initialisation data above to the device
cudaMemcpy(a_d, a_h, memSize, cudaMemcpyHostToDevice);
// NOTE(review): b_h is copied to the device without ever being initialised,
// so b_d starts as uninitialised heap contents. TODO confirm this is benign
// (the kernel overwrites b before it is read back).
cudaMemcpy(b_d, b_h, memSize, cudaMemcpyHostToDevice);
// INITIALISATION of seeds for curand (cuda version of rand() )
// create time variables so that we have a random unsigned long
// that will generate a different set of random numbers
time_t seedTime;
time(&seedTime);
unsigned int memFloatSize = (N*N) * sizeof(float);
// create random states
curandState* states_d;
// NOTE(review): memFloatSize is already a byte count, so multiplying by
// sizeof(curandState) over-allocates by a factor of sizeof(float);
// (N*N) * sizeof(curandState) was presumably intended. TODO confirm.
cudaMalloc(&states_d, memFloatSize*sizeof(curandState));
dim3 randomGrid(maxBlocks,1,1);
dim3 randomBlock(maxThreads, 1,1);
// setup the seeds
createRandomNumbers<<<randomGrid, randomBlock>>>(N, states_d, (unsigned long) seedTime);
// error detection
// NOTE(review): cudaThreadSynchronize() is deprecated (removed in CUDA 12);
// cudaDeviceSynchronize() is the modern equivalent.
cudaThreadSynchronize();
// check for error
checkForCudaError();
//cudaFuncSetCacheConfig(spreadFireGPU, cudaFuncCachePreferShared);
// STARTING GPU Implementation
printf("\nMeasuring GPU execution time...");
// start timer and recording
float timeGPU;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// execute kernal
spreadFireGPU<<<grid, block>>>(a_d, b_d, N, fireSpreadPhases, fireSpreadProbability, states_d);
// stop timer and display results
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeGPU, start, stop);
printf("\n\nThe GPU implementation speed is: %f ms \n", timeGPU);
// error detection
// NOTE(review): deprecated API, see note above.
cudaThreadSynchronize();
// check for error
checkForCudaError();
// collect results from GPU
cudaMemcpy(b_h, b_d, memSize, cudaMemcpyDeviceToHost);
printf("\n\nWould you like to display the results? (1 for Y or 2 for N): ");
scanf("%i", &displayResults);
if(displayResults == 1)
for(int i = 0; i < N; i ++)
{
for (int j = 0; j < N; j++)
{
printf(" %i ", b_h[i * N + j]);
}
printf("\n");
}
// free device memory
cudaFree(a_d);
cudaFree(b_d);
cudaFree(states_d);
// free up some memory
free(a_h);
free(b_h);
return 0;
} |
3614a258d726043650e9e61cbd174eda7c3c95d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
// Kernel: set the gating state m of each mechanism instance to its
// steady-state value m_inf(v) for the instance's current membrane voltage.
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type mBeta, mAlpha;
// voltage-dependent opening/closing rates of the gate
mAlpha = 0.0070000000000000001*exp( 2.4000000000000004*(v+ 48.0)* 0.038284839203675342);
mBeta = 0.0070000000000000001*exp( -3.5999999999999996*(v+ 48.0)* 0.038284839203675342);
// steady state: m_inf = alpha / (alpha + beta)
_pp_var_m[tid_] = mAlpha/(mAlpha+mBeta);
}
}
// Kernel: scale state variable blockIdx.y of every mechanism instance by the
// per-instance multiplicity (only launched when multiplicity is present).
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
// Kernel: advance the gating state m by one step dt per instance.
// The rates are temperature-scaled, and m' = a*m + b is integrated with a
// (1 + a*dt/2)/(1 - a*dt/2) propagator -- presumably the trapezoidal update
// emitted by the code generator; TODO confirm against the generating solver.
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type ba_0_, a_0_, qt, mInf, mRat, mAlpha, mBeta, ll1_, iab, ll0_;
ll1_ = 0.;
ll0_ = 0.;
// temperature scaling: Q10 = 2.3 relative to 30 degC
qt = pow( 2.2999999999999998, (celsius- 30.0)* 0.10000000000000001);
mAlpha = 0.0070000000000000001*exp( 2.4000000000000004*(v+ 48.0)* 0.038284839203675342);
mBeta = 0.0070000000000000001*exp( -3.5999999999999996*(v+ 48.0)* 0.038284839203675342);
iab = 1.0/(mAlpha+mBeta);
mInf = mAlpha*iab;
mRat = qt/( 15.0+iab);
a_0_ = -1.0*mRat;
ba_0_ = mInf*mRat/a_0_;
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll1_;
}
}
// Kernel: accumulate the current ik = gbar * m * (v - ek) of each instance
// into the per-CV current/conductance vectors and the K+ current density.
// NOTE(review): the 10.0 factor applied with the per-instance weight looks
// like a unit conversion -- TODO confirm against the generator's conventions.
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = _pp_var_gbar[tid_]*_pp_var_m[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gbar[tid_]*_pp_var_m[tid_];
// fma-based accumulation into the shared per-CV vectors
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
// Launch the state-initialisation kernel over all p->width instances; if
// per-instance multiplicities are present, scale the fresh state by them.
void mechanism_Im_v2_gpu_init_(arb_mechanism_ppack* p) {
    auto n = p->width;
    unsigned block_dim = 128;
    unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
    hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
    if (!p->multiplicity) return;
    // FIX: the original launch was malformed hipify output,
    //   hipLaunchKernelGGL(( multiply), dim3(dim3{grid_dim), dim3(1}), block_dim, 0, *p);
    // with mismatched parentheses/braces and a missing stream argument, and
    // could not compile.  The CUDA original is
    //   multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p);
    hipLaunchKernelGGL(( multiply), dim3(grid_dim, 1), dim3(block_dim), 0, 0, *p);
}
// Launch compute_currents over all p->width instances
// (128 threads per block, one instance per thread).
void mechanism_Im_v2_gpu_compute_currents_(arb_mechanism_ppack* p) {
    const unsigned threads = 128;
    const unsigned blocks = ::arb::gpu::impl::block_count(p->width, threads);
    hipLaunchKernelGGL(( compute_currents), dim3(blocks), dim3(threads), 0, 0, *p);
}
// Launch advance_state over all p->width instances
// (128 threads per block, one instance per thread).
void mechanism_Im_v2_gpu_advance_state_(arb_mechanism_ppack* p) {
    const unsigned threads = 128;
    const unsigned blocks = ::arb::gpu::impl::block_count(p->width, threads);
    hipLaunchKernelGGL(( advance_state), dim3(blocks), dim3(threads), 0, 0, *p);
}
// No-op hooks: this mechanism writes no ion state and processes no events.
void mechanism_Im_v2_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_Im_v2_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_Im_v2_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
| 3614a258d726043650e9e61cbd174eda7c3c95d6.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
// Kernel: set the gating state m of each mechanism instance to its
// steady-state value m_inf(v) for the instance's current membrane voltage.
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type mBeta, mAlpha;
// voltage-dependent opening/closing rates of the gate
mAlpha = 0.0070000000000000001*exp( 2.4000000000000004*(v+ 48.0)* 0.038284839203675342);
mBeta = 0.0070000000000000001*exp( -3.5999999999999996*(v+ 48.0)* 0.038284839203675342);
// steady state: m_inf = alpha / (alpha + beta)
_pp_var_m[tid_] = mAlpha/(mAlpha+mBeta);
}
}
// Kernel: scale state variable blockIdx.y of every mechanism instance by the
// per-instance multiplicity (only launched when multiplicity is present).
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
// Kernel: advance the gating state m by one step dt per instance.
// The rates are temperature-scaled, and m' = a*m + b is integrated with a
// (1 + a*dt/2)/(1 - a*dt/2) propagator -- presumably the trapezoidal update
// emitted by the code generator; TODO confirm against the generating solver.
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type ba_0_, a_0_, qt, mInf, mRat, mAlpha, mBeta, ll1_, iab, ll0_;
ll1_ = 0.;
ll0_ = 0.;
// temperature scaling: Q10 = 2.3 relative to 30 degC
qt = pow( 2.2999999999999998, (celsius- 30.0)* 0.10000000000000001);
mAlpha = 0.0070000000000000001*exp( 2.4000000000000004*(v+ 48.0)* 0.038284839203675342);
mBeta = 0.0070000000000000001*exp( -3.5999999999999996*(v+ 48.0)* 0.038284839203675342);
iab = 1.0/(mAlpha+mBeta);
mInf = mAlpha*iab;
mRat = qt/( 15.0+iab);
a_0_ = -1.0*mRat;
ba_0_ = mInf*mRat/a_0_;
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll1_;
}
}
// Kernel: accumulate the current ik = gbar * m * (v - ek) of each instance
// into the per-CV current/conductance vectors and the K+ current density.
// NOTE(review): the 10.0 factor applied with the per-instance weight looks
// like a unit conversion -- TODO confirm against the generator's conventions.
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = _pp_var_gbar[tid_]*_pp_var_m[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gbar[tid_]*_pp_var_m[tid_];
// fma-based accumulation into the shared per-CV vectors
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
// Run the init kernel over all mechanism instances; if per-instance
// multiplicities are present, scale the freshly initialised state by them.
void mechanism_Im_v2_gpu_init_(arb_mechanism_ppack* p) {
    const unsigned threads = 128;
    const unsigned blocks = ::arb::gpu::impl::block_count(p->width, threads);
    init<<<blocks, threads>>>(*p);
    if (p->multiplicity) {
        multiply<<<dim3{blocks, 1}, threads>>>(*p);
    }
}
// Launch compute_currents over all p->width instances
// (128 threads per block, one instance per thread).
void mechanism_Im_v2_gpu_compute_currents_(arb_mechanism_ppack* p) {
    const unsigned threads = 128;
    const unsigned blocks = ::arb::gpu::impl::block_count(p->width, threads);
    compute_currents<<<blocks, threads>>>(*p);
}
// Launch advance_state over all p->width instances
// (128 threads per block, one instance per thread).
void mechanism_Im_v2_gpu_advance_state_(arb_mechanism_ppack* p) {
    const unsigned threads = 128;
    const unsigned blocks = ::arb::gpu::impl::block_count(p->width, threads);
    advance_state<<<blocks, threads>>>(*p);
}
// No-op hooks: this mechanism writes no ion state and processes no events.
void mechanism_Im_v2_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_Im_v2_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_Im_v2_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
|
985f2dbfce1f3915e9764ddcc01848513e3a2160.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _CONVOLUTION_KERNEL_H_
#define _CONVOLUTION_KERNEL_H_
/* The kernel is stored in GPU global memory in this kernel implementation.
* The input vector is also stored in global memory.
*/
/* 1D convolution, one output element per thread; both the input vector N and
 * the filter taps live in global memory.  Taps that fall outside the input
 * at the boundaries contribute zero (zero-padded convolution). */
__global__ void convolution_kernel_v1(float *N, float *result, float *kernel,
                                      int num_elements, int kernel_width)
{
    int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_idx < num_elements) {
        int first_tap = out_idx - kernel_width / 2;
        float acc = 0.0f;
        for (int t = 0; t < kernel_width; t++) {
            int in_idx = first_tap + t;
            if (in_idx >= 0 && in_idx < num_elements)
                acc += N[in_idx] * kernel[t];
        }
        result[out_idx] = acc;
    }
}
/* The kernel is stored in GPU constant memory in kernel_c.
* The input vector is stored in global memory.
*/
/* 1D convolution with the filter taps resident in constant memory (kernel_c);
 * the input vector stays in global memory.  One output element per thread,
 * with zero padding at the input boundaries. */
__global__ void convolution_kernel_v2(float *N, float *result,
                                      int num_elements, int kernel_width)
{
    int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_idx >= num_elements)
        return;
    int first_tap = out_idx - kernel_width / 2;
    float acc = 0.0f;
    for (int t = 0; t < kernel_width; t++) {
        int in_idx = first_tap + t;
        if (in_idx >= 0 && in_idx < num_elements)
            acc += N[in_idx] * kernel_c[t];
    }
    result[out_idx] = acc;
}
/* Tiled convolution kernel using shared memory.
* The kernel is stored in GPU constant memory in kernel_c.
*/
/* Tiled 1D convolution: each block stages its input tile plus left/right
 * halos of half_width elements into shared memory, then each thread
 * convolves the constant-memory taps kernel_c over its window.
 * Preconditions: blockDim.x == THREAD_BLOCK_SIZE and
 * kernel_width <= MAX_KERNEL_WIDTH. */
__global__ void convolution_kernel_tiled(float *N, float *result,
                                         int num_elements, int kernel_width)
{
    __shared__ float N_s[THREAD_BLOCK_SIZE + MAX_KERNEL_WIDTH - 1];

    /* Obtain the index of the thread within the grid. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int half_width = kernel_width/2;

    /* Load the left halo elements from the previous tile; positions before
     * the start of the input are zero-padded. */
    int left_halo_index = (blockIdx.x - 1) * blockDim.x + threadIdx.x;
    if (threadIdx.x >= (blockDim.x - half_width)) {
        if (left_halo_index < 0)
            N_s[threadIdx.x - (blockDim.x - half_width)] = 0.0;
        else
            N_s[threadIdx.x - (blockDim.x - half_width)] = N[left_halo_index];
    }

    /* Load the center elements for the tile (zero-padded past the end). */
    if (i < num_elements)
        N_s[half_width + threadIdx.x] = N[i];
    else
        N_s[half_width + threadIdx.x] = 0.0;

    /* Load the right halo elements from the next tile. */
    int right_halo_index = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
    if (threadIdx.x < half_width) {
        if (right_halo_index >= num_elements)
            N_s[threadIdx.x + (blockDim.x + half_width)] = 0.0;
        else
            N_s[threadIdx.x + (blockDim.x + half_width)] = N[right_halo_index];
    }

    __syncthreads();

    /* Convolve kernel over input elements */
    float sum = 0.0;
    for (int j = 0; j < kernel_width; j++)
        sum += N_s[j + threadIdx.x] * kernel_c[j];

    /* FIX: guard the store -- threads in the last block with
     * i >= num_elements previously wrote past the end of result[]. */
    if (i < num_elements)
        result[i] = sum;
}
#endif /* _CONVOLUTION_KERNEL_H_ */
| 985f2dbfce1f3915e9764ddcc01848513e3a2160.cu | #ifndef _CONVOLUTION_KERNEL_H_
#define _CONVOLUTION_KERNEL_H_
/* The kernel is stored in GPU global memory in this kernel implementation.
* The input vector is also stored in global memory.
*/
/* 1D convolution, one output element per thread; both the input vector N and
 * the filter taps live in global memory.  Taps that fall outside the input
 * at the boundaries contribute zero (zero-padded convolution). */
__global__ void convolution_kernel_v1(float *N, float *result, float *kernel,
                                      int num_elements, int kernel_width)
{
    int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_idx < num_elements) {
        int first_tap = out_idx - kernel_width / 2;
        float acc = 0.0f;
        for (int t = 0; t < kernel_width; t++) {
            int in_idx = first_tap + t;
            if (in_idx >= 0 && in_idx < num_elements)
                acc += N[in_idx] * kernel[t];
        }
        result[out_idx] = acc;
    }
}
/* The kernel is stored in GPU constant memory in kernel_c.
* The input vector is stored in global memory.
*/
/* 1D convolution with the filter taps resident in constant memory (kernel_c);
 * the input vector stays in global memory.  One output element per thread,
 * with zero padding at the input boundaries. */
__global__ void convolution_kernel_v2(float *N, float *result,
                                      int num_elements, int kernel_width)
{
    int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_idx >= num_elements)
        return;
    int first_tap = out_idx - kernel_width / 2;
    float acc = 0.0f;
    for (int t = 0; t < kernel_width; t++) {
        int in_idx = first_tap + t;
        if (in_idx >= 0 && in_idx < num_elements)
            acc += N[in_idx] * kernel_c[t];
    }
    result[out_idx] = acc;
}
/* Tiled convolution kernel using shared memory.
* The kernel is stored in GPU constant memory in kernel_c.
*/
/* Tiled 1D convolution: each block stages its input tile plus left/right
 * halos of half_width elements into shared memory, then each thread
 * convolves the constant-memory taps kernel_c over its window.
 * Preconditions: blockDim.x == THREAD_BLOCK_SIZE and
 * kernel_width <= MAX_KERNEL_WIDTH. */
__global__ void convolution_kernel_tiled(float *N, float *result,
                                         int num_elements, int kernel_width)
{
    __shared__ float N_s[THREAD_BLOCK_SIZE + MAX_KERNEL_WIDTH - 1];

    /* Obtain the index of the thread within the grid. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int half_width = kernel_width/2;

    /* Load the left halo elements from the previous tile; positions before
     * the start of the input are zero-padded. */
    int left_halo_index = (blockIdx.x - 1) * blockDim.x + threadIdx.x;
    if (threadIdx.x >= (blockDim.x - half_width)) {
        if (left_halo_index < 0)
            N_s[threadIdx.x - (blockDim.x - half_width)] = 0.0;
        else
            N_s[threadIdx.x - (blockDim.x - half_width)] = N[left_halo_index];
    }

    /* Load the center elements for the tile (zero-padded past the end). */
    if (i < num_elements)
        N_s[half_width + threadIdx.x] = N[i];
    else
        N_s[half_width + threadIdx.x] = 0.0;

    /* Load the right halo elements from the next tile. */
    int right_halo_index = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
    if (threadIdx.x < half_width) {
        if (right_halo_index >= num_elements)
            N_s[threadIdx.x + (blockDim.x + half_width)] = 0.0;
        else
            N_s[threadIdx.x + (blockDim.x + half_width)] = N[right_halo_index];
    }

    __syncthreads();

    /* Convolve kernel over input elements */
    float sum = 0.0;
    for (int j = 0; j < kernel_width; j++)
        sum += N_s[j + threadIdx.x] * kernel_c[j];

    /* FIX: guard the store -- threads in the last block with
     * i >= num_elements previously wrote past the end of result[]. */
    if (i < num_elements)
        result[i] = sum;
}
#endif /* _CONVOLUTION_KERNEL_H_ */
|
819f075397afe5233ab147ab69131e4966e99d1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel2_z;
int xdim0_advec_mom_kernel2_z_h = -1;
__constant__ int ydim0_advec_mom_kernel2_z;
int ydim0_advec_mom_kernel2_z_h = -1;
__constant__ int xdim1_advec_mom_kernel2_z;
int xdim1_advec_mom_kernel2_z_h = -1;
__constant__ int ydim1_advec_mom_kernel2_z;
int ydim1_advec_mom_kernel2_z_h = -1;
__constant__ int xdim2_advec_mom_kernel2_z;
int xdim2_advec_mom_kernel2_z_h = -1;
__constant__ int ydim2_advec_mom_kernel2_z;
int ydim2_advec_mom_kernel2_z_h = -1;
__constant__ int xdim3_advec_mom_kernel2_z;
int xdim3_advec_mom_kernel2_z_h = -1;
__constant__ int ydim3_advec_mom_kernel2_z;
int ydim3_advec_mom_kernel2_z_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel2_z * (y) + \
xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel2_z * (y) + \
xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel2_z * (y) + \
xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel2_z * (y) + \
xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z * (z))
// user function
// Z-direction momentum advection update (auto-generated by ops.py).
// In-place update of vel1 at the current grid point:
//   vel1 = (vel1 * node_mass_pre + mom_flux(z-1) - mom_flux(z)) / node_mass_post
// i.e. combine the old momentum with the net z-direction momentum flux
// into the node, then renormalise by the post-advection node mass.
__device__
inline void
advec_mom_kernel2_z(double *vel1, const double *node_mass_post,
const double *node_mass_pre, const double *mom_flux) {
vel1[OPS_ACC0(0, 0, 0)] =
(vel1[OPS_ACC0(0, 0, 0)] * node_mass_pre[OPS_ACC2(0, 0, 0)] +
mom_flux[OPS_ACC3(0, 0, -1)] - mom_flux[OPS_ACC3(0, 0, 0)]) /
node_mass_post[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
// Wrapper kernel (auto-generated by ops.py): one thread per grid point in
// the [size0, size1, size2] iteration range. Each thread offsets the four
// dat base pointers to its own (x,y,z) element using the per-dat x/y
// dimensions held in __constant__ memory, then calls the user function.
__global__ void ops_advec_mom_kernel2_z(double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Per-thread element offset: stride 1 in x, xdim in y, xdim*ydim in z.
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z;
// Range guard: the grid may overshoot the iteration space.
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel2_z(arg0, arg1, arg2, arg3);
}
}
// host stub function
// Host-side stub (auto-generated by ops.py): computes the locally-owned
// iteration range, refreshes the __constant__ dat dimensions if they have
// changed since the last call, computes per-argument byte offsets into
// device memory, performs halo / host-device exchanges, launches the
// wrapper kernel, and updates the OPS timing/transfer statistics.
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 36))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(36, "advec_mom_kernel2_z");
OPS_kernels[36].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
// (under MPI, clip the global range to this rank's sub-block and
// translate to local indices; otherwise use the range as given)
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
// Upload dat dimensions to __constant__ memory only when they differ
// from the cached host-side copies (the *_h shadow variables).
if (xdim0 != xdim0_advec_mom_kernel2_z_h ||
ydim0 != ydim0_advec_mom_kernel2_z_h ||
xdim1 != xdim1_advec_mom_kernel2_z_h ||
ydim1 != ydim1_advec_mom_kernel2_z_h ||
xdim2 != xdim2_advec_mom_kernel2_z_h ||
ydim2 != ydim2_advec_mom_kernel2_z_h ||
xdim3 != xdim3_advec_mom_kernel2_z_h ||
ydim3 != ydim3_advec_mom_kernel2_z_h) {
hipMemcpyToSymbol(xdim0_advec_mom_kernel2_z, &xdim0, sizeof(int));
xdim0_advec_mom_kernel2_z_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_mom_kernel2_z, &ydim0, sizeof(int));
ydim0_advec_mom_kernel2_z_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_mom_kernel2_z, &xdim1, sizeof(int));
xdim1_advec_mom_kernel2_z_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_mom_kernel2_z, &ydim1, sizeof(int));
ydim1_advec_mom_kernel2_z_h = ydim1;
hipMemcpyToSymbol(xdim2_advec_mom_kernel2_z, &xdim2, sizeof(int));
xdim2_advec_mom_kernel2_z_h = xdim2;
hipMemcpyToSymbol(ydim2_advec_mom_kernel2_z, &ydim2, sizeof(int));
ydim2_advec_mom_kernel2_z_h = ydim2;
hipMemcpyToSymbol(xdim3_advec_mom_kernel2_z, &xdim3, sizeof(int));
xdim3_advec_mom_kernel2_z_h = xdim3;
hipMemcpyToSymbol(ydim3_advec_mom_kernel2_z, &ydim3, sizeof(int));
ydim3_advec_mom_kernel2_z_h = ydim3;
}
// 2D thread blocks; z handled one plane per grid.z slice.
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
// (byte offset of the range start relative to the dat's device base,
// accounting for stencil stride, dat base and halo depth d_m)
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[36].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_mom_kernel2_z), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
if (OPS_diags > 1) {
// Synchronise only when timing, so the kernel duration is measurable.
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[36].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[36].mpi_time += t2 - t1;
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
| 819f075397afe5233ab147ab69131e4966e99d1d.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_advec_mom_kernel2_z;
int xdim0_advec_mom_kernel2_z_h = -1;
__constant__ int ydim0_advec_mom_kernel2_z;
int ydim0_advec_mom_kernel2_z_h = -1;
__constant__ int xdim1_advec_mom_kernel2_z;
int xdim1_advec_mom_kernel2_z_h = -1;
__constant__ int ydim1_advec_mom_kernel2_z;
int ydim1_advec_mom_kernel2_z_h = -1;
__constant__ int xdim2_advec_mom_kernel2_z;
int xdim2_advec_mom_kernel2_z_h = -1;
__constant__ int ydim2_advec_mom_kernel2_z;
int ydim2_advec_mom_kernel2_z_h = -1;
__constant__ int xdim3_advec_mom_kernel2_z;
int xdim3_advec_mom_kernel2_z_h = -1;
__constant__ int ydim3_advec_mom_kernel2_z;
int ydim3_advec_mom_kernel2_z_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_mom_kernel2_z * (y) + \
xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_mom_kernel2_z * (y) + \
xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_mom_kernel2_z * (y) + \
xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_mom_kernel2_z * (y) + \
xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z * (z))
// user function
// Z-direction momentum advection update (auto-generated by ops.py).
// In-place update of vel1 at the current grid point:
//   vel1 = (vel1 * node_mass_pre + mom_flux(z-1) - mom_flux(z)) / node_mass_post
// i.e. combine the old momentum with the net z-direction momentum flux
// into the node, then renormalise by the post-advection node mass.
__device__
inline void
advec_mom_kernel2_z(double *vel1, const double *node_mass_post,
const double *node_mass_pre, const double *mom_flux) {
vel1[OPS_ACC0(0, 0, 0)] =
(vel1[OPS_ACC0(0, 0, 0)] * node_mass_pre[OPS_ACC2(0, 0, 0)] +
mom_flux[OPS_ACC3(0, 0, -1)] - mom_flux[OPS_ACC3(0, 0, 0)]) /
node_mass_post[OPS_ACC1(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
// Wrapper kernel (auto-generated by ops.py): one thread per grid point in
// the [size0, size1, size2] iteration range. Each thread offsets the four
// dat base pointers to its own (x,y,z) element using the per-dat x/y
// dimensions held in __constant__ memory, then calls the user function.
__global__ void ops_advec_mom_kernel2_z(double *__restrict arg0,
const double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Per-thread element offset: stride 1 in x, xdim in y, xdim*ydim in z.
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim0_advec_mom_kernel2_z * ydim0_advec_mom_kernel2_z;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim1_advec_mom_kernel2_z * ydim1_advec_mom_kernel2_z;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim2_advec_mom_kernel2_z * ydim2_advec_mom_kernel2_z;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel2_z +
idx_z * 1 * 1 * xdim3_advec_mom_kernel2_z * ydim3_advec_mom_kernel2_z;
// Range guard: the grid may overshoot the iteration space.
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_mom_kernel2_z(arg0, arg1, arg2, arg3);
}
}
// host stub function
// Host-side stub (auto-generated by ops.py): computes the locally-owned
// iteration range, refreshes the __constant__ dat dimensions if they have
// changed since the last call, computes per-argument byte offsets into
// device memory, performs halo / host-device exchanges, launches the
// wrapper kernel, and updates the OPS timing/transfer statistics.
void ops_par_loop_advec_mom_kernel2_z(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 36))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(36, "advec_mom_kernel2_z");
OPS_kernels[36].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
// (under MPI, clip the global range to this rank's sub-block and
// translate to local indices; otherwise use the range as given)
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
// Upload dat dimensions to __constant__ memory only when they differ
// from the cached host-side copies (the *_h shadow variables).
if (xdim0 != xdim0_advec_mom_kernel2_z_h ||
ydim0 != ydim0_advec_mom_kernel2_z_h ||
xdim1 != xdim1_advec_mom_kernel2_z_h ||
ydim1 != ydim1_advec_mom_kernel2_z_h ||
xdim2 != xdim2_advec_mom_kernel2_z_h ||
ydim2 != ydim2_advec_mom_kernel2_z_h ||
xdim3 != xdim3_advec_mom_kernel2_z_h ||
ydim3 != ydim3_advec_mom_kernel2_z_h) {
cudaMemcpyToSymbol(xdim0_advec_mom_kernel2_z, &xdim0, sizeof(int));
xdim0_advec_mom_kernel2_z_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_mom_kernel2_z, &ydim0, sizeof(int));
ydim0_advec_mom_kernel2_z_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_mom_kernel2_z, &xdim1, sizeof(int));
xdim1_advec_mom_kernel2_z_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_mom_kernel2_z, &ydim1, sizeof(int));
ydim1_advec_mom_kernel2_z_h = ydim1;
cudaMemcpyToSymbol(xdim2_advec_mom_kernel2_z, &xdim2, sizeof(int));
xdim2_advec_mom_kernel2_z_h = xdim2;
cudaMemcpyToSymbol(ydim2_advec_mom_kernel2_z, &ydim2, sizeof(int));
ydim2_advec_mom_kernel2_z_h = ydim2;
cudaMemcpyToSymbol(xdim3_advec_mom_kernel2_z, &xdim3, sizeof(int));
xdim3_advec_mom_kernel2_z_h = xdim3;
cudaMemcpyToSymbol(ydim3_advec_mom_kernel2_z, &ydim3, sizeof(int));
ydim3_advec_mom_kernel2_z_h = ydim3;
}
// 2D thread blocks; z handled one plane per grid.z slice.
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
// (byte offset of the range start relative to the dat's device base,
// accounting for stencil stride, dat base and halo depth d_m)
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[36].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_mom_kernel2_z<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
if (OPS_diags > 1) {
// Synchronise only when timing, so the kernel duration is measurable.
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[36].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[36].mpi_time += t2 - t1;
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[36].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
|
138437cdeb3b728055284327bea3706c93dd580d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto _pp_var_vshift __attribute__((unused)) = params_.globals[0];\
auto _pp_var_mTauF __attribute__((unused)) = params_.globals[1];\
auto _pp_var_hTauF __attribute__((unused)) = params_.globals[2];\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
// One thread per mechanism instance: initialise the gating states to
// their voltage-dependent steady-state values
//   m_inf(v) = 1/(1+exp(-(v+47-vshift)/29))   (0.03448... = 1/29)
//   h_inf(v) = 1/(1+exp( (v+66-vshift)/10))
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
_pp_var_m[tid_] = 1.0/( 1.0+exp( -(v+ 47.0-_pp_var_vshift)* 0.034482758620689655));
_pp_var_h[tid_] = 1.0/( 1.0+exp((v+ 66.0-_pp_var_vshift)* 0.10000000000000001));
}
}
// Scale one state variable (selected by blockIdx.y) by the per-instance
// multiplicity; launched with grid.y = number of state variables.
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
// Advance the gating variables m and h by one time step. qt is a Q10
// temperature factor 2.3^((celsius-21)/10); each state is stepped toward
// its steady state with the trapezoidal factor (1 + a*dt/2)/(1 - a*dt/2).
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type a_1_, ba_0_, a_0_, hRat, qt, mInf, mRat, hInf, ll0_, ba_1_, ll3_, ll1_, ll2_;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
qt = pow( 2.2999999999999998, (celsius- 21.0)* 0.10000000000000001);
mInf = 1.0/( 1.0+exp( -(v+ 47.0-_pp_var_vshift)* 0.034482758620689655));
hInf = 1.0/( 1.0+exp((v+ 66.0-_pp_var_vshift)* 0.10000000000000001));
mRat = qt/( 0.34000000000000002+_pp_var_mTauF* 0.92000000000000004*exp(pow( -((v+ 71.0-_pp_var_vshift)* 0.016949152542372881), 2.0)));
hRat = qt/( 8.0+_pp_var_hTauF* 49.0*exp(pow( -((v+ 73.0-_pp_var_vshift)* 0.043478260869565216), 2.0)));
a_0_ = -1.0*mRat;
ba_0_ = mInf*mRat/a_0_;
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll1_;
a_1_ = -1.0*hRat;
ba_1_ = hInf*hRat/a_1_;
ll2_ = a_1_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_h[tid_] = -ba_1_+(_pp_var_h[tid_]+ba_1_)*ll3_;
}
}
// Compute the potassium current ik = gbar * m^4 * h * (v - ek) and
// accumulate the weight-scaled current, conductance and ion current
// density into the node/ion views (fma keeps the accumulate in one op).
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = _pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_h[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_h[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
/* Launch init (one thread per mechanism instance), then — if instances
 * were coalesced — scale each state variable by its multiplicity.
 * BUG FIX: the auto-hipified multiply launch was malformed
 * (`dim3(dim3{grid_dim), dim3(2})` split the grid argument across macro
 * slots and dropped the stream argument, so it did not compile); restored
 * the intended (grid_dim, 2) grid — one grid.y slice per state variable,
 * matching the CUDA original `multiply<<<dim3{grid_dim, 2}, block_dim>>>`. */
void mechanism_K_T_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(init, dim3(grid_dim), dim3(block_dim), 0, 0, *p);
if (!p->multiplicity) return;
hipLaunchKernelGGL(multiply, dim3(grid_dim, 2), dim3(block_dim), 0, 0, *p);
}
// Launch compute_currents: one thread per mechanism instance (p->width).
void mechanism_K_T_gpu_compute_currents_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
hipLaunchKernelGGL(( compute_currents), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
}
// Launch advance_state: one thread per mechanism instance (p->width),
// on the default stream with no dynamic shared memory.
void mechanism_K_T_gpu_advance_state_(arb_mechanism_ppack* p) {
const unsigned threads = 128;
const unsigned blocks = ::arb::gpu::impl::block_count(p->width, threads);
hipLaunchKernelGGL(advance_state, dim3(blocks), dim3(threads), 0, 0, *p);
}
// No-op ABI entry points: this mechanism has empty implementations for
// ion writes, post-event processing and event delivery.
void mechanism_K_T_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_K_T_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_K_T_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
| 138437cdeb3b728055284327bea3706c93dd580d.cu | #include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>
namespace arb {
namespace allen_catalogue {
#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto _pp_var_vshift __attribute__((unused)) = params_.globals[0];\
auto _pp_var_mTauF __attribute__((unused)) = params_.globals[1];\
auto _pp_var_hTauF __attribute__((unused)) = params_.globals[2];\
auto* _pp_var_m __attribute__((unused)) = params_.state_vars[0];\
auto* _pp_var_h __attribute__((unused)) = params_.state_vars[1];\
auto* _pp_var_gbar __attribute__((unused)) = params_.parameters[0];\
auto& _pp_var_ion_k __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_k_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK
namespace {
using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;
// One thread per mechanism instance: initialise the gating states to
// their voltage-dependent steady-state values
//   m_inf(v) = 1/(1+exp(-(v+47-vshift)/29))   (0.03448... = 1/29)
//   h_inf(v) = 1/(1+exp( (v+66-vshift)/10))
__global__
void init(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
_pp_var_m[tid_] = 1.0/( 1.0+exp( -(v+ 47.0-_pp_var_vshift)* 0.034482758620689655));
_pp_var_h[tid_] = 1.0/( 1.0+exp((v+ 66.0-_pp_var_vshift)* 0.10000000000000001));
}
}
// Scale one state variable (selected by blockIdx.y) by the per-instance
// multiplicity; launched with grid.y = number of state variables.
__global__
void multiply(arb_mechanism_ppack params_) {
PPACK_IFACE_BLOCK;
auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
auto idx_ = blockIdx.y; if(tid_<_pp_var_width) {
_pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
}
}
// Advance the gating variables m and h by one time step. qt is a Q10
// temperature factor 2.3^((celsius-21)/10); each state is stepped toward
// its steady state with the trapezoidal factor (1 + a*dt/2)/(1 - a*dt/2).
__global__
void advance_state(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type dt = _pp_var_vec_dt[node_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type celsius = _pp_var_temperature_degC[node_indexi_];
arb_value_type a_1_, ba_0_, a_0_, hRat, qt, mInf, mRat, hInf, ll0_, ba_1_, ll3_, ll1_, ll2_;
ll3_ = 0.;
ll2_ = 0.;
ll1_ = 0.;
ll0_ = 0.;
qt = pow( 2.2999999999999998, (celsius- 21.0)* 0.10000000000000001);
mInf = 1.0/( 1.0+exp( -(v+ 47.0-_pp_var_vshift)* 0.034482758620689655));
hInf = 1.0/( 1.0+exp((v+ 66.0-_pp_var_vshift)* 0.10000000000000001));
mRat = qt/( 0.34000000000000002+_pp_var_mTauF* 0.92000000000000004*exp(pow( -((v+ 71.0-_pp_var_vshift)* 0.016949152542372881), 2.0)));
hRat = qt/( 8.0+_pp_var_hTauF* 49.0*exp(pow( -((v+ 73.0-_pp_var_vshift)* 0.043478260869565216), 2.0)));
a_0_ = -1.0*mRat;
ba_0_ = mInf*mRat/a_0_;
ll0_ = a_0_*dt;
ll1_ = ( 1.0+ 0.5*ll0_)/( 1.0- 0.5*ll0_);
_pp_var_m[tid_] = -ba_0_+(_pp_var_m[tid_]+ba_0_)*ll1_;
a_1_ = -1.0*hRat;
ba_1_ = hInf*hRat/a_1_;
ll2_ = a_1_*dt;
ll3_ = ( 1.0+ 0.5*ll2_)/( 1.0- 0.5*ll2_);
_pp_var_h[tid_] = -ba_1_+(_pp_var_h[tid_]+ba_1_)*ll3_;
}
}
// Compute the potassium current ik = gbar * m^4 * h * (v - ek) and
// accumulate the weight-scaled current, conductance and ion current
// density into the node/ion views (fma keeps the accumulate in one op).
__global__
void compute_currents(arb_mechanism_ppack params_) {
int n_ = params_.width;
int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
PPACK_IFACE_BLOCK;
if (tid_<n_) {
auto ion_k_indexi_ = _pp_var_ion_k_index[tid_];
auto node_indexi_ = _pp_var_node_index[tid_];
arb_value_type conductivity_ = 0;
arb_value_type current_ = 0;
arb_value_type ek = _pp_var_ion_k.reversal_potential[ion_k_indexi_];
arb_value_type v = _pp_var_vec_v[node_indexi_];
arb_value_type ik = 0;
ik = _pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_h[tid_]*(v-ek);
current_ = ik;
conductivity_ = _pp_var_gbar[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_m[tid_]*_pp_var_h[tid_];
_pp_var_vec_g[node_indexi_] = fma(10.0*_pp_var_weight[tid_], conductivity_, _pp_var_vec_g[node_indexi_]);
_pp_var_vec_i[node_indexi_] = fma(10.0*_pp_var_weight[tid_], current_, _pp_var_vec_i[node_indexi_]);
_pp_var_ion_k.current_density[ion_k_indexi_] = fma(10.0*_pp_var_weight[tid_], ik, _pp_var_ion_k.current_density[ion_k_indexi_]);
}
}
} // namespace
// Launch init (one thread per mechanism instance), then — if instances
// were coalesced — scale each state variable by its multiplicity.
void mechanism_K_T_gpu_init_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
init<<<grid_dim, block_dim>>>(*p);
if (!p->multiplicity) return;
// grid.y = 2: one y-slice per state variable (m, h).
multiply<<<dim3{grid_dim, 2}, block_dim>>>(*p);
}
// Launch compute_currents: one thread per mechanism instance (p->width).
void mechanism_K_T_gpu_compute_currents_(arb_mechanism_ppack* p) {
const unsigned threads = 128;
const unsigned blocks = ::arb::gpu::impl::block_count(p->width, threads);
compute_currents<<<blocks, threads>>>(*p);
}
// Launch advance_state: one thread per mechanism instance (p->width).
void mechanism_K_T_gpu_advance_state_(arb_mechanism_ppack* p) {
auto n = p->width;
unsigned block_dim = 128;
unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
advance_state<<<grid_dim, block_dim>>>(*p);
}
// No-op ABI entry points: this mechanism has empty implementations for
// ion writes, post-event processing and event delivery.
void mechanism_K_T_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_K_T_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_K_T_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}
} // namespace allen_catalogue
} // namespace arb
|
ee46aaf2ac4ad5c3aa6c7de395d5205e8decdf22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
//
// This sample demonstrates the use of streams for concurrent execution
//
// Devices of compute capability 1.x will run the kernels one after another
// Devices of compute capability 2.0 or higher can overlap the kernels
//
#include <prof.cu>
#include <stdlib.h>
#include <stdio.h>
#include <cutil_inline.h>
// Busy-work kernel: each thread grinds through n sin/tan evaluations so the
// host can measure how kernels in different streams overlap.
// NOTE(review): `value` is an int, so each float product is truncated on
// assignment, and the i==0 term (sin 0 + tan 0 == 0) zeroes it immediately;
// also `idx` uses only threadIdx.x, so all blocks of one launch write the
// same slots. The stored result is meaningless by design -- only the
// elapsed time matters here.
__global__ void mykernel( int *a, int n )
{
int idx = threadIdx.x;
int value = 1;
for(int i=0; i<n; i++)
value *= sin( (float)i ) + tan( (float)i );
a[idx] = value;
}
// Demo driver: launches `nkernels` copies of mykernel, one per HIP stream,
// times them together, then times a single launch, so the ratio shows how
// much concurrent kernel execution the device achieved.
int main(int argc, const char **argv)
{
GpuProfiling::initProf();
int nblocks = 4;
int nthreads = 64;
int n = 50000;
int nkernels = 8;
int nbytes;
int devID;
hipDeviceProp_t deviceProps;
int *d_A=0;
hipStream_t *stream;
hipEvent_t start, stop;
float elapsedTime;
int qatest = 0;
// begin
printf("[concurrentKernels] - Starting...\n\n");
// get number of kernels if overridden on the command line
if (cutCheckCmdLineFlag(argc, (const char **)argv, "nkernels")) {
cutGetCmdLineArgumenti(argc, (const char **)argv, "nkernels", &nkernels);
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if (cutCheckCmdLineFlag(argc, (const char**)argv, "device")) {
cutGetCmdLineArgumenti(argc, (const char **) argv, "device", &devID);
}
else {
devID = cutGetMaxGflopsDeviceId();
}
cutilSafeCall(hipSetDevice(devID));
// QA testing mode
if (cutCheckCmdLineFlag(argc, (const char**)argv, "qatest")) {
qatest = 1;
}
// get number of SMs on this GPU
cutilSafeCall(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA Device %s has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
printf("CUDA Device %s is%s capable of concurrent kernel execution\n", deviceProps.name, (deviceProps.concurrentKernels==0)?" NOT":"");
// One stream per kernel so the launches can overlap on capable devices.
stream = (hipStream_t *)malloc(nkernels * sizeof(hipStream_t));
for(int i=0; i<nkernels; i++)
{
cutilSafeCall(hipStreamCreate(&stream[i]));
}
// note: in this sample we will repeatedly overwrite the same
// block of device mem, but that's okay because we don't really
// care about the output of the kernel for the purposes of this
// example.
nbytes = nkernels * nthreads * sizeof(int);
cutilSafeCall(hipMalloc((void **)&d_A, nbytes));
cutilSafeCall(hipEventCreate(&start));
cutilSafeCall(hipEventCreate(&stop));
// start timer then launch all kernels in their streams
cutilSafeCall(hipEventRecord(start, 0));
for(int i=0; i<nkernels; i++)
{
// avoid synchronization points (events, error checks, etc.) inside
// this loop in order to get concurrent execution on devices that support it
GpuProfiling::prepareProfiling( nblocks, nthreads );
hipLaunchKernelGGL(( mykernel), dim3(nblocks), dim3(nthreads), 0, stream[i], &d_A[i*nthreads], n);
GpuProfiling::addResults("mykernel");
}
// Record on the NULL stream: it waits for all other streams' work.
cutilSafeCall(hipEventRecord(stop, 0));
// wait for all streams to finish
cutilSafeCall(hipEventSynchronize(stop));
// get total time for all kernels
cutilSafeCall(hipEventElapsedTime(&elapsedTime, start, stop));
printf("\nAll %d kernels together took %.3fs\n", nkernels, elapsedTime/1000.f);
// check time to execute a single iteration
cutilSafeCall(hipEventRecord(start, 0));
GpuProfiling::prepareProfiling( nblocks, nthreads );
hipLaunchKernelGGL(( mykernel), dim3(nblocks), dim3(nthreads), 0, stream[0], d_A, n);
GpuProfiling::addResults("mykernel");
cutilCheckMsg("kernel launch failure");
cutilSafeCall(hipEventRecord(stop, 0));
cutilSafeCall(hipEventSynchronize(stop));
cutilSafeCall(hipEventElapsedTime(&elapsedTime, start, stop));
printf("(~%.3fs per kernel * %d kernels = ~%.3fs if no concurrent execution)\n",
elapsedTime/1000.f, nkernels, nkernels*elapsedTime/1000.f);
GpuProfiling::printResults();
// cleanup
printf("\nCleaning up...\n");
hipEventDestroy(start);
hipEventDestroy(stop);
if (stream)
{
for(int i=0; i<nkernels; i++)
{
cutilSafeCall(hipStreamDestroy(stream[i]));
}
free(stream);
}
if (d_A) hipFree(d_A);
if (qatest) {
// any errors that might have happened will have already been reported
printf("[concurrentKernels] - Test Results:\nPASSED\n");
}
exit(0);
}
| ee46aaf2ac4ad5c3aa6c7de395d5205e8decdf22.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
//
// This sample demonstrates the use of streams for concurrent execution
//
// Devices of compute capability 1.x will run the kernels one after another
// Devices of compute capability 2.0 or higher can overlap the kernels
//
#include <prof.cu>
#include <stdlib.h>
#include <stdio.h>
#include <cutil_inline.h>
// Busy-work kernel: each thread grinds through n sin/tan evaluations so the
// host can measure how kernels in different streams overlap.
// NOTE(review): `value` is an int, so each float product is truncated on
// assignment, and the i==0 term (sin 0 + tan 0 == 0) zeroes it immediately;
// also `idx` uses only threadIdx.x, so all blocks of one launch write the
// same slots. The stored result is meaningless by design -- only the
// elapsed time matters here.
__global__ void mykernel( int *a, int n )
{
int idx = threadIdx.x;
int value = 1;
for(int i=0; i<n; i++)
value *= sin( (float)i ) + tan( (float)i );
a[idx] = value;
}
// Demo driver: launches `nkernels` copies of mykernel, one per CUDA stream,
// times them together, then times a single launch, so the ratio shows how
// much concurrent kernel execution the device achieved.
int main(int argc, const char **argv)
{
GpuProfiling::initProf();
int nblocks = 4;
int nthreads = 64;
int n = 50000;
int nkernels = 8;
int nbytes;
int devID;
cudaDeviceProp deviceProps;
int *d_A=0;
cudaStream_t *stream;
cudaEvent_t start, stop;
float elapsedTime;
int qatest = 0;
// begin
printf("[concurrentKernels] - Starting...\n\n");
// get number of kernels if overridden on the command line
if (cutCheckCmdLineFlag(argc, (const char **)argv, "nkernels")) {
cutGetCmdLineArgumenti(argc, (const char **)argv, "nkernels", &nkernels);
}
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if (cutCheckCmdLineFlag(argc, (const char**)argv, "device")) {
cutGetCmdLineArgumenti(argc, (const char **) argv, "device", &devID);
}
else {
devID = cutGetMaxGflopsDeviceId();
}
cutilSafeCall(cudaSetDevice(devID));
// QA testing mode
if (cutCheckCmdLineFlag(argc, (const char**)argv, "qatest")) {
qatest = 1;
}
// get number of SMs on this GPU
cutilSafeCall(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA Device %s has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
printf("CUDA Device %s is%s capable of concurrent kernel execution\n", deviceProps.name, (deviceProps.concurrentKernels==0)?" NOT":"");
// One stream per kernel so the launches can overlap on capable devices.
stream = (cudaStream_t *)malloc(nkernels * sizeof(cudaStream_t));
for(int i=0; i<nkernels; i++)
{
cutilSafeCall(cudaStreamCreate(&stream[i]));
}
// note: in this sample we will repeatedly overwrite the same
// block of device mem, but that's okay because we don't really
// care about the output of the kernel for the purposes of this
// example.
nbytes = nkernels * nthreads * sizeof(int);
cutilSafeCall(cudaMalloc((void **)&d_A, nbytes));
cutilSafeCall(cudaEventCreate(&start));
cutilSafeCall(cudaEventCreate(&stop));
// start timer then launch all kernels in their streams
cutilSafeCall(cudaEventRecord(start, 0));
for(int i=0; i<nkernels; i++)
{
// avoid synchronization points (events, error checks, etc.) inside
// this loop in order to get concurrent execution on devices that support it
GpuProfiling::prepareProfiling( nblocks, nthreads );
mykernel<<<nblocks, nthreads, 0, stream[i]>>>(&d_A[i*nthreads], n);
GpuProfiling::addResults("mykernel");
}
// Record on the NULL stream: it waits for all other streams' work.
cutilSafeCall(cudaEventRecord(stop, 0));
// wait for all streams to finish
cutilSafeCall(cudaEventSynchronize(stop));
// get total time for all kernels
cutilSafeCall(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("\nAll %d kernels together took %.3fs\n", nkernels, elapsedTime/1000.f);
// check time to execute a single iteration
cutilSafeCall(cudaEventRecord(start, 0));
GpuProfiling::prepareProfiling( nblocks, nthreads );
mykernel<<<nblocks, nthreads, 0, stream[0]>>>(d_A, n);
GpuProfiling::addResults("mykernel");
cutilCheckMsg("kernel launch failure");
cutilSafeCall(cudaEventRecord(stop, 0));
cutilSafeCall(cudaEventSynchronize(stop));
cutilSafeCall(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("(~%.3fs per kernel * %d kernels = ~%.3fs if no concurrent execution)\n",
elapsedTime/1000.f, nkernels, nkernels*elapsedTime/1000.f);
GpuProfiling::printResults();
// cleanup
printf("\nCleaning up...\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
if (stream)
{
for(int i=0; i<nkernels; i++)
{
cutilSafeCall(cudaStreamDestroy(stream[i]));
}
free(stream);
}
if (d_A) cudaFree(d_A);
if (qatest) {
// any errors that might have happened will have already been reported
printf("[concurrentKernels] - Test Results:\nPASSED\n");
}
exit(0);
}
|
c4e4079952b8b7044d72d74891e99f77b2a435a0.hip | // !!! This is a file automatically generated by hipify!!!
//#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <hip/hip_runtime.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// ---- hipBLAS wrappers: thin typed shims over Caffe's shared BLAS handle.
// All operate on device pointers; errors surface through CUBLAS_CHECK.
// GEMM: C = alpha*op(A)*op(B) + beta*C for row-major host-side matrices.
// cuBLAS/hipBLAS is column-major, so A and B are swapped and the transpose
// flags exchanged to compute the row-major product in place.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// Double-precision twin of the float GEMM above.
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// GEMV: y = alpha*op(A)*x + beta*y; the transpose flag is inverted to
// account for the row-major/column-major mismatch.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
// AXPY: Y = alpha*X + Y (unit strides).
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Direction-agnostic device/host copy of N bytes; the surrounding stream
// syncs serialize it against work queued on Caffe's stream.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
}
}
// SCAL: X = alpha*X.
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// AXPBY: Y = alpha*X + beta*Y, composed from scal then axpy.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
// DOT: *out = x . y (result written via the handle's pointer mode).
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// ASUM: *y = sum_i |x_i|.
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// SCALE: y = alpha*x, via copy then in-place scal.
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// ---- Elementwise fill / scalar-add / binary-op kernels and their host
// launchers. Each kernel is a grid-stride loop (CUDA_KERNEL_LOOP) over n
// elements; launchers size the grid with CAFFE_GET_BLOCKS.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Fill Y[0..N) with alpha. alpha==0 uses a byte-wise memset fast path
// (valid because all-zero bytes are zero for int/float/double). The stream
// sync orders the default-stream launch after work on Caffe's stream.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
// y[i] += alpha for all i.
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
// y[i] = a[i] + b[i].
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
// y[i] = a[i] - b[i]. Note: sub/mul/div launch directly on Caffe's stream
// (no pre-sync), unlike set/add above which sync then use the default
// stream -- NOTE(review): this asymmetry is inherited from upstream.
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, b, y);
}
// y[i] = a[i] * b[i].
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, b, y);
}
// y[i] = a[i] / b[i] (no zero-divisor guard; caller's responsibility).
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, b, y);
}
// ---- Unary elementwise kernels (abs/exp/log/powx/sqrt), sign helpers and
// cuRAND wrappers. Kernels are grid-stride loops launched on Caffe's
// stream unless noted otherwise.
// y[i] = |a[i]|. NOTE(review): relies on the device `abs` overload set
// resolving for Dtype -- confirm the float instantiation does not hit the
// integer abs overload.
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, y);
}
// y[i] = exp(a[i]).
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, y);
}
// y[i] = log(a[i]) (natural log; non-positive inputs yield -inf/NaN).
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, y);
}
// y[i] = a[i] ** alpha.
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
N, a, alpha, y);
}
// y[i] = sqrt(a[i]). NOTE(review): unlike the other unary launchers this
// one runs on the default stream (stream arg 0), not Caffe::cuda_stream();
// inherited from upstream but worth confirming no ordering hazard.
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
// sign(x) in {-1,0,1}; sgnbit(x) = sign bit (1 for negatives incl. -0).
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fill r[0..n) with raw 32-bit uniform random integers.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
// Uniform floats in (a, b]: generate (0,1], then affine-map, skipping the
// scale/shift when they would be identity operations.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// Normal(mu, sigma) samples. cuRAND requires n even for the float normal
// generator -- NOTE(review): no check here; confirm callers guarantee it.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| c4e4079952b8b7044d72d74891e99f77b2a435a0.cu | //#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <cuda_runtime.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// ---- cuBLAS wrappers (CUDA original of the hipified section above):
// thin typed shims over Caffe's shared cuBLAS handle.
// GEMM: C = alpha*op(A)*op(B) + beta*C for row-major inputs; operands are
// swapped and transposes exchanged because cuBLAS is column-major.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
// GEMV: y = alpha*op(A)*x + beta*y; transpose flag inverted for the
// row-major/column-major mismatch.
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
// AXPY: Y = alpha*X + Y (unit strides).
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Direction-agnostic copy of N bytes, bracketed by stream syncs to order
// it against work queued on Caffe's stream.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()))&#x3B;
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
}
}
// SCAL: X = alpha*X.
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
// AXPBY: Y = alpha*X + beta*Y, composed from scal then axpy.
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
// DOT: *out = x . y.
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
// ASUM: *y = sum_i |x_i|.
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
// SCALE: y = alpha*x, via copy then in-place scal.
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// ---- Elementwise fill / scalar-add / binary-op kernels (CUDA original of
// the hipified section above). Kernels are grid-stride loops over n.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Fill Y[0..N) with alpha; alpha==0 takes a byte-wise memset fast path.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
// y[i] += alpha for all i.
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
// y[i] = a[i] + b[i].
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
// y[i] = a[i] - b[i]. sub/mul/div launch directly on Caffe's stream,
// unlike set/add above -- NOTE(review): asymmetry inherited from upstream.
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, b, y);
}
// y[i] = a[i] * b[i].
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, b, y);
}
// y[i] = a[i] / b[i] (no zero-divisor guard; caller's responsibility).
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, b, y);
}
// Element-wise absolute value: y[i] = |a[i]|.  The unqualified abs() relies
// on CUDA's C++ math overloads to pick the float/double version in device
// code (not the integer abs from <cstdlib>).
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
// caffe_gpu_abs, float specialization.
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// caffe_gpu_abs, double specialization.
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// Element-wise exponential: y[i] = exp(a[i]); exp() resolves to the
// float/double overload matching Dtype.
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
// caffe_gpu_exp, float specialization.
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// caffe_gpu_exp, double specialization.
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// Element-wise natural logarithm: y[i] = log(a[i]).  Non-positive inputs
// yield -inf/NaN per IEEE semantics; no guard here.
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
// caffe_gpu_log, float specialization.
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// caffe_gpu_log, double specialization.
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// Element-wise power: y[i] = a[i] ^ alpha (same alpha for all elements).
// NOTE(review): pow() returns NaN for negative bases with non-integer
// exponents -- callers must ensure a's sign is compatible with alpha.
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
// caffe_gpu_powx, float specialization.
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, alpha, y);
}
// caffe_gpu_powx, double specialization.
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, alpha, y);
}
// Element-wise square root: y[i] = sqrt(a[i]).
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
// caffe_gpu_sqrt, float specialization.
// Fix: launch on Caffe's stream like every other math function in this
// file.  The original launched on the default stream, which does not
// synchronize with work enqueued on Caffe::cuda_stream() and could race.
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// caffe_gpu_sqrt, double specialization (same stream fix).
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
N, a, y);
}
// Element-wise sign (-1/0/+1) and sign-bit, instantiated for float and
// double by the project macro.
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
// Fills r with n uniformly distributed raw 32-bit random integers.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
// Uniform random floats on [a, b): draw from cuRAND's unit-interval
// generator, then affinely rescale (skip the scale/shift when they are
// identity, avoiding two extra kernel launches).
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// Double-precision variant of the same rescaled uniform draw.
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// Gaussian random numbers with mean mu and stddev sigma, straight from
// cuRAND.  NOTE(review): curandGenerateNormal requires n to be even for
// pseudo-random generators -- confirm callers guarantee this.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
// Double-precision Gaussian variant.
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
fb2b7a7e9d0e1a66e5ad2ea3f74749a046ef93f5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <utils_cuda.h>
#include <point2d.h>
#include <vector>
#include <thrust/complex.h>
/*
 * Discrete Fourier spectrum of a weighted point set, 2-5 dimensions.
 * Thread mapping: x -> row, y -> col (and z -> s for ndims >= 3); the 4th
 * and 5th frequency axes are looped over inside each thread.  One real and
 * one imaginary coefficient are written per frequency cell; division by the
 * point count is deliberately done on the host (FourierAnalyzer), not here.
 *
 * Fix: the original called __syncthreads() repeatedly after the divergent
 * early return for out-of-range threads, which is undefined behavior.  The
 * kernel uses no shared memory and no inter-thread communication, so all
 * barriers have been removed.  The local variable `exp` was also renamed
 * `phase` (it shadowed the math function).
 *
 * NOTE(review): wx/wy/wz/wu/wv truncate (index - halfRes) * frequencyStep
 * to int, and cosf/sinf evaluate the phase in single precision even though
 * the accumulators are double -- confirm both against the CPU reference.
 */
__global__
void fouriermd_compute_kernel(double * d_outReal, double * d_outImag,
                              double* d_inPoints, double* d_inFuncVals,
                              int npts, int resolution, int ndims,
                              double twopi, float frequencyStep){
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    int col = threadIdx.y + blockDim.y * blockIdx.y;
    // Out-of-range threads exit here; no barrier may appear below this.
    if ( col >= resolution || row >= resolution)
    {
        return;
    }
    double realCoeff = 0.0, imagCoeff = 0.0;
    int halfRes = resolution * 0.5;
    if(ndims == 2){
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        for(int i = 0; i < npts; i++){
            double phase = -twopi * (wx * d_inPoints[2*i] + wy * d_inPoints[2*i+1]);
            realCoeff += d_inFuncVals[i] * cosf(phase);
            imagCoeff += d_inFuncVals[i] * sinf(phase);
        }
        int index = row * resolution + col;
        d_outReal[index] = realCoeff;
        d_outImag[index] = imagCoeff;
    }
    else if(ndims == 3){
        int s = threadIdx.z + blockDim.z * blockIdx.z;
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        int wz = (s - halfRes) * frequencyStep;
        for(int i = 0; i < npts; i++){
            double phase = -twopi * (wx * d_inPoints[3*i] + wy * d_inPoints[3*i+1] + wz * d_inPoints[3*i+2]);
            realCoeff += d_inFuncVals[i] * cosf(phase);
            imagCoeff += d_inFuncVals[i] * sinf(phase);
        }
        int index = s*resolution*resolution + row * resolution + col;
        d_outReal[index] = realCoeff;
        d_outImag[index] = imagCoeff;
    }
    else if(ndims == 4){
        int s = threadIdx.z + blockDim.z * blockIdx.z;
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        int wz = (s - halfRes) * frequencyStep;
        // NOTE(review): realCoeff/imagCoeff are not reset between u slices,
        // so each successive slice stores a running (cumulative) sum --
        // preserved from the original code; confirm this is intentional.
        for(int u = 0; u < resolution; u++)
        {
            int wu = (u - halfRes) * frequencyStep;
            for(int i = 0; i < npts; i++){
                double phase = -twopi * (wx * d_inPoints[4*i] +
                                         wy * d_inPoints[4*i+1] +
                                         wz * d_inPoints[4*i+2] +
                                         wu * d_inPoints[4*i+3]);
                realCoeff += d_inFuncVals[i] * cosf(phase);
                imagCoeff += d_inFuncVals[i] * sinf(phase);
            }
            int index = u*resolution*resolution*resolution +
                        s*resolution*resolution +
                        row * resolution + col;
            d_outReal[index] = realCoeff;
            d_outImag[index] = imagCoeff;
        }
    }
    else if(ndims == 5){
        int s = threadIdx.z + blockDim.z * blockIdx.z;
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        int wz = (s - halfRes) * frequencyStep;
        // NOTE(review): accumulators also carry over across the u/v loops
        // here (same cumulative behavior as ndims == 4).
        for(int u = 0; u< resolution; u++){
            for(int v = 0; v < resolution; v++){
                int wu = (u - halfRes) * frequencyStep;
                int wv = (v - halfRes) * frequencyStep;
                for(int i = 0; i < npts; i++){
                    double phase = -twopi * (wx * d_inPoints[5*i] +
                                             wy * d_inPoints[5*i+1] +
                                             wz * d_inPoints[5*i+2] +
                                             wu * d_inPoints[5*i+3] +
                                             wv * d_inPoints[5*i+4]);
                    realCoeff += d_inFuncVals[i] * cosf(phase);
                    imagCoeff += d_inFuncVals[i] * sinf(phase);
                }
                int index = v*resolution*resolution*resolution*resolution +
                            u*resolution*resolution*resolution +
                            s*resolution*resolution +
                            row * resolution + col;
                d_outReal[index] = realCoeff;
                d_outImag[index] = imagCoeff;
            }
        }
    }
    // Division by N for the real and imaginary coefficients is done in the
    // host-side C++ FourierAnalyzer code -- do not add it here.
}
// Host driver (HIP build): synchronizes the device, picks a 2-D launch for
// ndims == 2 or a 3-D launch otherwise (each covering resolution cells per
// axis via ceil-division), runs the spectrum kernel on the default stream,
// then synchronizes again and checks for launch/execution errors.
void cftmd_cuda(double * d_outReal, double * d_outImag,
double* d_inPoints, double* d_inFuncVals,
int npts, int resolution, int ndims,
double twopi, float frequencyStep){
// NOTE(review): this sync's return value is ignored; a pre-existing async
// error would only surface at the check at the end of this function.
hipDeviceSynchronize();
//printf("Image Size: %d %d \n", numRows, numCols);
if(ndims == 2)
{
const dim3 blockThreadSize(16, 16);
int bx = (resolution + blockThreadSize.x - 1 ) / blockThreadSize.x;
int by = (resolution + blockThreadSize.y - 1 ) / blockThreadSize.y;
dim3 gridBlockSize( bx, by, 1);
hipLaunchKernelGGL(( fouriermd_compute_kernel), dim3(gridBlockSize), dim3(blockThreadSize), 0, 0, d_outReal, d_outImag, d_inPoints,
d_inFuncVals, npts, resolution, ndims, twopi, frequencyStep);
}
else{
const dim3 blockThreadSize(8,8,8);
int bx = (resolution + blockThreadSize.x - 1 ) / blockThreadSize.x;
int by = (resolution + blockThreadSize.y - 1 ) / blockThreadSize.y;
int bz = (resolution + blockThreadSize.z - 1 ) / blockThreadSize.z;
dim3 gridBlockSize( bx, by, bz);
hipLaunchKernelGGL(( fouriermd_compute_kernel), dim3(gridBlockSize), dim3(blockThreadSize), 0, 0, d_outReal, d_outImag, d_inPoints,
d_inFuncVals, npts, resolution, ndims, twopi, frequencyStep);
}
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| fb2b7a7e9d0e1a66e5ad2ea3f74749a046ef93f5.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <utils_cuda.h>
#include <point2d.h>
#include <vector>
#include <thrust/complex.h>
/*
 * Discrete Fourier spectrum of a weighted point set, 2-5 dimensions.
 * Thread mapping: x -> row, y -> col (and z -> s for ndims >= 3); the 4th
 * and 5th frequency axes are looped over inside each thread.  One real and
 * one imaginary coefficient are written per frequency cell; division by the
 * point count is deliberately done on the host (FourierAnalyzer), not here.
 *
 * Fix: the original called __syncthreads() repeatedly after the divergent
 * early return for out-of-range threads, which is undefined behavior.  The
 * kernel uses no shared memory and no inter-thread communication, so all
 * barriers have been removed.  The local variable `exp` was also renamed
 * `phase` (it shadowed the math function).
 *
 * NOTE(review): wx/wy/wz/wu/wv truncate (index - halfRes) * frequencyStep
 * to int, and cosf/sinf evaluate the phase in single precision even though
 * the accumulators are double -- confirm both against the CPU reference.
 */
__global__
void fouriermd_compute_kernel(double * d_outReal, double * d_outImag,
                              double* d_inPoints, double* d_inFuncVals,
                              int npts, int resolution, int ndims,
                              double twopi, float frequencyStep){
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    int col = threadIdx.y + blockDim.y * blockIdx.y;
    // Out-of-range threads exit here; no barrier may appear below this.
    if ( col >= resolution || row >= resolution)
    {
        return;
    }
    double realCoeff = 0.0, imagCoeff = 0.0;
    int halfRes = resolution * 0.5;
    if(ndims == 2){
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        for(int i = 0; i < npts; i++){
            double phase = -twopi * (wx * d_inPoints[2*i] + wy * d_inPoints[2*i+1]);
            realCoeff += d_inFuncVals[i] * cosf(phase);
            imagCoeff += d_inFuncVals[i] * sinf(phase);
        }
        int index = row * resolution + col;
        d_outReal[index] = realCoeff;
        d_outImag[index] = imagCoeff;
    }
    else if(ndims == 3){
        int s = threadIdx.z + blockDim.z * blockIdx.z;
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        int wz = (s - halfRes) * frequencyStep;
        for(int i = 0; i < npts; i++){
            double phase = -twopi * (wx * d_inPoints[3*i] + wy * d_inPoints[3*i+1] + wz * d_inPoints[3*i+2]);
            realCoeff += d_inFuncVals[i] * cosf(phase);
            imagCoeff += d_inFuncVals[i] * sinf(phase);
        }
        int index = s*resolution*resolution + row * resolution + col;
        d_outReal[index] = realCoeff;
        d_outImag[index] = imagCoeff;
    }
    else if(ndims == 4){
        int s = threadIdx.z + blockDim.z * blockIdx.z;
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        int wz = (s - halfRes) * frequencyStep;
        // NOTE(review): realCoeff/imagCoeff are not reset between u slices,
        // so each successive slice stores a running (cumulative) sum --
        // preserved from the original code; confirm this is intentional.
        for(int u = 0; u < resolution; u++)
        {
            int wu = (u - halfRes) * frequencyStep;
            for(int i = 0; i < npts; i++){
                double phase = -twopi * (wx * d_inPoints[4*i] +
                                         wy * d_inPoints[4*i+1] +
                                         wz * d_inPoints[4*i+2] +
                                         wu * d_inPoints[4*i+3]);
                realCoeff += d_inFuncVals[i] * cosf(phase);
                imagCoeff += d_inFuncVals[i] * sinf(phase);
            }
            int index = u*resolution*resolution*resolution +
                        s*resolution*resolution +
                        row * resolution + col;
            d_outReal[index] = realCoeff;
            d_outImag[index] = imagCoeff;
        }
    }
    else if(ndims == 5){
        int s = threadIdx.z + blockDim.z * blockIdx.z;
        int wy = (row - halfRes) * frequencyStep;
        int wx = (col - halfRes) * frequencyStep;
        int wz = (s - halfRes) * frequencyStep;
        // NOTE(review): accumulators also carry over across the u/v loops
        // here (same cumulative behavior as ndims == 4).
        for(int u = 0; u< resolution; u++){
            for(int v = 0; v < resolution; v++){
                int wu = (u - halfRes) * frequencyStep;
                int wv = (v - halfRes) * frequencyStep;
                for(int i = 0; i < npts; i++){
                    double phase = -twopi * (wx * d_inPoints[5*i] +
                                             wy * d_inPoints[5*i+1] +
                                             wz * d_inPoints[5*i+2] +
                                             wu * d_inPoints[5*i+3] +
                                             wv * d_inPoints[5*i+4]);
                    realCoeff += d_inFuncVals[i] * cosf(phase);
                    imagCoeff += d_inFuncVals[i] * sinf(phase);
                }
                int index = v*resolution*resolution*resolution*resolution +
                            u*resolution*resolution*resolution +
                            s*resolution*resolution +
                            row * resolution + col;
                d_outReal[index] = realCoeff;
                d_outImag[index] = imagCoeff;
            }
        }
    }
    // Division by N for the real and imaginary coefficients is done in the
    // host-side C++ FourierAnalyzer code -- do not add it here.
}
// Host driver (CUDA build): synchronizes the device, picks a 2-D launch for
// ndims == 2 or a 3-D launch otherwise (each covering resolution cells per
// axis via ceil-division), runs the spectrum kernel on the default stream,
// then synchronizes again and checks for launch/execution errors.
void cftmd_cuda(double * d_outReal, double * d_outImag,
double* d_inPoints, double* d_inFuncVals,
int npts, int resolution, int ndims,
double twopi, float frequencyStep){
// NOTE(review): this sync's return value is ignored; a pre-existing async
// error would only surface at the check at the end of this function.
cudaDeviceSynchronize();
//printf("Image Size: %d %d \n", numRows, numCols);
if(ndims == 2)
{
const dim3 blockThreadSize(16, 16);
int bx = (resolution + blockThreadSize.x - 1 ) / blockThreadSize.x;
int by = (resolution + blockThreadSize.y - 1 ) / blockThreadSize.y;
dim3 gridBlockSize( bx, by, 1);
fouriermd_compute_kernel<<<gridBlockSize, blockThreadSize>>>(d_outReal, d_outImag, d_inPoints,
d_inFuncVals, npts, resolution, ndims, twopi, frequencyStep);
}
else{
const dim3 blockThreadSize(8,8,8);
int bx = (resolution + blockThreadSize.x - 1 ) / blockThreadSize.x;
int by = (resolution + blockThreadSize.y - 1 ) / blockThreadSize.y;
int bz = (resolution + blockThreadSize.z - 1 ) / blockThreadSize.z;
dim3 gridBlockSize( bx, by, bz);
fouriermd_compute_kernel<<<gridBlockSize, blockThreadSize>>>(d_outReal, d_outImag, d_inPoints,
d_inFuncVals, npts, resolution, ndims, twopi, frequencyStep);
}
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
a599807b8614466bc54e37f6618257963e69e064.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SparseFrame.h"
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
// Software double-precision atomicAdd for pre-Pascal GPUs (sm < 60), which
// lack the native instruction.  Classic CAS retry loop over the 64-bit bit
// pattern of the target; returns the previous value like the hardware op.
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old); // another thread won the CAS; retry with new value
return __longlong_as_double(old);
}
#endif
// Inverse-index build: for each si < lds, record the local position si of
// global entry d_Lsi[sip_offset + si] into d_Map.  One thread per entry.
__global__ void createMap_kernel ( Long *d_Map, Long *d_Lsi, Long sip_offset, Long lds )
{
    const Long localIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( localIdx >= lds )
        return;
    d_Map [ d_Lsi [ sip_offset + localIdx ] ] = localIdx;
}
// Host launcher for createMap_kernel: flat 1-D grid, one thread per entry,
// on the caller-supplied stream.
void createMap ( Long *d_Map, Long *d_Lsi, Long sip_offset, Long lds, hipStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X * CUDA_BLOCKDIM_Y; // flatten the 2-D tile size into one dimension
block.x = ( lds + thread.x - 1 ) / thread.x; // ceil-div so all lds entries are covered
hipLaunchKernelGGL(( createMap_kernel) , dim3(block), dim3(thread), 0, stream , d_Map, d_Lsi, sip_offset, lds );
}
// Gather: d_RelativeMap[di] = d_Map[ d_Lsi[dip_offset + di] ] for each
// di < ldd.  One thread per entry.
__global__ void createRelativeMap_kernel ( Long *d_RelativeMap, Long *d_Map, Long *d_Lsi, Long dip_offset, Long ldd )
{
    const Long entry = blockIdx.x * blockDim.x + threadIdx.x;
    if ( entry >= ldd )
        return;
    d_RelativeMap[entry] = d_Map [ d_Lsi [ dip_offset + entry ] ];
}
// Host launcher for createRelativeMap_kernel: flat 1-D grid, one thread per
// entry, on the caller-supplied stream.
void createRelativeMap ( Long *d_RelativeMap, Long *d_Map, Long *d_Lsi, Long dip_offset, Long ldd, hipStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X * CUDA_BLOCKDIM_Y; // flat 1-D block
block.x = ( ldd + thread.x - 1 ) / thread.x; // ceil-div coverage of ldd entries
hipLaunchKernelGGL(( createRelativeMap_kernel) , dim3(block), dim3(thread), 0, stream , d_RelativeMap, d_Map, d_Lsi, dip_offset, ldd );
}
/*
 * Scatter-subtract a dense nccol x ncrow update panel C (leading dimension
 * ldc, column index cj, row index ci) into the destination supernode A,
 * routing both indices of C through d_RelativeMap.  When a row index falls
 * past the column count (ci >= nccol) the matching entry of the trailing
 * part of both matrices (offsets nsrow-nscol and ncrow-nccol) is updated
 * too -- NOTE(review): confirm this trapezoidal layout against the host
 * factorization code.  isAtomic selects atomicAdd-based updates for
 * concurrent writers; isComplex selects Complex (.x/.y) over Float math.
 * Requires blockDim == (CUDA_BLOCKDIM_X, CUDA_BLOCKDIM_Y).
 */
__global__ void mappedSubtract_kernel ( int isAtomic, int isComplex, void *d_A, Long nscol, Long nsrow, Long lda, void *d_C, Long cj_offset, Long ci_offset, Long nccol, Long ncrow, Long ldc, Long *d_RelativeMap )
{
    __shared__ Long shRelativeMap_j[CUDA_BLOCKDIM_X];
    __shared__ Long shRelativeMap_i[CUDA_BLOCKDIM_Y];

    const Long cj = cj_offset + blockIdx.x * blockDim.x + threadIdx.x;
    const Long ci = ci_offset + blockIdx.y * blockDim.y + threadIdx.y;

    // Stage each block's slice of the relative map once.
    if ( threadIdx.y == 0 && cj < nccol )
        shRelativeMap_j[threadIdx.x] = d_RelativeMap[cj];
    if ( threadIdx.x == 0 && ci < ncrow )
        shRelativeMap_i[threadIdx.y] = d_RelativeMap[ci];
    __syncthreads();

    if ( cj >= nccol || ci >= ncrow )
        return; // safe: no barriers below this point

    const Long dstIdx   = shRelativeMap_j[threadIdx.x] * lda + shRelativeMap_i[threadIdx.y];
    const Long srcIdx   = cj * ldc + ci;
    const Long dstShift = nsrow - nscol;    // offset of A's trailing part
    const Long srcShift = ncrow - nccol;    // offset of C's trailing part
    const int  alsoRect = ( ci >= nccol );  // also update the trailing copy

    if ( !isComplex )
    {
        Float *A = (Float *) d_A;
        Float *C = (Float *) d_C;
        if ( !isAtomic )
        {
            A[dstIdx] -= C[srcIdx];
            if ( alsoRect )
                ( A + dstShift )[dstIdx] -= ( C + srcShift )[srcIdx];
        }
        else
        {
            atomicAdd ( &A[dstIdx], - C[srcIdx] );
            if ( alsoRect )
                atomicAdd ( &( A + dstShift )[dstIdx], - ( C + srcShift )[srcIdx] );
        }
    }
    else
    {
        Complex *A = (Complex *) d_A;
        Complex *C = (Complex *) d_C;
        if ( !isAtomic )
        {
            A[dstIdx].x -= C[srcIdx].x;
            A[dstIdx].y -= C[srcIdx].y;
            if ( alsoRect )
            {
                ( A + dstShift )[dstIdx].x -= ( C + srcShift )[srcIdx].x;
                ( A + dstShift )[dstIdx].y -= ( C + srcShift )[srcIdx].y;
            }
        }
        else
        {
            atomicAdd ( &A[dstIdx].x, - C[srcIdx].x );
            atomicAdd ( &A[dstIdx].y, - C[srcIdx].y );
            if ( alsoRect )
            {
                atomicAdd ( &( A + dstShift )[dstIdx].x, - ( C + srcShift )[srcIdx].x );
                atomicAdd ( &( A + dstShift )[dstIdx].y, - ( C + srcShift )[srcIdx].y );
            }
        }
    }
}
// Host launcher for mappedSubtract_kernel: 2-D grid covering the
// nccol x ncrow panel (x -> columns, y -> rows), on the given stream.
void mappedSubtract ( int isAtomic, int isComplex, void *d_A, Long nscol, Long nsrow, Long lda, void *d_C, Long cj_offset, Long ci_offset, Long nccol, Long ncrow, Long ldc, Long *d_RelativeMap, hipStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X;
thread.y = CUDA_BLOCKDIM_Y;
block.x = ( nccol + thread.x - 1 ) / thread.x; // ceil-div over columns
block.y = ( ncrow + thread.y - 1 ) / thread.y; // ceil-div over rows
hipLaunchKernelGGL(( mappedSubtract_kernel) , dim3(block), dim3(thread), 0, stream , isAtomic, isComplex, d_A, nscol, nsrow, lda, d_C, cj_offset, ci_offset, nccol, ncrow, ldc, d_RelativeMap );
}
// Element-wise A = B + C over an nscol x nsrow tile with leading dimension
// lda (x -> column index, y -> row index).  isComplex selects Complex
// (.x/.y) arithmetic over plain Float.
__global__ void deviceSum_kernel ( int isComplex, void *d_A, void *d_B, void *d_C, Long nscol, Long nsrow, Long lda )
{
    const Long colIdx = blockIdx.x * blockDim.x + threadIdx.x;
    const Long rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
    if ( colIdx >= nscol || rowIdx >= nsrow )
        return;
    const Long e = colIdx * lda + rowIdx;
    if ( isComplex )
    {
        Complex *A = (Complex *) d_A;
        Complex *B = (Complex *) d_B;
        Complex *C = (Complex *) d_C;
        A[e].x = B[e].x + C[e].x;
        A[e].y = B[e].y + C[e].y;
    }
    else
    {
        ( (Float *) d_A )[e] = ( (Float *) d_B )[e] + ( (Float *) d_C )[e];
    }
}
// Host launcher for deviceSum_kernel: 2-D grid covering the nscol x nsrow
// tile, on the caller-supplied stream.
void deviceSum ( int isComplex, void *d_A, void *d_B, void *d_C, Long nscol, Long nsrow, Long lda, hipStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X;
thread.y = CUDA_BLOCKDIM_Y;
block.x = ( nscol + thread.x - 1 ) / thread.x; // ceil-div over columns
block.y = ( nsrow + thread.y - 1 ) / thread.y; // ceil-div over rows
hipLaunchKernelGGL(( deviceSum_kernel) , dim3(block), dim3(thread), 0, stream , isComplex, d_A, d_B, d_C, nscol, nsrow, lda );
}
| a599807b8614466bc54e37f6618257963e69e064.cu | #include "SparseFrame.h"
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
// Software double-precision atomicAdd for pre-Pascal GPUs (sm < 60), which
// lack the native instruction.  Classic CAS retry loop over the 64-bit bit
// pattern of the target; returns the previous value like the hardware op.
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old); // another thread won the CAS; retry with new value
return __longlong_as_double(old);
}
#endif
// Inverse-index build: for each si < lds, record the local position si of
// global entry d_Lsi[sip_offset + si] into d_Map.  One thread per entry.
__global__ void createMap_kernel ( Long *d_Map, Long *d_Lsi, Long sip_offset, Long lds )
{
    const Long localIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( localIdx >= lds )
        return;
    d_Map [ d_Lsi [ sip_offset + localIdx ] ] = localIdx;
}
// Host launcher for createMap_kernel: flat 1-D grid, one thread per entry,
// on the caller-supplied stream.
void createMap ( Long *d_Map, Long *d_Lsi, Long sip_offset, Long lds, cudaStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X * CUDA_BLOCKDIM_Y; // flatten the 2-D tile size into one dimension
block.x = ( lds + thread.x - 1 ) / thread.x; // ceil-div so all lds entries are covered
createMap_kernel <<< block, thread, 0, stream >>> ( d_Map, d_Lsi, sip_offset, lds );
}
// Gather: d_RelativeMap[di] = d_Map[ d_Lsi[dip_offset + di] ] for each
// di < ldd.  One thread per entry.
__global__ void createRelativeMap_kernel ( Long *d_RelativeMap, Long *d_Map, Long *d_Lsi, Long dip_offset, Long ldd )
{
    const Long entry = blockIdx.x * blockDim.x + threadIdx.x;
    if ( entry >= ldd )
        return;
    d_RelativeMap[entry] = d_Map [ d_Lsi [ dip_offset + entry ] ];
}
// Host launcher for createRelativeMap_kernel: flat 1-D grid, one thread per
// entry, on the caller-supplied stream.
void createRelativeMap ( Long *d_RelativeMap, Long *d_Map, Long *d_Lsi, Long dip_offset, Long ldd, cudaStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X * CUDA_BLOCKDIM_Y; // flat 1-D block
block.x = ( ldd + thread.x - 1 ) / thread.x; // ceil-div coverage of ldd entries
createRelativeMap_kernel <<< block, thread, 0, stream >>> ( d_RelativeMap, d_Map, d_Lsi, dip_offset, ldd );
}
/*
 * Scatter-subtract a dense nccol x ncrow update panel C (leading dimension
 * ldc, column index cj, row index ci) into the destination supernode A,
 * routing both indices of C through d_RelativeMap.  When a row index falls
 * past the column count (ci >= nccol) the matching entry of the trailing
 * part of both matrices (offsets nsrow-nscol and ncrow-nccol) is updated
 * too -- NOTE(review): confirm this trapezoidal layout against the host
 * factorization code.  isAtomic selects atomicAdd-based updates for
 * concurrent writers; isComplex selects Complex (.x/.y) over Float math.
 * Requires blockDim == (CUDA_BLOCKDIM_X, CUDA_BLOCKDIM_Y).
 */
__global__ void mappedSubtract_kernel ( int isAtomic, int isComplex, void *d_A, Long nscol, Long nsrow, Long lda, void *d_C, Long cj_offset, Long ci_offset, Long nccol, Long ncrow, Long ldc, Long *d_RelativeMap )
{
    __shared__ Long shRelativeMap_j[CUDA_BLOCKDIM_X];
    __shared__ Long shRelativeMap_i[CUDA_BLOCKDIM_Y];

    const Long cj = cj_offset + blockIdx.x * blockDim.x + threadIdx.x;
    const Long ci = ci_offset + blockIdx.y * blockDim.y + threadIdx.y;

    // Stage each block's slice of the relative map once.
    if ( threadIdx.y == 0 && cj < nccol )
        shRelativeMap_j[threadIdx.x] = d_RelativeMap[cj];
    if ( threadIdx.x == 0 && ci < ncrow )
        shRelativeMap_i[threadIdx.y] = d_RelativeMap[ci];
    __syncthreads();

    if ( cj >= nccol || ci >= ncrow )
        return; // safe: no barriers below this point

    const Long dstIdx   = shRelativeMap_j[threadIdx.x] * lda + shRelativeMap_i[threadIdx.y];
    const Long srcIdx   = cj * ldc + ci;
    const Long dstShift = nsrow - nscol;    // offset of A's trailing part
    const Long srcShift = ncrow - nccol;    // offset of C's trailing part
    const int  alsoRect = ( ci >= nccol );  // also update the trailing copy

    if ( !isComplex )
    {
        Float *A = (Float *) d_A;
        Float *C = (Float *) d_C;
        if ( !isAtomic )
        {
            A[dstIdx] -= C[srcIdx];
            if ( alsoRect )
                ( A + dstShift )[dstIdx] -= ( C + srcShift )[srcIdx];
        }
        else
        {
            atomicAdd ( &A[dstIdx], - C[srcIdx] );
            if ( alsoRect )
                atomicAdd ( &( A + dstShift )[dstIdx], - ( C + srcShift )[srcIdx] );
        }
    }
    else
    {
        Complex *A = (Complex *) d_A;
        Complex *C = (Complex *) d_C;
        if ( !isAtomic )
        {
            A[dstIdx].x -= C[srcIdx].x;
            A[dstIdx].y -= C[srcIdx].y;
            if ( alsoRect )
            {
                ( A + dstShift )[dstIdx].x -= ( C + srcShift )[srcIdx].x;
                ( A + dstShift )[dstIdx].y -= ( C + srcShift )[srcIdx].y;
            }
        }
        else
        {
            atomicAdd ( &A[dstIdx].x, - C[srcIdx].x );
            atomicAdd ( &A[dstIdx].y, - C[srcIdx].y );
            if ( alsoRect )
            {
                atomicAdd ( &( A + dstShift )[dstIdx].x, - ( C + srcShift )[srcIdx].x );
                atomicAdd ( &( A + dstShift )[dstIdx].y, - ( C + srcShift )[srcIdx].y );
            }
        }
    }
}
// Host launcher for mappedSubtract_kernel: 2-D grid covering the
// nccol x ncrow panel (x -> columns, y -> rows), on the given stream.
void mappedSubtract ( int isAtomic, int isComplex, void *d_A, Long nscol, Long nsrow, Long lda, void *d_C, Long cj_offset, Long ci_offset, Long nccol, Long ncrow, Long ldc, Long *d_RelativeMap, cudaStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X;
thread.y = CUDA_BLOCKDIM_Y;
block.x = ( nccol + thread.x - 1 ) / thread.x; // ceil-div over columns
block.y = ( ncrow + thread.y - 1 ) / thread.y; // ceil-div over rows
mappedSubtract_kernel <<< block, thread, 0, stream >>> ( isAtomic, isComplex, d_A, nscol, nsrow, lda, d_C, cj_offset, ci_offset, nccol, ncrow, ldc, d_RelativeMap );
}
// Element-wise A = B + C over an nscol x nsrow tile with leading dimension
// lda (x -> column index, y -> row index).  isComplex selects Complex
// (.x/.y) arithmetic over plain Float.
__global__ void deviceSum_kernel ( int isComplex, void *d_A, void *d_B, void *d_C, Long nscol, Long nsrow, Long lda )
{
    const Long colIdx = blockIdx.x * blockDim.x + threadIdx.x;
    const Long rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
    if ( colIdx >= nscol || rowIdx >= nsrow )
        return;
    const Long e = colIdx * lda + rowIdx;
    if ( isComplex )
    {
        Complex *A = (Complex *) d_A;
        Complex *B = (Complex *) d_B;
        Complex *C = (Complex *) d_C;
        A[e].x = B[e].x + C[e].x;
        A[e].y = B[e].y + C[e].y;
    }
    else
    {
        ( (Float *) d_A )[e] = ( (Float *) d_B )[e] + ( (Float *) d_C )[e];
    }
}
// Host launcher for deviceSum_kernel: 2-D grid covering the nscol x nsrow
// tile, on the caller-supplied stream.
void deviceSum ( int isComplex, void *d_A, void *d_B, void *d_C, Long nscol, Long nsrow, Long lda, cudaStream_t stream )
{
dim3 block, thread;
thread.x = CUDA_BLOCKDIM_X;
thread.y = CUDA_BLOCKDIM_Y;
block.x = ( nscol + thread.x - 1 ) / thread.x; // ceil-div over columns
block.y = ( nsrow + thread.y - 1 ) / thread.y; // ceil-div over rows
deviceSum_kernel <<< block, thread, 0, stream >>> ( isComplex, d_A, d_B, d_C, nscol, nsrow, lda );
}
|
cb22ee73ba4ded72ed389faad727572ac744c0ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cutilDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.hip"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf_hip.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries; the host-side reader
// parses the same layout, so field order/sizes must not change.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused; // padding to keep the following ints aligned
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
// (printfBufferPtr stays NULL until the host runs cudaPrintfInit(), so an
// uninitialised library simply makes every cuPrintf a silent no-op.)
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check
// Threads filtered out by cuPrintfRestrict() get a NULL pointer back,
// which every downstream helper treats as "do nothing".
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
// (slot 0 of the slice is reserved for the header, so wrapping restarts
// at CUPRINTF_MAX_LEN rather than at 0).
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
// NOTE(review): the atomicAdd below updates only the low 32 bits of the
// (potentially 64-bit) printfBufferPtr value - this appears to assume the
// buffer never straddles a 4GB address boundary. TODO confirm on 64-bit
// targets before relying on it.
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
// Finalise a printf record: build the header (magic, block/thread IDs and
// the offset of the format string within the record) and write it over the
// start of the record in one struct-sized store. A NULL ptr - produced when
// initialisation failed or this thread is restricted - is silently ignored.
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if(!ptr)
return;
cuPrintfHeader hdr;
hdr.blockid = blockIdx.x + gridDim.x*blockIdx.y;
hdr.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
hdr.fmtoffset = (unsigned short)(fmtptr - ptr);
hdr.magic = CUPRINTF_SM11_MAGIC;
*(cuPrintfHeader *)(void *)ptr = hdr;
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
// (NULL in means NULL out, so failures propagate through the copy chain.)
if(!dest || !src || (dest >= end))
return NULL;
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
// (copies up to and including the terminating '\0', truncating at "end").
while(n--)
{
if(dest >= end) // Overflow check
break;
len++;
*dest++ = *src;
if(*src++ == '\0')
break;
}
// Now write out the padding bytes, and we have our length.
// (pads with zeros up to the next CUPRINTF_ALIGN_SIZE boundary; the
// stored length deliberately includes this padding.)
while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuimg all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
// String-argument specialisation of copyArg: writes the length-prefixed,
// padded string into the record via cuPrintfStrncpy, then drops a zero
// terminator just after it. NULL in (earlier failure) or overflow from the
// copy yields NULL out, so errors keep propagating.
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
if(!ptr || !arg)
return NULL;
char *next = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end);
if(next != NULL)
*next = 0;
return next;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initisalisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
return NULL;
// Write the length and argument
// The length word lets the host side tell a 4-byte float from an 8-byte
// double when it decodes the record (see outputPrintfData below).
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
// Terminate so the record is well-formed even if this is the last item.
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
// The 0..10-argument overloads below are all identical in shape: grab a
// record (PREAMBLE), append each argument in turn (ARG), then write the
// format string and finalise the header (POSTAMBLE). Only the argument
// count differs between them.
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
// The macros were only needed to generate the overloads above.
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
// Restrict subsequent cuPrintf output to one thread and/or one block.
// Each id is stored into restrictRules only if it names a real thread in
// this block / block in this grid, or is the CUPRINTF_UNRESTRICTED
// wildcard; out-of-range values leave the existing rule untouched.
// Note the rules persist across kernel launches (they are only reset by
// cudaPrintfInit on the host).
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
const bool thread_ok = (threadid == CUPRINTF_UNRESTRICTED) ||
((threadid >= 0) && (threadid < threads_per_block));
if(thread_ok)
restrictRules.threadid = threadid;

const int blocks_per_grid = gridDim.x * gridDim.y;
const bool block_ok = (blockid == CUPRINTF_UNRESTRICTED) ||
((blockid >= 0) && (blockid < blocks_per_grid));
if(block_ok)
restrictRules.blockid = blockid;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
// Host-side bookkeeping for the device buffer created by cudaPrintfInit().
static FILE *printf_fp; // Destination stream used by cudaPrintfDisplay()
static char *printfbuf_start=NULL; // Host cursor: next unread record in the buffer
static char *printfbuf_device=NULL; // Device allocation backing the circular buffer
static int printfbuf_len=0; // Buffer size in bytes (multiple of CUPRINTF_MAX_LEN)
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the formate string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while(p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if(*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
if(arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
// Step past the length word to the argument payload itself.
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
// Temporarily terminate the format string just after the specifier so
// fprintf sees exactly one conversion.
*p = '\0';
switch(specifier)
{
// These all take integer arguments
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
// NOTE(review): %p is rendered from a 4-byte int slot here, which looks
// like it truncates pointer values on 64-bit targets - confirm before
// relying on %p output.
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if(arglen == 4) // Float vs. Double thing
fprintf(printf_fp, format, *((float *)data));
else
fprintf(printf_fp, format, *((double *)data));
break;
// Strings are handled in a special way
// NOTE(review): the cursor below always advances by a single
// CUPRINTF_ALIGN_SIZE, yet string arguments are stored with a padded
// length that can be larger - arguments following a long %s appear to
// be mis-read. Verify against copyArg's layout before depending on
// multi-argument printfs with long strings.
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
// Returns the number of records successfully printed.
int printf_count=0;
// Local staging area; the extra byte guarantees NUL-termination of any
// format string that runs to the end of the record.
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
// (One device-to-host copy per record - fine for debugging output.)
hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
// fmtoffset == 0 marks a record whose format string overflowed.
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
// Allocate and initialise the device-side printf buffer, then publish it to
// the device-side globals. bufferLen is rounded up to at least one, and a
// whole number of, CUPRINTF_MAX_LEN records (the circular-buffer arithmetic
// relies on that). Returns hipErrorInitializationError if the device
// allocation fails, hipSuccess otherwise. The caller is responsible for
// eventually calling cudaPrintfEnd() to release the buffer.
extern "C" hipError_t cudaPrintfInit(size_t bufferLen)
{
// Round up to a multiple of the per-record size.
if(bufferLen < (size_t)CUPRINTF_MAX_LEN)
bufferLen = CUPRINTF_MAX_LEN;
size_t partial = bufferLen % CUPRINTF_MAX_LEN;
if(partial > 0)
bufferLen += CUPRINTF_MAX_LEN - partial;
printfbuf_len = (int)bufferLen;

// Grab the device buffer and zero it so no stale magic numbers survive.
if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess)
return hipErrorInitializationError;
hipMemset(printfbuf_device, 0, printfbuf_len);
printfbuf_start = printfbuf_device; // Where we start reading from

// Start out with output permitted from every thread of every block.
cuPrintfRestriction rules;
rules.threadid = rules.blockid = CUPRINTF_UNRESTRICTED;
hipMemcpyToSymbol(restrictRules, &rules, sizeof(rules));

// Point the device-side globals at the fresh buffer.
hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
return hipSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
// Release the device buffer allocated by cudaPrintfInit() and drop the
// host-side pointers. Safe to call when initialisation never happened (or
// already ended): in that case it does nothing.
extern "C" void cudaPrintfEnd()
{
if(printfbuf_start && printfbuf_device)
{
hipFree(printfbuf_device);
printfbuf_start = printfbuf_device = NULL;
}
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return hipErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
// Walk the buffer one per-thread slice at a time.
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
// Skip the silent thread's slice and carry on.
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
// (the slice is itself a small circular buffer whose slot 0 holds the header).
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
// (the device-side pointer grows without wrapping, so it is reduced
// modulo the buffer length here).
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
// Remember where we stopped so the next call only prints new records.
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
hipMemset(printfbuf_device, 0, printfbuf_len);
return hipSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
| cb22ee73ba4ded72ed389faad727572ac744c0ed.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cutilDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.cu"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceeding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if(ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if(!dest || !src || (dest >= end))
return NULL;
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string
while(n--)
{
if(dest >= end) // Overflow check
break;
len++;
*dest++ = *src;
if(*src++ == '\0')
break;
}
// Now write out the padding bytes, and we have our length.
while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuimg all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
// String specialisation of copyArg: the length header, the characters and the
// alignment padding are all written by cuPrintfStrncpy; on success we only
// need to drop a zero terminator at the returned cursor position.
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
    // Bail out on an uninitialised buffer or a NULL string argument.
    if(ptr == NULL || arg == NULL)
        return NULL;
    ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end);
    if(ptr != NULL)
        *ptr = 0;           // Terminate; NULL from strncpy means overflow
    return ptr;
}
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
// Initisalisation and overflow check. Alignment rules mean that
// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
// to check that one offset.
if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
return NULL;
// Write the length and argument
// The stored sizeof() lets the host reader distinguish e.g. float (4)
// from double (8) at display time.
*(int *)(void *)ptr = sizeof(arg);
ptr += CUPRINTF_ALIGN_SIZE;
*(T *)(void *)ptr = arg;
ptr += CUPRINTF_ALIGN_SIZE;
// Provisional terminator; it gets overwritten by the next entry's
// length field if more arguments follow.
*ptr = 0;
return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
// Common prologue: reserve a CUPRINTF_MAX_LEN record in the circular buffer
// and position the write cursor just past the header slot.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
// One overload per argument count (0..10). Each simply posts its arguments
// followed by the format string, then finalises the record header.
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
// The helper macros are strictly local to the overloads above.
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
    // Accept the thread filter only if it is a valid in-block index or the
    // "print from everyone" wildcard.
    int nthreads = blockDim.x * blockDim.y * blockDim.z;
    if((threadid == CUPRINTF_UNRESTRICTED) || ((threadid >= 0) && (threadid < nthreads)))
        restrictRules.threadid = threadid;

    // Same validation for the block filter (grid is treated as 2D here).
    int nblocks = gridDim.x * gridDim.y;
    if((blockid == CUPRINTF_UNRESTRICTED) || ((blockid >= 0) && (blockid < nblocks)))
        restrictRules.blockid = blockid;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;                 // Output stream chosen by cudaPrintfDisplay()
static char *printfbuf_start=NULL;      // Host-side read cursor into the device buffer
static char *printfbuf_device=NULL;     // Device allocation holding the circular buffer
static int printfbuf_len=0;             // Buffer size in bytes (multiple of CUPRINTF_MAX_LEN)
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the formate string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
    // Format string is prefixed by a length that we don't need
    fmt += CUPRINTF_ALIGN_SIZE;

    // Walk the format string one '%' conversion at a time, printing the
    // literal text in between and formatting each buffered argument.
    char *p = strchr(fmt, '%');
    while(p != NULL)
    {
        // Print up to the % character
        *p = '\0';
        fputs(fmt, printf_fp);
        *p = '%';           // Put back the %

        // Now handle the format specifier
        char *format = p++;         // Points to the '%'
        p += strcspn(p, "%cdiouxXeEfgGaAnps");
        if(*p == '\0')              // If no format specifier, print the whole thing
        {
            fmt = format;
            break;
        }

        // Each argument is prefixed by its length (written by copyArg on the
        // device side); sanity-check it before trusting the buffer contents.
        int arglen = *(int *)data;
        if(arglen > CUPRINTF_MAX_LEN)
        {
            fputs("Corrupt printf buffer data - aborting\n", printf_fp);
            return 0;
        }
        data += CUPRINTF_ALIGN_SIZE;    // Step over the length field

        // Cut the conversion out of the format string and use printf to
        // render it with the right host-side type.
        char specifier = *p++;
        char c = *p;        // Store for later
        *p = '\0';
        switch(specifier)
        {
            // These all take integer arguments
            case 'c':
            case 'd':
            case 'i':
            case 'o':
            case 'u':
            case 'x':
            case 'X':
                fprintf(printf_fp, format, *((int *)data));
                break;

            // Pointers must be read at pointer width: reading them as int
            // truncated the value on 64-bit hosts (copyArg stored sizeof(void*)
            // bytes). Assumes device and host pointer widths match, which is
            // the usual CUDA configuration.
            case 'p':
                fprintf(printf_fp, format, *((void **)data));
                break;

            // These all take double arguments
            case 'e':
            case 'E':
            case 'f':
            case 'g':
            case 'G':
            case 'a':
            case 'A':
                if(arglen == 4)     // Float vs. Double thing
                    fprintf(printf_fp, format, *((float *)data));
                else
                    fprintf(printf_fp, format, *((double *)data));
                break;

            // Strings are handled in a special way
            case 's':
                fprintf(printf_fp, format, (char *)data);
                break;

            // % is special
            case '%':
                fprintf(printf_fp, "%%");
                break;

            // Everything else is just printed out as-is
            default:
                fprintf(printf_fp, "%s", format);
                break;
        }

        // Move on to the next argument. Scalar arguments always occupy one
        // aligned chunk, but a string occupies its full padded length -
        // cuPrintfStrncpy stores string + terminator + padding and records
        // that padded size as arglen - so advancing only CUPRINTF_ALIGN_SIZE
        // here would desynchronise every argument after a long %s.
        if(specifier == 's')
            data += arglen;
        else
            data += CUPRINTF_ALIGN_SIZE;

        *p = c;             // Restore what we removed
        fmt = p;            // Adjust fmt string to be past the specifier
        p = strchr(fmt, '%');   // and get the next specifier
    }

    // Print out the last of the string
    fputs(fmt, printf_fp);
    return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
// One extra byte guarantees the local copy is always NUL-terminated even
// if the record itself is full.
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
// fmtoffset == 0 is the device side's overflow marker (no format string).
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
extern "C" cudaError_t cudaPrintfInit(size_t bufferLen)
{
    // Round the requested size up to a whole number of CUPRINTF_MAX_LEN records.
    if(bufferLen < (size_t)CUPRINTF_MAX_LEN)
        bufferLen = CUPRINTF_MAX_LEN;
    size_t partial = bufferLen % CUPRINTF_MAX_LEN;
    if(partial > 0)
        bufferLen += CUPRINTF_MAX_LEN - partial;
    printfbuf_len = (int)bufferLen;

    // Allocate the device-side circular buffer and zero it.
    if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess)
        return cudaErrorInitializationError;
    cudaMemset(printfbuf_device, 0, printfbuf_len);
    printfbuf_start = printfbuf_device;     // Host read cursor starts at the beginning

    // Start out with output unrestricted for every thread and block.
    cuPrintfRestriction rules;
    rules.threadid = CUPRINTF_UNRESTRICTED;
    rules.blockid = CUPRINTF_UNRESTRICTED;
    cudaMemcpyToSymbol(restrictRules, &rules, sizeof(rules));

    // Publish the buffer location and size to the device-side globals.
    cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
    cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
    cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
    return cudaSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
extern "C" void cudaPrintfEnd()
{
    // Nothing to release unless cudaPrintfInit() succeeded earlier.
    if(printfbuf_start == NULL || printfbuf_device == NULL)
        return;
    cudaFree(printfbuf_device);
    printfbuf_device = NULL;
    printfbuf_start = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return cudaErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
// Walk the buffer one per-thread sub-buffer at a time.
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
// Remember where we got to so the next call only prints new records.
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
cudaMemset(printfbuf_device, 0, printfbuf_len);
return cudaSuccess;
}
// Cleanup
// Keep the configuration macros local to this translation unit.
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
// Closes the conditional opened earlier in this file (outside this excerpt).
#endif
|
62e14d910c524d2dcdda78e3a76d6563b44d7e03.hip | // !!! This is a file automatically generated by hipify!!!
#include "BackwardDeltaKernel.hpp"
#include "Constants.hpp"
#include <hip/hip_runtime.h>
using namespace neuralnetwork;
using namespace neuralnetwork::cuda;
// computes outDelta = tw * nextDelta (elemwisemul) layerOutput.derivatives
// Tiled shared-memory product: each thread owns one (batch row, layer column)
// output element and accumulates sum_k tw(col, k) * nextDelta(row, k) over
// blockDim.x-wide chunks of k, then scales by the activation derivative.
// Expects dynamic shared memory of 2 * spitch * blockDim.y floats, with
// spitch >= blockDim.x (the launcher passes a padded pitch).
__global__ void backwardDeltaKernel(LayerBatchDeltas nextDelta, LayerWeights tw,
                                    LayerBatchOutputs layerOutput, LayerBatchDeltas outDelta,
                                    unsigned spitch) {
  extern __shared__ float buf[]; // shared memory buffer
  const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
  const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
  const int numChunks = (tw.inputSize + blockDim.x - 1) / blockDim.x;
  // buffer for holding the layer weight matrix chunk
  float *twChunk = (float *) buf;
  // buffer for holding the prev outputs matrix chunk
  float *ndChunk = (float *) &buf[spitch * blockDim.y];
  // Note: the weight tile row is indexed with threadIdx.y here and read back
  // with threadIdx.x below, which effectively transposes the tile in-register.
  const int twRow = blockDim.x * blockIdx.x + threadIdx.y;
  const int ndRow = row;
  const int chunkIndex = threadIdx.x + threadIdx.y * spitch;
  const int lim = numChunks * blockDim.x;
  float sum = 0.0f;
  for (int chunkOffset = 0; chunkOffset < lim; chunkOffset += blockDim.x) {
    const int twCol = chunkOffset + threadIdx.x;
    if (twRow < tw.layerSize && twCol < tw.inputSize) {
      twChunk[chunkIndex] = *tw.Elem(twRow, twCol);
    }
    const int ndCol = twCol;
    if (ndRow < nextDelta.batchSize && ndCol < nextDelta.layerSize) {
      ndChunk[chunkIndex] = *nextDelta.Elem(ndRow, ndCol);
    }
    // All threads iterate the full chunk count, so these barriers are
    // reached uniformly even by threads outside the valid output range.
    __syncthreads();
    int chunkLim = min(blockDim.x, tw.inputSize - chunkOffset);
    for (int j = 0; j < chunkLim; j++) {
      sum += twChunk[j + threadIdx.x * spitch] * ndChunk[j + threadIdx.y * spitch];
    }
    __syncthreads();
  }
  if (row < outDelta.batchSize && col < outDelta.layerSize) {
    float od = *layerOutput.DerivativeElem(row, col);
    *outDelta.Elem(row, col) = sum * od;
  }
}
void BackwardDeltaKernel::Apply(LayerBatchDeltas nextDelta, LayerWeights transposedWeights,
                                LayerBatchOutputs layerOutput, LayerBatchDeltas outDelta,
                                hipStream_t stream) {
  // TODO: handle bank conflicts. Do the same in the forward kernel.
  // Shape invariants: the transposed weights connect the next layer's deltas
  // to this layer's outputs (the "- 1" presumably skips a bias unit), and all
  // batch sizes must agree.
  assert(nextDelta.layerSize == transposedWeights.inputSize);
  assert(outDelta.layerSize == transposedWeights.layerSize - 1);
  assert(outDelta.layerSize == layerOutput.layerSize - 1);
  assert(nextDelta.batchSize == layerOutput.batchSize);
  assert(nextDelta.batchSize == outDelta.batchSize);

  // One thread per outDelta element, rounded up to whole blocks.
  const int blocksX = (outDelta.layerSize + TPB_X - 1) / TPB_X;
  const int blocksY = (outDelta.batchSize + TPB_Y - 1) / TPB_Y;

  // The shared-memory row pitch is padded by one float; the kernel needs two
  // tiles of spitch * TPB_Y floats.
  const unsigned spitch = TPB_X + 1;
  const size_t sharedMemSize = 2 * spitch * TPB_Y * sizeof(float);

  const dim3 grid(blocksX, blocksY, 1);
  const dim3 block(TPB_X, TPB_Y, 1);
  hipLaunchKernelGGL(backwardDeltaKernel, grid, block, sharedMemSize, stream,
                     nextDelta, transposedWeights, layerOutput, outDelta, spitch);
}
| 62e14d910c524d2dcdda78e3a76d6563b44d7e03.cu |
#include "BackwardDeltaKernel.hpp"
#include "Constants.hpp"
#include <cuda_runtime.h>
using namespace neuralnetwork;
using namespace neuralnetwork::cuda;
// computes outDelta = tw * nextDelta (elemwisemul) layerOutput.derivatives
// Tiled shared-memory product: each thread owns one (batch row, layer column)
// output element and accumulates sum_k tw(col, k) * nextDelta(row, k) over
// blockDim.x-wide chunks of k, then scales by the activation derivative.
// Expects dynamic shared memory of 2 * spitch * blockDim.y floats, with
// spitch >= blockDim.x (the launcher passes a padded pitch).
__global__ void backwardDeltaKernel(LayerBatchDeltas nextDelta, LayerWeights tw,
                                    LayerBatchOutputs layerOutput, LayerBatchDeltas outDelta,
                                    unsigned spitch) {
  extern __shared__ float buf[]; // shared memory buffer
  const unsigned row = blockDim.y * blockIdx.y + threadIdx.y;
  const unsigned col = blockDim.x * blockIdx.x + threadIdx.x;
  const int numChunks = (tw.inputSize + blockDim.x - 1) / blockDim.x;
  // buffer for holding the layer weight matrix chunk
  float *twChunk = (float *) buf;
  // buffer for holding the prev outputs matrix chunk
  float *ndChunk = (float *) &buf[spitch * blockDim.y];
  // Note: the weight tile row is indexed with threadIdx.y here and read back
  // with threadIdx.x below, which effectively transposes the tile in-register.
  const int twRow = blockDim.x * blockIdx.x + threadIdx.y;
  const int ndRow = row;
  const int chunkIndex = threadIdx.x + threadIdx.y * spitch;
  const int lim = numChunks * blockDim.x;
  float sum = 0.0f;
  for (int chunkOffset = 0; chunkOffset < lim; chunkOffset += blockDim.x) {
    const int twCol = chunkOffset + threadIdx.x;
    if (twRow < tw.layerSize && twCol < tw.inputSize) {
      twChunk[chunkIndex] = *tw.Elem(twRow, twCol);
    }
    const int ndCol = twCol;
    if (ndRow < nextDelta.batchSize && ndCol < nextDelta.layerSize) {
      ndChunk[chunkIndex] = *nextDelta.Elem(ndRow, ndCol);
    }
    // All threads iterate the full chunk count, so these barriers are
    // reached uniformly even by threads outside the valid output range.
    __syncthreads();
    int chunkLim = min(blockDim.x, tw.inputSize - chunkOffset);
    for (int j = 0; j < chunkLim; j++) {
      sum += twChunk[j + threadIdx.x * spitch] * ndChunk[j + threadIdx.y * spitch];
    }
    __syncthreads();
  }
  if (row < outDelta.batchSize && col < outDelta.layerSize) {
    float od = *layerOutput.DerivativeElem(row, col);
    *outDelta.Elem(row, col) = sum * od;
  }
}
void BackwardDeltaKernel::Apply(LayerBatchDeltas nextDelta, LayerWeights transposedWeights,
                                LayerBatchOutputs layerOutput, LayerBatchDeltas outDelta,
                                cudaStream_t stream) {
  // TODO: handle bank conflicts. Do the same in the forward kernel.
  // Shape invariants: the transposed weights connect the next layer's deltas
  // to this layer's outputs (the "- 1" presumably skips a bias unit), and all
  // batch sizes must agree.
  assert(nextDelta.layerSize == transposedWeights.inputSize);
  assert(outDelta.layerSize == transposedWeights.layerSize - 1);
  assert(outDelta.layerSize == layerOutput.layerSize - 1);
  assert(nextDelta.batchSize == layerOutput.batchSize);
  assert(nextDelta.batchSize == outDelta.batchSize);

  // One thread per outDelta element, rounded up to whole blocks.
  const int blocksX = (outDelta.layerSize + TPB_X - 1) / TPB_X;
  const int blocksY = (outDelta.batchSize + TPB_Y - 1) / TPB_Y;

  // The shared-memory row pitch is padded by one float; the kernel needs two
  // tiles of spitch * TPB_Y floats.
  const unsigned spitch = TPB_X + 1;
  const size_t sharedMemSize = 2 * spitch * TPB_Y * sizeof(float);

  const dim3 grid(blocksX, blocksY, 1);
  const dim3 block(TPB_X, TPB_Y, 1);
  backwardDeltaKernel<<<grid, block, sharedMemSize, stream>>>(
      nextDelta, transposedWeights, layerOutput, outDelta, spitch);
}
|
5d264a69252ab0cd87d09f17b341b76eac8dcd78.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// This file serve as a simple example for adding a tunable op to onnxruntime.
// Select the GPU runtime headers for the backend being built. Exactly one of
// USE_CUDA / USE_ROCM is expected to be defined. The hipify conversion had
// rewritten the first condition to USE_ROCM as well, leaving a dead
// "#elif USE_ROCM" branch and routing ROCm builds through the CUDA headers;
// this restores the branching used by the parallel .cu source.
#if USE_CUDA
#include <cuda_runtime_api.h>
#include <cuda_fp16.h>
#elif USE_ROCM
#include <hip/hip_runtime_api.h>
#include <hip/hip_fp16.h>
#endif
#include <pybind11/pybind11.h>
#include <string>
#if USE_CUDA
#include "core/providers/cuda/tunable/cuda_tunable.h"
#elif USE_ROCM
#include "core/providers/rocm/tunable/rocm_tunable.h"
#endif
#include "python/tools/kernel_explorer/kernel_explorer_interface.h"
#include "python/tools/kernel_explorer/kernels/vector_add_kernel.cuh"
namespace py = pybind11;
namespace onnxruntime {
// #####################################################################################################################
// In practice, VectorAddParam, VectorAddOp and VectorAddTunableOp should be tightly integrated to onnxruntime.
// We place them here purely for demo purpose.
// #####################################################################################################################
// Extend the OpParams so that all specializations have the same parameter passing interface
// Parameter bundle shared by every VectorAdd specialization. Derives from the
// backend's OpParams; the hipify conversion had duplicated USE_ROCM in both
// preprocessor branches (dead "#elif USE_ROCM"), which made ROCm builds
// inherit from the CUDA-namespace base — restored to match the .cu source.
template <typename T>
struct VectorAddParams :
#if USE_CUDA
    cuda::tunable::OpParams
#elif USE_ROCM
    rocm::tunable::OpParams
#endif
{
  // Tuning-cache signature: the element count fully identifies the problem.
  std::string Signature() const override { return std::to_string(n); }
  T* x;  // device pointer, first operand passed to LaunchVectorAdd
  T* y;  // device pointer, second operand passed to LaunchVectorAdd
  T* z;  // device pointer, result buffer passed to LaunchVectorAdd
  int n; // number of elements
};
// Wrap the kernel function, so that we have a unified launch interface. If the kernel has state, the state can also
// be managed at this level via a functor
// Unified launch interface for one (threads-per-block, vector-width)
// specialization of the vector-add kernel; simply forwards the bundled
// parameters to LaunchVectorAdd.
template <typename T, int TPB, int Vec>
Status VectorAddOp(const VectorAddParams<T>* params) {
  return LaunchVectorAdd<T, TPB, Vec>(
      params->stream,
      params->x,
      params->y,
      params->z,
      params->n);
}
// Registers the vec-size 1/2/4/8 variants of VectorAddOp for one block size;
// expanded inside the VectorAddTunableOp constructor below.
#define ADD_OP(threads_per_block) \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 1>); \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 2>); \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 4>); \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 8>);
// A Tunable VectorAddOp is a collection of non-tunable VectorAddOps implementations that have variable performance
// characteristics. Those implementations may be put into a C++ container for tuner to select.
// Tunable wrapper collecting every VectorAddOp specialization so the tuner
// can profile them and pick the fastest. The hipify conversion had duplicated
// USE_ROCM in both preprocessor branches (dead "#elif USE_ROCM"), which made
// ROCm builds derive from the CUDA-namespace TunableOp — restored to match
// the parallel .cu source.
template <typename T>
class VectorAddTunableOp :
#if USE_CUDA
    public cuda::tunable::TunableOp<VectorAddParams<T>>
#elif USE_ROCM
    public rocm::tunable::TunableOp<VectorAddParams<T>>
#endif
{
 public:
  // Register all (threads-per-block in {64..512}) x (vec size in {1,2,4,8})
  // candidates.
  VectorAddTunableOp() {
    ADD_OP(64);
    ADD_OP(128);
    ADD_OP(192);
    ADD_OP(256);
    ADD_OP(320);
    ADD_OP(384);
    ADD_OP(448);
    ADD_OP(512);
  }
};

#undef ADD_OP
// #####################################################################################################################
// Following code just wraps our kernel implementation and expose them as python interface. This is the code that
// should be in the kernel_explorer directory.
// #####################################################################################################################
// Python-facing wrapper around one fixed (TPB, Vec) VectorAddOp
// specialization; Run() executes exactly that variant.
template <typename T, int TPB, int Vec>
class VectorAdd : public IKernelExplorer {
 public:
  // Captures device pointers from the DeviceArray wrappers; the arrays must
  // outlive this object.
  VectorAdd(DeviceArray& x, DeviceArray& y, DeviceArray& z, int n) {
    params_.tuning_ctx = TuningContext();
    params_.stream = Stream();
    params_.x = static_cast<T*>(x.ptr());
    params_.y = static_cast<T*>(y.ptr());
    params_.z = static_cast<T*>(z.ptr());
    params_.n = n;
  }
  // Launches the specialization; throws on a non-OK Status.
  void Run() override {
    ORT_THROW_IF_ERROR((VectorAddOp<T, TPB, Vec>(&params_)));
  }
 private:
  // A VectorAddOp<T> is a callable that can process const VectorAddParams<T>*
  using ParamsT = VectorAddParams<T>;
  ParamsT params_{};
};
// Python-facing wrapper around the tunable variant: each Run() goes through
// the stateful VectorAddTunableOp, which selects among its registered
// implementations.
template <typename T>
class VectorAddTunable : public IKernelExplorer {
 public:
  VectorAddTunable(DeviceArray& x, DeviceArray& y, DeviceArray& z, int n) {
    params_.tuning_ctx = TuningContext();
    params_.stream = Stream();
    params_.x = static_cast<T*>(x.ptr());
    params_.y = static_cast<T*>(y.ptr());
    params_.z = static_cast<T*>(z.ptr());
    params_.n = n;
    // Turn tuning on for this context so impl_ actually profiles/selects.
    params_.TuningContext()->EnableTunableOp();
  }
  void Run() override {
    ORT_THROW_IF_ERROR(impl_(&params_));
  }
 private:
  using ParamsT = VectorAddParams<T>;
  ParamsT params_;
  // tunable is stateful, store it as an instance
  VectorAddTunableOp<T> impl_;
};
// Binds one (type, threads-per-block, vec-size) specialization to Python as
// "<name>_<type>_<tpb>_<vec>".
#define REGISTER_OP(name, type, threads_per_block, vec_size) \
  py::class_<name<type, threads_per_block, vec_size>>(m, #name"_"#type"_"#threads_per_block"_"#vec_size) \
    .def(py::init<DeviceArray&, DeviceArray&, DeviceArray&, int>()) \
    .def("SetRepeats", &name<type, threads_per_block, vec_size>::SetRepeats) \
    .def("Profile", &name<type, threads_per_block, vec_size>::Profile) \
    .def("Run", &name<type, threads_per_block, vec_size>::Run);
// Expands REGISTER_OP for vec sizes 1/2/4/8.
#define REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, threads_per_block) \
  REGISTER_OP(name, type, threads_per_block, 1) \
  REGISTER_OP(name, type, threads_per_block, 2) \
  REGISTER_OP(name, type, threads_per_block, 4) \
  REGISTER_OP(name, type, threads_per_block, 8)
// Expands the above for every supported block size (64..512 in steps of 64).
#define REGISTER_OP_FOR_ALL_THREADS_PER_BLOCK(name, type) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 64) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 128) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 192) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 256) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 320) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 384) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 448) \
  REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 512)
// Binds the tunable wrapper as "VectorAdd_<type>_Tunable".
#define REGISTER_TUNABLE_OP(type) \
  py::class_<VectorAddTunable<type>>(m, "VectorAdd_" #type "_Tunable") \
    .def(py::init<DeviceArray&, DeviceArray&, DeviceArray&, int>()) \
    .def("SetRepeats", &VectorAddTunable<type>::SetRepeats) \
    .def("Profile", &VectorAddTunable<type>::Profile) \
    .def("Run", &VectorAddTunable<type>::Run);
// Module hook: register all half/float specializations plus the tunable ops.
KE_REGISTER(m) {
  REGISTER_OP_FOR_ALL_THREADS_PER_BLOCK(VectorAdd, half);
  REGISTER_OP_FOR_ALL_THREADS_PER_BLOCK(VectorAdd, float);
  REGISTER_TUNABLE_OP(half);
  REGISTER_TUNABLE_OP(float)  // macro body already ends with ';'
}
} // namespace onnxruntime
| 5d264a69252ab0cd87d09f17b341b76eac8dcd78.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// This file serve as a simple example for adding a tunable op to onnxruntime.
#if USE_CUDA
#include <cuda_runtime_api.h>
#include <cuda_fp16.h>
#elif USE_ROCM
#include <hip/hip_fp16.h>
#endif
#include <pybind11/pybind11.h>
#include <string>
#if USE_CUDA
#include "core/providers/cuda/tunable/cuda_tunable.h"
#elif USE_ROCM
#include "core/providers/rocm/tunable/rocm_tunable.h"
#endif
#include "python/tools/kernel_explorer/kernel_explorer_interface.h"
#include "python/tools/kernel_explorer/kernels/vector_add_kernel.cuh"
namespace py = pybind11;
namespace onnxruntime {
// #####################################################################################################################
// In practice, VectorAddParam, VectorAddOp and VectorAddTunableOp should be tightly integrated to onnxruntime.
// We place them here purely for demo purpose.
// #####################################################################################################################
// Extend the OpParams so that all specializations have the same parameter passing interface
// Parameter bundle shared by every VectorAdd specialization; derives from the
// OpParams of whichever backend (CUDA or ROCm) is being built.
template <typename T>
struct VectorAddParams :
#if USE_CUDA
    cuda::tunable::OpParams
#elif USE_ROCM
    rocm::tunable::OpParams
#endif
{
  // Tuning-cache signature: the element count fully identifies the problem.
  std::string Signature() const override { return std::to_string(n); }
  T* x;  // device pointer, first operand passed to LaunchVectorAdd
  T* y;  // device pointer, second operand passed to LaunchVectorAdd
  T* z;  // device pointer, result buffer passed to LaunchVectorAdd
  int n; // number of elements
};
// Wrap the kernel function, so that we have a unified launch interface. If the kernel has state, the state can also
// be managed at this level via a functor
// Unified launch interface for one (threads-per-block, vector-width)
// specialization of the vector-add kernel; simply forwards the bundled
// parameters to LaunchVectorAdd.
template <typename T, int TPB, int Vec>
Status VectorAddOp(const VectorAddParams<T>* params) {
  return LaunchVectorAdd<T, TPB, Vec>(
      params->stream,
      params->x,
      params->y,
      params->z,
      params->n);
}
// Registers the vec-size 1/2/4/8 variants of VectorAddOp for one block size;
// expanded inside the VectorAddTunableOp constructor below.
#define ADD_OP(threads_per_block) \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 1>); \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 2>); \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 4>); \
  this->RegisterOp(VectorAddOp<T, threads_per_block, 8>);
// A Tunable VectorAddOp is a collection of non-tunable VectorAddOps implementations that have variable performance
// characteristics. Those implementations may be put into a C++ container for tuner to select.
// Tunable wrapper collecting every VectorAddOp specialization so the tuner
// can profile them and pick the fastest for a given problem size.
template <typename T>
class VectorAddTunableOp :
#if USE_CUDA
    public cuda::tunable::TunableOp<VectorAddParams<T>>
#elif USE_ROCM
    public rocm::tunable::TunableOp<VectorAddParams<T>>
#endif
{
 public:
  // Register all (threads-per-block in {64..512}) x (vec size in {1,2,4,8})
  // candidates.
  VectorAddTunableOp() {
    ADD_OP(64);
    ADD_OP(128);
    ADD_OP(192);
    ADD_OP(256);
    ADD_OP(320);
    ADD_OP(384);
    ADD_OP(448);
    ADD_OP(512);
  }
};
// The registration helper is local to this class.
#undef ADD_OP
// #####################################################################################################################
// Following code just wraps our kernel implementation and expose them as python interface. This is the code that
// should be in the kernel_explorer directory.
// #####################################################################################################################
// Python-facing wrapper around one fixed (TPB, Vec) VectorAddOp
// specialization; Run() executes exactly that variant.
template <typename T, int TPB, int Vec>
class VectorAdd : public IKernelExplorer {
 public:
  // Captures device pointers from the DeviceArray wrappers; the arrays must
  // outlive this object.
  VectorAdd(DeviceArray& x, DeviceArray& y, DeviceArray& z, int n) {
    params_.tuning_ctx = TuningContext();
    params_.stream = Stream();
    params_.x = static_cast<T*>(x.ptr());
    params_.y = static_cast<T*>(y.ptr());
    params_.z = static_cast<T*>(z.ptr());
    params_.n = n;
  }
  // Launches the specialization; throws on a non-OK Status.
  void Run() override {
    ORT_THROW_IF_ERROR((VectorAddOp<T, TPB, Vec>(&params_)));
  }
 private:
  // A VectorAddOp<T> is a callable that can process const VectorAddParams<T>*
  using ParamsT = VectorAddParams<T>;
  ParamsT params_{};
};
// Python-facing wrapper around the tunable variant: each Run() goes through
// the stateful VectorAddTunableOp, which selects among its registered
// implementations.
template <typename T>
class VectorAddTunable : public IKernelExplorer {
 public:
  VectorAddTunable(DeviceArray& x, DeviceArray& y, DeviceArray& z, int n) {
    params_.tuning_ctx = TuningContext();
    params_.stream = Stream();
    params_.x = static_cast<T*>(x.ptr());
    params_.y = static_cast<T*>(y.ptr());
    params_.z = static_cast<T*>(z.ptr());
    params_.n = n;
    // Turn tuning on for this context so impl_ actually profiles/selects.
    params_.TuningContext()->EnableTunableOp();
  }
  void Run() override {
    ORT_THROW_IF_ERROR(impl_(&params_));
  }
 private:
  using ParamsT = VectorAddParams<T>;
  ParamsT params_;
  // tunable is stateful, store it as an instance
  VectorAddTunableOp<T> impl_;
};
#define REGISTER_OP(name, type, threads_per_block, vec_size) \
py::class_<name<type, threads_per_block, vec_size>>(m, #name"_"#type"_"#threads_per_block"_"#vec_size) \
.def(py::init<DeviceArray&, DeviceArray&, DeviceArray&, int>()) \
.def("SetRepeats", &name<type, threads_per_block, vec_size>::SetRepeats) \
.def("Profile", &name<type, threads_per_block, vec_size>::Profile) \
.def("Run", &name<type, threads_per_block, vec_size>::Run);
#define REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, threads_per_block) \
REGISTER_OP(name, type, threads_per_block, 1) \
REGISTER_OP(name, type, threads_per_block, 2) \
REGISTER_OP(name, type, threads_per_block, 4) \
REGISTER_OP(name, type, threads_per_block, 8)
#define REGISTER_OP_FOR_ALL_THREADS_PER_BLOCK(name, type) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 64) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 128) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 192) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 256) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 320) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 384) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 448) \
REGISTER_OP_FOR_ALL_VEC_SIZE(name, type, 512)
#define REGISTER_TUNABLE_OP(type) \
py::class_<VectorAddTunable<type>>(m, "VectorAdd_" #type "_Tunable") \
.def(py::init<DeviceArray&, DeviceArray&, DeviceArray&, int>()) \
.def("SetRepeats", &VectorAddTunable<type>::SetRepeats) \
.def("Profile", &VectorAddTunable<type>::Profile) \
.def("Run", &VectorAddTunable<type>::Run);
// Bind every non-tunable VectorAdd instantiation (all block sizes x all
// vector widths, for half and float) plus the two tunable wrappers into the
// kernel_explorer Python module.
KE_REGISTER(m) {
REGISTER_OP_FOR_ALL_THREADS_PER_BLOCK(VectorAdd, half);
REGISTER_OP_FOR_ALL_THREADS_PER_BLOCK(VectorAdd, float);
REGISTER_TUNABLE_OP(half);
REGISTER_TUNABLE_OP(float)
}
} // namespace onnxruntime
|
a90114bb6614ac5f722a8f77c6f519c0d768322e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Get gravitational masses.
Copyright (C) 2011 Edgard Nikitiuk <edgnik@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _GET_MASS_
#define _GET_MASS_
#include "gsa.h"
// One block per agent (p = blockIdx.x): normalize the agent's fitness into
// [0, 1] relative to the current best/worst fitness. When every agent has
// the same fitness (best == worst) all masses degenerate to 1, which also
// avoids a division by zero.
__global__ void get_mass( float *best, float *worst, float *fitness, float *mass )
{
    const int p = blockIdx.x;
    mass[p] = (*best == *worst)
                  ? 1.0f
                  : (fitness[p] - *worst) / (*best - *worst);
}
#endif
| a90114bb6614ac5f722a8f77c6f519c0d768322e.cu | /*
Get gravitational masses.
Copyright (C) 2011 Edgard Nikitiuk <edgnik@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _GET_MASS_
#define _GET_MASS_
#include "gsa.h"
// One block per agent (p = blockIdx.x): normalize the agent's fitness into
// [0, 1] relative to the current best/worst fitness. When every agent has
// the same fitness (best == worst) all masses degenerate to 1, which also
// avoids a division by zero.
__global__ void get_mass( float *best, float *worst, float *fitness, float *mass )
{
    const int p = blockIdx.x;
    mass[p] = (*best == *worst)
                  ? 1.0f
                  : (fitness[p] - *worst) / (*best - *worst);
}
#endif
|
c0bfe0b45913fb1983d2b106edc7fd7ee2c3287e.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2015, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/dim4.hpp>
#include <af/defines.h>
#include <ArrayInfo.hpp>
#include <Array.hpp>
#include <err_cuda.hpp>
#include <handle.hpp>
#include <arith.hpp>
#include <random.hpp>
#include <kernel/homography.hpp>
#include <algorithm>
#include <iostream>
#include <cfloat>
using af::dim4;
namespace cuda
{
#define RANSACConfidence 0.99f
#define LMEDSConfidence 0.99f
#define LMEDSOutlierRatio 0.4f
// Robustly estimate a 3x3 homography bestH mapping matched feature points
// (x_src, y_src) -> (x_dst, y_dst).
// htype selects RANSAC or LMedS, inlier_thr is the inlier distance threshold,
// and iterations caps the number of random 4-point hypotheses tested.
// Returns whatever kernel::computeH reports (presumably the best model's
// inlier count — verify against the kernel).
template<typename T>
int homography(Array<T> &bestH,
               const Array<float> &x_src,
               const Array<float> &y_src,
               const Array<float> &x_dst,
               const Array<float> &y_dst,
               const af_homography_type htype,
               const float inlier_thr,
               const unsigned iterations)
{
    const af::dim4 idims = x_src.dims();
    const unsigned nsamples = idims[0];

    unsigned iter = iterations;
    Array<float> err = createEmptyArray<float>(af::dim4());
    if (htype == AF_HOMOGRAPHY_LMEDS) {
        // BUG FIX: "::::min" was a hipify translation artifact; restore
        // "::std::min" as in the original CUDA source. Caps the iteration
        // count using the standard LMedS confidence formula.
        iter = ::std::min(iter, (unsigned)(log(1.f - LMEDSConfidence) / log(1.f - pow(1.f - LMEDSOutlierRatio, 4.f))));
        err = createValueArray<float>(af::dim4(nsamples, iter), FLT_MAX);
    }

    // Draw 4 random sample indices per iteration, scaled into [0, nsamples).
    af::dim4 rdims(4, iter);
    Array<float> frnd = randu<float>(rdims);
    Array<float> fctr = createValueArray<float>(rdims, (float)nsamples);
    Array<float> rnd = arithOp<float, af_mul_t>(frnd, fctr, rdims);

    Array<T> tmpH = createValueArray<T>(af::dim4(9, iter), (T)0);
    bestH = createValueArray<T>(af::dim4(3, 3), (T)0);

    return kernel::computeH<T>(bestH, tmpH, err,
                               x_src, y_src, x_dst, y_dst,
                               rnd, iter, nsamples, inlier_thr, htype);
}
#define INSTANTIATE(T) \
template int homography<T>(Array<T> &H, \
const Array<float> &x_src, const Array<float> &y_src, \
const Array<float> &x_dst, const Array<float> &y_dst, \
const af_homography_type htype, const float inlier_thr, \
const unsigned iterations);
INSTANTIATE(float )
INSTANTIATE(double)
}
| c0bfe0b45913fb1983d2b106edc7fd7ee2c3287e.cu | /*******************************************************
* Copyright (c) 2015, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/dim4.hpp>
#include <af/defines.h>
#include <ArrayInfo.hpp>
#include <Array.hpp>
#include <err_cuda.hpp>
#include <handle.hpp>
#include <arith.hpp>
#include <random.hpp>
#include <kernel/homography.hpp>
#include <algorithm>
#include <iostream>
#include <cfloat>
using af::dim4;
namespace cuda
{
#define RANSACConfidence 0.99f
#define LMEDSConfidence 0.99f
#define LMEDSOutlierRatio 0.4f
// Robustly estimate a 3x3 homography bestH mapping matched feature points
// (x_src, y_src) -> (x_dst, y_dst).
// htype selects RANSAC or LMedS, inlier_thr is the inlier distance threshold,
// and iterations caps the number of random 4-point hypotheses tested.
// Returns whatever kernel::computeH reports (presumably the best model's
// inlier count — verify against the kernel).
template<typename T>
int homography(Array<T> &bestH,
const Array<float> &x_src,
const Array<float> &y_src,
const Array<float> &x_dst,
const Array<float> &y_dst,
const af_homography_type htype,
const float inlier_thr,
const unsigned iterations)
{
const af::dim4 idims = x_src.dims();
const unsigned nsamples = idims[0];
unsigned iter = iterations;
Array<float> err = createEmptyArray<float>(af::dim4());
if (htype == AF_HOMOGRAPHY_LMEDS) {
// LMedS: cap the iteration count via the standard confidence formula,
// and pre-fill the per-sample error matrix with FLT_MAX.
iter = ::std::min(iter, (unsigned)(log(1.f - LMEDSConfidence) / log(1.f - pow(1.f - LMEDSOutlierRatio, 4.f))));
err = createValueArray<float>(af::dim4(nsamples, iter), FLT_MAX);
}
// Draw 4 random sample indices per iteration, scaled into [0, nsamples).
af::dim4 rdims(4, iter);
Array<float> frnd = randu<float>(rdims);
Array<float> fctr = createValueArray<float>(rdims, (float)nsamples);
Array<float> rnd = arithOp<float, af_mul_t>(frnd, fctr, rdims);
Array<T> tmpH = createValueArray<T>(af::dim4(9, iter), (T)0);
bestH = createValueArray<T>(af::dim4(3, 3), (T)0);
return kernel::computeH<T>(bestH, tmpH, err,
x_src, y_src, x_dst, y_dst,
rnd, iter, nsamples, inlier_thr, htype);
}
#define INSTANTIATE(T) \
template int homography<T>(Array<T> &H, \
const Array<float> &x_src, const Array<float> &y_src, \
const Array<float> &x_dst, const Array<float> &y_dst, \
const af_homography_type htype, const float inlier_thr, \
const unsigned iterations);
INSTANTIATE(float )
INSTANTIATE(double)
}
|
a79954460ff40203fc978371848f46d00e6e3e00.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <string>
#include <map>
#include <fstream>
#include <sys/time.h>
#include <cupti.h>
#include <sassi/sassi-core.hpp>
#include <sassi/sassi-regs.hpp>
#include <sassi/sassi-memory.hpp>
#include "sassi/sassi-opcodes.h"
#include "sassi_intrinsics.h"
#include "sassi_dictionary.hpp"
#include "sassi_lazyallocator.hpp"
#include "error_injector.h"
std::map<std::string, int> knameCount;
std::ofstream ofs;
#if TIMING
struct timeval start, end;
float mTotalTime = 0;
#endif
// This function will be called before every SASS instruction gets executed
// SASSI instrumentation hook, invoked before every SASS instruction.
// Routes instructions that will actually execute to the profiler, and
// separately counts predicated-off (will-not-execute) instructions.
__device__ void sassi_before_handler(SASSIBeforeParams* bp, SASSIMemoryParams *mp, SASSIRegisterParams *rp) {
#if EMPTY_HANDLER
// Compile-time switch to measure bare instrumentation overhead.
return;
#endif
if (bp->GetInstrWillExecute()) {
profile_instructions(bp, mp, rp);
} else {
profile_will_not_execute_instructions();
}
}
// This function will be exected before a kernel is launced
// CUPTI callback fired just before each kernel launch: zero the per-kernel
// profiling counters (and start the wall-clock timer when TIMING is on).
static void onKernelEntry(const CUpti_CallbackData *cbInfo) {
reset_profiling_counters(); // reset profiling counters
#if TIMING
gettimeofday(&start, NULL);
#endif
}
// This function will be exected after the kernel exits
// CUPTI callback fired after each kernel completes.
// Appends one colon-separated record to the profile stream: kernel name,
// per-name launch index, per-instruction-type counters, the count of
// instructions whose guard predicate was false, and per-opcode counters.
static void onKernelExit(const CUpti_CallbackData *cbInfo) {
// Surface any error the launch itself returned.
hipError_t * error = (hipError_t*) cbInfo->functionReturnValue;
if ( (*error) != hipSuccess ) {
printf("Kernel Exit Error: %d", (*error));
}
// print per thread counters
// Track how many times each kernel name has been seen so repeated launches
// produce distinct records (first launch gets index 0).
std::string kName = cbInfo->symbolName; // name of kernel
if (knameCount.find(kName) == knameCount.end()) {
knameCount[kName] = 0;
} else {
knameCount[kName] += 1;
}
char numstr[21]; // enough to hold all numbers up to 64-bits
sprintf(numstr, "%d", knameCount[kName]); // convert int to string
if (INJ_DEBUG_LIGHT) {
printf("%s: count=%d\n", kName.c_str(), knameCount[kName]);
}
// Record format: name:index:<inst-type counters>:<not-executed>:<opcode counters>
ofs << kName << ":" << numstr;
for (int i=0; i<NUM_INST_TYPES; i++) {
ofs << ":" << injCountersInstType[i] ;
}
ofs << ":" << opWillNotExecuteCount; // print the number of operations that will not execute
for (int i=0; i<SASSI_NUM_OPCODES; i++) {
ofs << ":" << opCounters[i] ;
}
ofs << "\n";
#if TIMING
// Wall-clock time since onKernelEntry, in milliseconds.
gettimeofday(&end, NULL);
long seconds, useconds;
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
float mTime = ((seconds) * 1000 + useconds/1000.0);
printf("\nTime for %s: %f ms\n", cbInfo->symbolName, mTime);
mTotalTime += mTime;
#endif
}
// SASSI startup hook: open the profile output file and write its header row.
static void sassi_init()
{
if (INJ_DEBUG_LIGHT)
printf("Writing to filename:%s\n", profileFilename.c_str());
ofs.open(profileFilename.c_str(), std::ofstream::out);
ofs << get_profile_format();
}
// SASSI teardown hook (invoked on device reset/exit): close the profile
// file and, when TIMING is on, report the accumulated kernel time.
static void sassi_finalize(sassi::lazy_allocator::device_reset_reason reason)
{
ofs.close();
#if TIMING
printf("\nTotal kernel time: %f ms\n", mTotalTime);
#endif
}
static sassi::lazy_allocator profilerInit(sassi_init, sassi_finalize, onKernelEntry, onKernelExit);
| a79954460ff40203fc978371848f46d00e6e3e00.cu | /* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <string>
#include <map>
#include <fstream>
#include <sys/time.h>
#include <cupti.h>
#include <sassi/sassi-core.hpp>
#include <sassi/sassi-regs.hpp>
#include <sassi/sassi-memory.hpp>
#include "sassi/sassi-opcodes.h"
#include "sassi_intrinsics.h"
#include "sassi_dictionary.hpp"
#include "sassi_lazyallocator.hpp"
#include "error_injector.h"
std::map<std::string, int> knameCount;
std::ofstream ofs;
#if TIMING
struct timeval start, end;
float mTotalTime = 0;
#endif
// This function will be called before every SASS instruction gets executed
// SASSI instrumentation hook, invoked before every SASS instruction.
// Routes instructions that will actually execute to the profiler, and
// separately counts predicated-off (will-not-execute) instructions.
__device__ void sassi_before_handler(SASSIBeforeParams* bp, SASSIMemoryParams *mp, SASSIRegisterParams *rp) {
#if EMPTY_HANDLER
// Compile-time switch to measure bare instrumentation overhead.
return;
#endif
if (bp->GetInstrWillExecute()) {
profile_instructions(bp, mp, rp);
} else {
profile_will_not_execute_instructions();
}
}
// This function will be exected before a kernel is launced
// CUPTI callback fired just before each kernel launch: zero the per-kernel
// profiling counters (and start the wall-clock timer when TIMING is on).
static void onKernelEntry(const CUpti_CallbackData *cbInfo) {
reset_profiling_counters(); // reset profiling counters
#if TIMING
gettimeofday(&start, NULL);
#endif
}
// This function will be exected after the kernel exits
// CUPTI callback fired after each kernel completes.
// Appends one colon-separated record to the profile stream: kernel name,
// per-name launch index, per-instruction-type counters, the count of
// instructions whose guard predicate was false, and per-opcode counters.
static void onKernelExit(const CUpti_CallbackData *cbInfo) {
// Surface any error the launch itself returned.
cudaError_t * error = (cudaError_t*) cbInfo->functionReturnValue;
if ( (*error) != cudaSuccess ) {
printf("Kernel Exit Error: %d", (*error));
}
// print per thread counters
// Track how many times each kernel name has been seen so repeated launches
// produce distinct records (first launch gets index 0).
std::string kName = cbInfo->symbolName; // name of kernel
if (knameCount.find(kName) == knameCount.end()) {
knameCount[kName] = 0;
} else {
knameCount[kName] += 1;
}
char numstr[21]; // enough to hold all numbers up to 64-bits
sprintf(numstr, "%d", knameCount[kName]); // convert int to string
if (INJ_DEBUG_LIGHT) {
printf("%s: count=%d\n", kName.c_str(), knameCount[kName]);
}
// Record format: name:index:<inst-type counters>:<not-executed>:<opcode counters>
ofs << kName << ":" << numstr;
for (int i=0; i<NUM_INST_TYPES; i++) {
ofs << ":" << injCountersInstType[i] ;
}
ofs << ":" << opWillNotExecuteCount; // print the number of operations that will not execute
for (int i=0; i<SASSI_NUM_OPCODES; i++) {
ofs << ":" << opCounters[i] ;
}
ofs << "\n";
#if TIMING
// Wall-clock time since onKernelEntry, in milliseconds.
gettimeofday(&end, NULL);
long seconds, useconds;
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
float mTime = ((seconds) * 1000 + useconds/1000.0);
printf("\nTime for %s: %f ms\n", cbInfo->symbolName, mTime);
mTotalTime += mTime;
#endif
}
// SASSI startup hook: open the profile output file and write its header row.
static void sassi_init()
{
if (INJ_DEBUG_LIGHT)
printf("Writing to filename:%s\n", profileFilename.c_str());
ofs.open(profileFilename.c_str(), std::ofstream::out);
ofs << get_profile_format();
}
// SASSI teardown hook (invoked on device reset/exit): close the profile
// file and, when TIMING is on, report the accumulated kernel time.
static void sassi_finalize(sassi::lazy_allocator::device_reset_reason reason)
{
ofs.close();
#if TIMING
printf("\nTotal kernel time: %f ms\n", mTotalTime);
#endif
}
static sassi::lazy_allocator profilerInit(sassi_init, sassi_finalize, onKernelEntry, onKernelExit);
|
775a8bfa1a4a19cdea155f21e81b3f1dbd13caaa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include <cutil.h>
#define TILE_WIDTH 64
#define WIDTH_PER_THREAD 8
#define SW TILE_WIDTH/WIDTH_PER_THREAD
#define N 2048
texture<float, 2, hipReadModeElementType> tex_A;
texture<float, 2, hipReadModeElementType> tex_B;
surface<void, 2> surf_C;
// Abort the program with a context message when the recorded HIP status is
// not hipSuccess; no-op otherwise.
// Improvement: also print the runtime's own error string — the original
// message alone gave no clue which HIP error actually occurred.
void err_handling(hipError_t *err, const char *str)
{
    if (*err != hipSuccess) {
        printf("%s: %s\n", str, hipGetErrorString(*err));
        exit(EXIT_FAILURE);
    }
}
// 64x64-tile GEMM: C = A * B for N x N float matrices read through the 2D
// textures tex_A/tex_B and written through surface surf_C.
// Launch contract: blockDim = (8, 8); each thread accumulates an 8x8
// register tile, so one block covers a 64x64 tile of C. Shared memory is
// double-buffered (sA_bf/sB_bf) so the texture fetch of the next k-slice
// overlaps the FMAs on the current one. Out-of-range reads rely on the
// textures' border addressing (return 0); out-of-range writes are dropped
// by hipBoundaryModeZero — so no explicit bounds guards are needed.
// NOTE: the A/B/C pointer arguments are not referenced in the body; all
// data flows through textures/surface.
__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
__shared__ float sA_bf[2][8*64];
__shared__ float sB_bf[2][8*64];
float *A_pref, *A_now;
float *B_pref, *B_now;
int x = threadIdx.x;
int y = threadIdx.y;
int bx = blockIdx.x*64;
int by = blockIdx.y*64;
int id = y*8+x;
// Swizzled thread id used only for the staging loads; presumably spreads a
// warp's shared-memory writes to avoid bank conflicts — TODO(review)
// confirm the intended layout.
int inv_id = (id%32)/4*8 + id%4 + (id < 32 ? 0 : 4);
int glbA_id = by + inv_id;
int glbB_id = bx + inv_id;
// Top-left element of this thread's 8x8 output tile.
int row = by + y*8;
int col = bx + x*8;
// Per-step operand fragments: an 8x1 column of A and a 1x8 row of B.
float a[8];
float b[8];
// 64 accumulators for the 8x8 register tile.
float c00 = 0.0; float c01 = 0.0; float c02 = 0.0; float c03 = 0.0, c04 = 0.0; float c05 = 0.0; float c06 = 0.0; float c07 = 0.0;
float c10 = 0.0; float c11 = 0.0; float c12 = 0.0; float c13 = 0.0, c14 = 0.0; float c15 = 0.0; float c16 = 0.0; float c17 = 0.0;
float c20 = 0.0; float c21 = 0.0; float c22 = 0.0; float c23 = 0.0, c24 = 0.0; float c25 = 0.0; float c26 = 0.0; float c27 = 0.0;
float c30 = 0.0; float c31 = 0.0; float c32 = 0.0; float c33 = 0.0, c34 = 0.0; float c35 = 0.0; float c36 = 0.0; float c37 = 0.0;
float c40 = 0.0; float c41 = 0.0; float c42 = 0.0; float c43 = 0.0, c44 = 0.0; float c45 = 0.0; float c46 = 0.0; float c47 = 0.0;
float c50 = 0.0; float c51 = 0.0; float c52 = 0.0; float c53 = 0.0, c54 = 0.0; float c55 = 0.0; float c56 = 0.0; float c57 = 0.0;
float c60 = 0.0; float c61 = 0.0; float c62 = 0.0; float c63 = 0.0, c64 = 0.0; float c65 = 0.0; float c66 = 0.0; float c67 = 0.0;
float c70 = 0.0; float c71 = 0.0; float c72 = 0.0; float c73 = 0.0, c74 = 0.0; float c75 = 0.0; float c76 = 0.0; float c77 = 0.0;
// Stage the first 8-wide k-slice of the A and B tiles into buffer 0.
sA_bf[0][0*64+id] = tex2D(tex_A, 0, glbA_id);
sA_bf[0][1*64+id] = tex2D(tex_A, 1, glbA_id);
sA_bf[0][2*64+id] = tex2D(tex_A, 2, glbA_id);
sA_bf[0][3*64+id] = tex2D(tex_A, 3, glbA_id);
sA_bf[0][4*64+id] = tex2D(tex_A, 4, glbA_id);
sA_bf[0][5*64+id] = tex2D(tex_A, 5, glbA_id);
sA_bf[0][6*64+id] = tex2D(tex_A, 6, glbA_id);
sA_bf[0][7*64+id] = tex2D(tex_A, 7, glbA_id);
sB_bf[0][0*64+id] = tex2D(tex_B, glbB_id, 0);
sB_bf[0][1*64+id] = tex2D(tex_B, glbB_id, 1);
sB_bf[0][2*64+id] = tex2D(tex_B, glbB_id, 2);
sB_bf[0][3*64+id] = tex2D(tex_B, glbB_id, 3);
sB_bf[0][4*64+id] = tex2D(tex_B, glbB_id, 4);
sB_bf[0][5*64+id] = tex2D(tex_B, glbB_id, 5);
sB_bf[0][6*64+id] = tex2D(tex_B, glbB_id, 6);
sB_bf[0][7*64+id] = tex2D(tex_B, glbB_id, 7);
// Compute on buffer 0 while prefetching into buffer 1.
A_pref = sA_bf[1];
B_pref = sB_bf[1];
A_now = sA_bf[0];
B_now = sB_bf[0];
int track_bf = 0;
// March over the k dimension in slices of 8 (all but the last slice here;
// the last slice is drained after the loop with no further prefetch).
for (int t = 8; t < k; t += 8) {
// Ensure the front buffer is fully staged before anyone reads it, and
// that the back buffer is no longer being read before overwriting it.
__syncthreads();
A_pref[0*64+id] = tex2D(tex_A, t , glbA_id);
A_pref[1*64+id] = tex2D(tex_A, t+1, glbA_id);
A_pref[2*64+id] = tex2D(tex_A, t+2, glbA_id);
A_pref[3*64+id] = tex2D(tex_A, t+3, glbA_id);
A_pref[4*64+id] = tex2D(tex_A, t+4, glbA_id);
A_pref[5*64+id] = tex2D(tex_A, t+5, glbA_id);
A_pref[6*64+id] = tex2D(tex_A, t+6, glbA_id);
A_pref[7*64+id] = tex2D(tex_A, t+7, glbA_id);
B_pref[0*64+id] = tex2D(tex_B, glbB_id, t );
B_pref[1*64+id] = tex2D(tex_B, glbB_id, t+1);
B_pref[2*64+id] = tex2D(tex_B, glbB_id, t+2);
B_pref[3*64+id] = tex2D(tex_B, glbB_id, t+3);
B_pref[4*64+id] = tex2D(tex_B, glbB_id, t+4);
B_pref[5*64+id] = tex2D(tex_B, glbB_id, t+5);
B_pref[6*64+id] = tex2D(tex_B, glbB_id, t+6);
B_pref[7*64+id] = tex2D(tex_B, glbB_id, t+7);
// 8 outer-product steps on the front buffer: each thread loads its 8x1
// A-column and 1x8 B-row as two float4s each and does 64 FMAs.
#pragma unroll
for (int i = 0; i < 8; ++i) {
int base = i * 16;
((float4*)a)[0] = ((float4*)A_now)[base+y];
((float4*)a)[1] = ((float4*)A_now)[base+y+8];
((float4*)b)[0] = ((float4*)B_now)[base+x];
((float4*)b)[1] = ((float4*)B_now)[base+x+8];
c00 += a[0] * b[0];
c01 += a[0] * b[1];
c02 += a[0] * b[2];
c03 += a[0] * b[3];
c04 += a[0] * b[4];
c05 += a[0] * b[5];
c06 += a[0] * b[6];
c07 += a[0] * b[7];
c10 += a[1] * b[0];
c11 += a[1] * b[1];
c12 += a[1] * b[2];
c13 += a[1] * b[3];
c14 += a[1] * b[4];
c15 += a[1] * b[5];
c16 += a[1] * b[6];
c17 += a[1] * b[7];
c20 += a[2] * b[0];
c21 += a[2] * b[1];
c22 += a[2] * b[2];
c23 += a[2] * b[3];
c24 += a[2] * b[4];
c25 += a[2] * b[5];
c26 += a[2] * b[6];
c27 += a[2] * b[7];
c30 += a[3] * b[0];
c31 += a[3] * b[1];
c32 += a[3] * b[2];
c33 += a[3] * b[3];
c34 += a[3] * b[4];
c35 += a[3] * b[5];
c36 += a[3] * b[6];
c37 += a[3] * b[7];
c40 += a[4] * b[0];
c41 += a[4] * b[1];
c42 += a[4] * b[2];
c43 += a[4] * b[3];
c44 += a[4] * b[4];
c45 += a[4] * b[5];
c46 += a[4] * b[6];
c47 += a[4] * b[7];
c50 += a[5] * b[0];
c51 += a[5] * b[1];
c52 += a[5] * b[2];
c53 += a[5] * b[3];
c54 += a[5] * b[4];
c55 += a[5] * b[5];
c56 += a[5] * b[6];
c57 += a[5] * b[7];
c60 += a[6] * b[0];
c61 += a[6] * b[1];
c62 += a[6] * b[2];
c63 += a[6] * b[3];
c64 += a[6] * b[4];
c65 += a[6] * b[5];
c66 += a[6] * b[6];
c67 += a[6] * b[7];
c70 += a[7] * b[0];
c71 += a[7] * b[1];
c72 += a[7] * b[2];
c73 += a[7] * b[3];
c74 += a[7] * b[4];
c75 += a[7] * b[5];
c76 += a[7] * b[6];
c77 += a[7] * b[7];
}
// Swap front/back buffers for the next slice.
A_pref = sA_bf[track_bf];
B_pref = sB_bf[track_bf];
A_now = sA_bf[1-track_bf];
B_now = sB_bf[1-track_bf];
track_bf = 1 - track_bf;
}
__syncthreads();
// Drain: compute on the final staged slice (no further prefetch).
#pragma unroll
for (int i = 0; i < 8; ++i) {
int base = i * 16;
((float4*)a)[0] = ((float4*)A_now)[base+y];
((float4*)a)[1] = ((float4*)A_now)[base+y+8];
((float4*)b)[0] = ((float4*)B_now)[base+x];
((float4*)b)[1] = ((float4*)B_now)[base+x+8];
c00 += a[0] * b[0];
c01 += a[0] * b[1];
c02 += a[0] * b[2];
c03 += a[0] * b[3];
c04 += a[0] * b[4];
c05 += a[0] * b[5];
c06 += a[0] * b[6];
c07 += a[0] * b[7];
c10 += a[1] * b[0];
c11 += a[1] * b[1];
c12 += a[1] * b[2];
c13 += a[1] * b[3];
c14 += a[1] * b[4];
c15 += a[1] * b[5];
c16 += a[1] * b[6];
c17 += a[1] * b[7];
c20 += a[2] * b[0];
c21 += a[2] * b[1];
c22 += a[2] * b[2];
c23 += a[2] * b[3];
c24 += a[2] * b[4];
c25 += a[2] * b[5];
c26 += a[2] * b[6];
c27 += a[2] * b[7];
c30 += a[3] * b[0];
c31 += a[3] * b[1];
c32 += a[3] * b[2];
c33 += a[3] * b[3];
c34 += a[3] * b[4];
c35 += a[3] * b[5];
c36 += a[3] * b[6];
c37 += a[3] * b[7];
c40 += a[4] * b[0];
c41 += a[4] * b[1];
c42 += a[4] * b[2];
c43 += a[4] * b[3];
c44 += a[4] * b[4];
c45 += a[4] * b[5];
c46 += a[4] * b[6];
c47 += a[4] * b[7];
c50 += a[5] * b[0];
c51 += a[5] * b[1];
c52 += a[5] * b[2];
c53 += a[5] * b[3];
c54 += a[5] * b[4];
c55 += a[5] * b[5];
c56 += a[5] * b[6];
c57 += a[5] * b[7];
c60 += a[6] * b[0];
c61 += a[6] * b[1];
c62 += a[6] * b[2];
c63 += a[6] * b[3];
c64 += a[6] * b[4];
c65 += a[6] * b[5];
c66 += a[6] * b[6];
c67 += a[6] * b[7];
c70 += a[7] * b[0];
c71 += a[7] * b[1];
c72 += a[7] * b[2];
c73 += a[7] * b[3];
c74 += a[7] * b[4];
c75 += a[7] * b[5];
c76 += a[7] * b[6];
c77 += a[7] * b[7];
}
// Write the 8x8 register tile to C through the surface; boundary-mode zero
// silently drops writes that fall outside the array.
surf2Dwrite(c00, surf_C, (col )*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c01, surf_C, (col+1)*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c02, surf_C, (col+2)*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c03, surf_C, (col+3)*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c04, surf_C, (col+4)*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c05, surf_C, (col+5)*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c06, surf_C, (col+6)*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c07, surf_C, (col+7)*sizeof(float), row , hipBoundaryModeZero);
surf2Dwrite(c10, surf_C, (col )*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c11, surf_C, (col+1)*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c12, surf_C, (col+2)*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c13, surf_C, (col+3)*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c14, surf_C, (col+4)*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c15, surf_C, (col+5)*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c16, surf_C, (col+6)*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c17, surf_C, (col+7)*sizeof(float), row+1 , hipBoundaryModeZero);
surf2Dwrite(c20, surf_C, (col )*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c21, surf_C, (col+1)*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c22, surf_C, (col+2)*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c23, surf_C, (col+3)*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c24, surf_C, (col+4)*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c25, surf_C, (col+5)*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c26, surf_C, (col+6)*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c27, surf_C, (col+7)*sizeof(float), row+2 , hipBoundaryModeZero);
surf2Dwrite(c30, surf_C, (col )*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c31, surf_C, (col+1)*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c32, surf_C, (col+2)*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c33, surf_C, (col+3)*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c34, surf_C, (col+4)*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c35, surf_C, (col+5)*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c36, surf_C, (col+6)*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c37, surf_C, (col+7)*sizeof(float), row+3 , hipBoundaryModeZero);
surf2Dwrite(c40, surf_C, (col )*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c41, surf_C, (col+1)*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c42, surf_C, (col+2)*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c43, surf_C, (col+3)*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c44, surf_C, (col+4)*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c45, surf_C, (col+5)*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c46, surf_C, (col+6)*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c47, surf_C, (col+7)*sizeof(float), row+4 , hipBoundaryModeZero);
surf2Dwrite(c50, surf_C, (col )*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c51, surf_C, (col+1)*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c52, surf_C, (col+2)*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c53, surf_C, (col+3)*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c54, surf_C, (col+4)*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c55, surf_C, (col+5)*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c56, surf_C, (col+6)*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c57, surf_C, (col+7)*sizeof(float), row+5 , hipBoundaryModeZero);
surf2Dwrite(c60, surf_C, (col )*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c61, surf_C, (col+1)*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c62, surf_C, (col+2)*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c63, surf_C, (col+3)*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c64, surf_C, (col+4)*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c65, surf_C, (col+5)*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c66, surf_C, (col+6)*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c67, surf_C, (col+7)*sizeof(float), row+6 , hipBoundaryModeZero);
surf2Dwrite(c70, surf_C, (col )*sizeof(float), row+7 , hipBoundaryModeZero);
surf2Dwrite(c71, surf_C, (col+1)*sizeof(float), row+7 , hipBoundaryModeZero);
surf2Dwrite(c72, surf_C, (col+2)*sizeof(float), row+7 , hipBoundaryModeZero);
surf2Dwrite(c73, surf_C, (col+3)*sizeof(float), row+7 , hipBoundaryModeZero);
surf2Dwrite(c74, surf_C, (col+4)*sizeof(float), row+7 , hipBoundaryModeZero);
surf2Dwrite(c75, surf_C, (col+5)*sizeof(float), row+7 , hipBoundaryModeZero);
surf2Dwrite(c76, surf_C, (col+6)*sizeof(float), row+7 , hipBoundaryModeZero);
surf2Dwrite(c77, surf_C, (col+7)*sizeof(float), row+7 , hipBoundaryModeZero);
}
// Host driver: builds two random N x N matrices, multiplies them with the
// texture/surface matMul kernel, times the kernel with HIP events and dumps
// the result to "gpu.out" (one value per line) for CPU comparison.
int main(void)
{
    hipError_t err = hipSuccess;

    int m = N;
    int n = N;
    int k = N;

    // Host matrices, filled with values in (-1, 1).
    float *A = (float*)malloc(m*k*sizeof(float));
    float *B = (float*)malloc(k*n*sizeof(float));
    float *C = (float*)malloc(m*n*sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        printf("allocate host error!\n");
        return 1;
    }
    for (int i = 0; i < m*k; ++i) {
        A[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < k*n; ++i) {
        B[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
    }

    // Linear device buffers. NOTE(review): the kernel reads all its data
    // through textures; these are only passed through as (apparently unused)
    // arguments — kept to preserve the kernel's signature/behavior.
    float *dev_A = NULL;
    float *dev_B = NULL;
    float *dev_C = NULL;
    err = hipMalloc((void**)&dev_A, m*k*sizeof(float));
    err_handling(&err, "allocate devecie error A!");
    err = hipMalloc((void**)&dev_B, k*n*sizeof(float));
    err_handling(&err, "allocate devecie error B!");
    err = hipMalloc((void**)&dev_C, m*n*sizeof(float));
    err_handling(&err, "allocate devecie error C!");
    err = hipMemcpy(dev_A, A, m*k*sizeof(float), hipMemcpyHostToDevice);
    err_handling(&err, "memcpy to A error!");
    err = hipMemcpy(dev_B, B, k*n*sizeof(float), hipMemcpyHostToDevice);
    err_handling(&err, "memcpy to B error!");

    // 2D arrays backing the A/B textures and the C surface.
    hipChannelFormatDesc ADesc = hipCreateChannelDesc<float>();
    hipChannelFormatDesc BDesc = hipCreateChannelDesc<float>();
    hipChannelFormatDesc CDesc = hipCreateChannelDesc<float>();
    hipArray *A_array, *B_array, *C_array;
    hipMallocArray(&A_array, &ADesc, k, m);
    hipMallocArray(&B_array, &BDesc, n, k);
    hipMallocArray(&C_array, &CDesc, n, m, hipArraySurfaceLoadStore);
    hipMemcpyToArray(A_array, 0, 0, A, m*k*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpyToArray(B_array, 0, 0, B, k*n*sizeof(float), hipMemcpyHostToDevice);
    hipBindTextureToArray(tex_A, A_array);
    hipBindTextureToArray(tex_B, B_array);
    hipBindSurfaceToArray(surf_C, C_array);
    // Border addressing: out-of-range texture reads return 0, matching the
    // kernel's handling of tiles that overhang the matrix edge.
    tex_A.addressMode[0] = hipAddressModeBorder;
    tex_A.addressMode[1] = hipAddressModeBorder;
    tex_B.addressMode[0] = hipAddressModeBorder;
    tex_B.addressMode[1] = hipAddressModeBorder;

    dim3 dimGrid((n-1)/TILE_WIDTH+1, (m-1)/TILE_WIDTH+1, 1);
    dim3 dimBlock(TILE_WIDTH/WIDTH_PER_THREAD, TILE_WIDTH/WIDTH_PER_THREAD, 1);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( matMul), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_A, dev_B, dev_C, m, k, n);
    // Launch-configuration errors are only visible via the last-error slot;
    // the original never checked the launch at all.
    err = hipGetLastError();
    err_handling(&err, "kernel launch error!");
    hipEventRecord(stop, 0);
    hipEventSynchronize(start);
    hipEventSynchronize(stop);
    float time_elapsed = 0;
    hipEventElapsedTime(&time_elapsed, start, stop);
    printf("%fms\n", time_elapsed);

    // BUG FIX: this call was left as cudaMemcpyFromArray by the hipify pass
    // (a CUDA symbol in a HIP translation unit); use the HIP spelling.
    err = hipMemcpyFromArray(C, C_array, 0, 0, m*n*sizeof(float), hipMemcpyDeviceToHost);
    //err = hipMemcpy(C, dev_C, m*n*sizeof(float), hipMemcpyDeviceToHost);
    err_handling(&err, "memcpy to host C error!");

    FILE *fp = fopen("gpu.out", "w");
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            fprintf(fp, "%f\n", C[i*N+j]);
        }
    }
    fclose(fp);

    // Release resources the original leaked (arrays, events, host buffers).
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFreeArray(A_array);
    hipFreeArray(B_array);
    hipFreeArray(C_array);
    free(A);
    free(B);
    free(C);

    err = hipFree(dev_A);
    err_handling(&err, "mem free A error!");
    err = hipFree(dev_B);
    err_handling(&err, "mem free B error!");
    err = hipFree(dev_C);
    err_handling(&err, "mem free C error!");
    err = hipDeviceReset();
    err_handling(&err, "device reset error!");

    return 0;
}
| 775a8bfa1a4a19cdea155f21e81b3f1dbd13caaa.cu | #include <stdio.h>
#include <cuda_runtime.h>
//#include <cutil.h>
#define TILE_WIDTH 64
#define WIDTH_PER_THREAD 8
#define SW TILE_WIDTH/WIDTH_PER_THREAD
#define N 2048
texture<float, 2, cudaReadModeElementType> tex_A;
texture<float, 2, cudaReadModeElementType> tex_B;
surface<void, 2> surf_C;
// Abort the program with the supplied message when *err holds a CUDA
// failure code; a cudaSuccess status is a no-op.
void err_handling(cudaError_t *err, const char *str)
{
    if (*err == cudaSuccess) {
        return;
    }
    printf("%s\n", str);
    exit(EXIT_FAILURE);
}
// Tiled single-precision GEMM: C = A * B with A (m x k), B (k x n).
// Inputs are read through the 2D textures tex_A / tex_B (border address
// mode returns 0 outside the matrices) and the result is written through
// surface surf_C with zero boundary mode, so partial edge tiles are safe.
// Launch contract: blockDim = (8, 8); each thread accumulates an 8x8
// register tile, so one block produces a TILE_WIDTH x TILE_WIDTH = 64x64
// output tile. Shared memory is double-buffered (sA_bf/sB_bf) so the next
// 8-wide K slab is prefetched while the current slab is being consumed.
// NOTE(review): the A/B/C pointer parameters are unused -- the kernel only
// reads via textures and writes via the surface.
__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
// Two 8x64 shared slabs per operand; [0]/[1] alternate compute/prefetch roles.
__shared__ float sA_bf[2][8*64];
__shared__ float sB_bf[2][8*64];
float *A_pref, *A_now;
float *B_pref, *B_now;
int x = threadIdx.x;
int y = threadIdx.y;
// Top-left corner of this block's 64x64 output tile.
int bx = blockIdx.x*64;
int by = blockIdx.y*64;
// Linear thread id within the 8x8 block (0..63).
int id = y*8+x;
// Swizzled lane index used when filling shared memory; presumably chosen so
// the later float4 reads are bank-conflict free -- TODO confirm.
int inv_id = (id%32)/4*8 + id%4 + (id < 32 ? 0 : 4);
// Global row (for A) / column (for B) this thread loads from.
int glbA_id = by + inv_id;
int glbB_id = bx + inv_id;
// Top-left corner of this thread's 8x8 output sub-tile.
int row = by + y*8;
int col = bx + x*8;
// Per-iteration register fragments of A (8 rows) and B (8 cols).
float a[8];
float b[8];
// 8x8 per-thread accumulator, kept entirely in registers.
float c00 = 0.0; float c01 = 0.0; float c02 = 0.0; float c03 = 0.0, c04 = 0.0; float c05 = 0.0; float c06 = 0.0; float c07 = 0.0;
float c10 = 0.0; float c11 = 0.0; float c12 = 0.0; float c13 = 0.0, c14 = 0.0; float c15 = 0.0; float c16 = 0.0; float c17 = 0.0;
float c20 = 0.0; float c21 = 0.0; float c22 = 0.0; float c23 = 0.0, c24 = 0.0; float c25 = 0.0; float c26 = 0.0; float c27 = 0.0;
float c30 = 0.0; float c31 = 0.0; float c32 = 0.0; float c33 = 0.0, c34 = 0.0; float c35 = 0.0; float c36 = 0.0; float c37 = 0.0;
float c40 = 0.0; float c41 = 0.0; float c42 = 0.0; float c43 = 0.0, c44 = 0.0; float c45 = 0.0; float c46 = 0.0; float c47 = 0.0;
float c50 = 0.0; float c51 = 0.0; float c52 = 0.0; float c53 = 0.0, c54 = 0.0; float c55 = 0.0; float c56 = 0.0; float c57 = 0.0;
float c60 = 0.0; float c61 = 0.0; float c62 = 0.0; float c63 = 0.0, c64 = 0.0; float c65 = 0.0; float c66 = 0.0; float c67 = 0.0;
float c70 = 0.0; float c71 = 0.0; float c72 = 0.0; float c73 = 0.0, c74 = 0.0; float c75 = 0.0; float c76 = 0.0; float c77 = 0.0;
// Prime buffer 0 with the first 8-wide K slab of A and B.
sA_bf[0][0*64+id] = tex2D(tex_A, 0, glbA_id);
sA_bf[0][1*64+id] = tex2D(tex_A, 1, glbA_id);
sA_bf[0][2*64+id] = tex2D(tex_A, 2, glbA_id);
sA_bf[0][3*64+id] = tex2D(tex_A, 3, glbA_id);
sA_bf[0][4*64+id] = tex2D(tex_A, 4, glbA_id);
sA_bf[0][5*64+id] = tex2D(tex_A, 5, glbA_id);
sA_bf[0][6*64+id] = tex2D(tex_A, 6, glbA_id);
sA_bf[0][7*64+id] = tex2D(tex_A, 7, glbA_id);
sB_bf[0][0*64+id] = tex2D(tex_B, glbB_id, 0);
sB_bf[0][1*64+id] = tex2D(tex_B, glbB_id, 1);
sB_bf[0][2*64+id] = tex2D(tex_B, glbB_id, 2);
sB_bf[0][3*64+id] = tex2D(tex_B, glbB_id, 3);
sB_bf[0][4*64+id] = tex2D(tex_B, glbB_id, 4);
sB_bf[0][5*64+id] = tex2D(tex_B, glbB_id, 5);
sB_bf[0][6*64+id] = tex2D(tex_B, glbB_id, 6);
sB_bf[0][7*64+id] = tex2D(tex_B, glbB_id, 7);
// Buffer 0 is consumed first while buffer 1 receives the next slab.
A_pref = sA_bf[1];
B_pref = sB_bf[1];
A_now = sA_bf[0];
B_now = sB_bf[0];
int track_bf = 0;
// Main pipeline: each iteration prefetches slab t into *_pref and
// multiplies the previously loaded slab held in *_now.
for (int t = 8; t < k; t += 8) {
// Barrier makes the previous iteration's prefetch (now *_now) visible
// to all threads before it is read.
__syncthreads();
A_pref[0*64+id] = tex2D(tex_A, t  , glbA_id);
A_pref[1*64+id] = tex2D(tex_A, t+1, glbA_id);
A_pref[2*64+id] = tex2D(tex_A, t+2, glbA_id);
A_pref[3*64+id] = tex2D(tex_A, t+3, glbA_id);
A_pref[4*64+id] = tex2D(tex_A, t+4, glbA_id);
A_pref[5*64+id] = tex2D(tex_A, t+5, glbA_id);
A_pref[6*64+id] = tex2D(tex_A, t+6, glbA_id);
A_pref[7*64+id] = tex2D(tex_A, t+7, glbA_id);
B_pref[0*64+id] = tex2D(tex_B, glbB_id, t  );
B_pref[1*64+id] = tex2D(tex_B, glbB_id, t+1);
B_pref[2*64+id] = tex2D(tex_B, glbB_id, t+2);
B_pref[3*64+id] = tex2D(tex_B, glbB_id, t+3);
B_pref[4*64+id] = tex2D(tex_B, glbB_id, t+4);
B_pref[5*64+id] = tex2D(tex_B, glbB_id, t+5);
B_pref[6*64+id] = tex2D(tex_B, glbB_id, t+6);
B_pref[7*64+id] = tex2D(tex_B, glbB_id, t+7);
// Rank-1 updates: for each of the 8 K positions in the current slab,
// load 8 A values and 8 B values as float4 pairs and update all 64
// accumulators.
#pragma unroll
for (int i = 0; i < 8; ++i) {
int base = i * 16;
((float4*)a)[0] = ((float4*)A_now)[base+y];
((float4*)a)[1] = ((float4*)A_now)[base+y+8];
((float4*)b)[0] = ((float4*)B_now)[base+x];
((float4*)b)[1] = ((float4*)B_now)[base+x+8];
c00 += a[0] * b[0];
c01 += a[0] * b[1];
c02 += a[0] * b[2];
c03 += a[0] * b[3];
c04 += a[0] * b[4];
c05 += a[0] * b[5];
c06 += a[0] * b[6];
c07 += a[0] * b[7];
c10 += a[1] * b[0];
c11 += a[1] * b[1];
c12 += a[1] * b[2];
c13 += a[1] * b[3];
c14 += a[1] * b[4];
c15 += a[1] * b[5];
c16 += a[1] * b[6];
c17 += a[1] * b[7];
c20 += a[2] * b[0];
c21 += a[2] * b[1];
c22 += a[2] * b[2];
c23 += a[2] * b[3];
c24 += a[2] * b[4];
c25 += a[2] * b[5];
c26 += a[2] * b[6];
c27 += a[2] * b[7];
c30 += a[3] * b[0];
c31 += a[3] * b[1];
c32 += a[3] * b[2];
c33 += a[3] * b[3];
c34 += a[3] * b[4];
c35 += a[3] * b[5];
c36 += a[3] * b[6];
c37 += a[3] * b[7];
c40 += a[4] * b[0];
c41 += a[4] * b[1];
c42 += a[4] * b[2];
c43 += a[4] * b[3];
c44 += a[4] * b[4];
c45 += a[4] * b[5];
c46 += a[4] * b[6];
c47 += a[4] * b[7];
c50 += a[5] * b[0];
c51 += a[5] * b[1];
c52 += a[5] * b[2];
c53 += a[5] * b[3];
c54 += a[5] * b[4];
c55 += a[5] * b[5];
c56 += a[5] * b[6];
c57 += a[5] * b[7];
c60 += a[6] * b[0];
c61 += a[6] * b[1];
c62 += a[6] * b[2];
c63 += a[6] * b[3];
c64 += a[6] * b[4];
c65 += a[6] * b[5];
c66 += a[6] * b[6];
c67 += a[6] * b[7];
c70 += a[7] * b[0];
c71 += a[7] * b[1];
c72 += a[7] * b[2];
c73 += a[7] * b[3];
c74 += a[7] * b[4];
c75 += a[7] * b[5];
c76 += a[7] * b[6];
c77 += a[7] * b[7];
}
// Swap roles: the freshly prefetched buffer becomes *_now for the next
// iteration; the consumed one becomes the prefetch target.
A_pref = sA_bf[track_bf];
B_pref = sB_bf[track_bf];
A_now = sA_bf[1-track_bf];
B_now = sB_bf[1-track_bf];
track_bf = 1 - track_bf;
}
// Drain: multiply the last slab (no further prefetch).
__syncthreads();
#pragma unroll
for (int i = 0; i < 8; ++i) {
int base = i * 16;
((float4*)a)[0] = ((float4*)A_now)[base+y];
((float4*)a)[1] = ((float4*)A_now)[base+y+8];
((float4*)b)[0] = ((float4*)B_now)[base+x];
((float4*)b)[1] = ((float4*)B_now)[base+x+8];
c00 += a[0] * b[0];
c01 += a[0] * b[1];
c02 += a[0] * b[2];
c03 += a[0] * b[3];
c04 += a[0] * b[4];
c05 += a[0] * b[5];
c06 += a[0] * b[6];
c07 += a[0] * b[7];
c10 += a[1] * b[0];
c11 += a[1] * b[1];
c12 += a[1] * b[2];
c13 += a[1] * b[3];
c14 += a[1] * b[4];
c15 += a[1] * b[5];
c16 += a[1] * b[6];
c17 += a[1] * b[7];
c20 += a[2] * b[0];
c21 += a[2] * b[1];
c22 += a[2] * b[2];
c23 += a[2] * b[3];
c24 += a[2] * b[4];
c25 += a[2] * b[5];
c26 += a[2] * b[6];
c27 += a[2] * b[7];
c30 += a[3] * b[0];
c31 += a[3] * b[1];
c32 += a[3] * b[2];
c33 += a[3] * b[3];
c34 += a[3] * b[4];
c35 += a[3] * b[5];
c36 += a[3] * b[6];
c37 += a[3] * b[7];
c40 += a[4] * b[0];
c41 += a[4] * b[1];
c42 += a[4] * b[2];
c43 += a[4] * b[3];
c44 += a[4] * b[4];
c45 += a[4] * b[5];
c46 += a[4] * b[6];
c47 += a[4] * b[7];
c50 += a[5] * b[0];
c51 += a[5] * b[1];
c52 += a[5] * b[2];
c53 += a[5] * b[3];
c54 += a[5] * b[4];
c55 += a[5] * b[5];
c56 += a[5] * b[6];
c57 += a[5] * b[7];
c60 += a[6] * b[0];
c61 += a[6] * b[1];
c62 += a[6] * b[2];
c63 += a[6] * b[3];
c64 += a[6] * b[4];
c65 += a[6] * b[5];
c66 += a[6] * b[6];
c67 += a[6] * b[7];
c70 += a[7] * b[0];
c71 += a[7] * b[1];
c72 += a[7] * b[2];
c73 += a[7] * b[3];
c74 += a[7] * b[4];
c75 += a[7] * b[5];
c76 += a[7] * b[6];
c77 += a[7] * b[7];
}
// Write the 8x8 result tile through the surface; zero boundary mode drops
// out-of-range elements when m or n is not a multiple of 64.
surf2Dwrite(c00, surf_C, (col  )*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c01, surf_C, (col+1)*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c02, surf_C, (col+2)*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c03, surf_C, (col+3)*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c04, surf_C, (col+4)*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c05, surf_C, (col+5)*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c06, surf_C, (col+6)*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c07, surf_C, (col+7)*sizeof(float), row  , cudaBoundaryModeZero);
surf2Dwrite(c10, surf_C, (col  )*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c11, surf_C, (col+1)*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c12, surf_C, (col+2)*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c13, surf_C, (col+3)*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c14, surf_C, (col+4)*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c15, surf_C, (col+5)*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c16, surf_C, (col+6)*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c17, surf_C, (col+7)*sizeof(float), row+1 , cudaBoundaryModeZero);
surf2Dwrite(c20, surf_C, (col  )*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c21, surf_C, (col+1)*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c22, surf_C, (col+2)*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c23, surf_C, (col+3)*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c24, surf_C, (col+4)*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c25, surf_C, (col+5)*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c26, surf_C, (col+6)*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c27, surf_C, (col+7)*sizeof(float), row+2 , cudaBoundaryModeZero);
surf2Dwrite(c30, surf_C, (col  )*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c31, surf_C, (col+1)*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c32, surf_C, (col+2)*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c33, surf_C, (col+3)*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c34, surf_C, (col+4)*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c35, surf_C, (col+5)*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c36, surf_C, (col+6)*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c37, surf_C, (col+7)*sizeof(float), row+3 , cudaBoundaryModeZero);
surf2Dwrite(c40, surf_C, (col  )*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c41, surf_C, (col+1)*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c42, surf_C, (col+2)*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c43, surf_C, (col+3)*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c44, surf_C, (col+4)*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c45, surf_C, (col+5)*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c46, surf_C, (col+6)*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c47, surf_C, (col+7)*sizeof(float), row+4 , cudaBoundaryModeZero);
surf2Dwrite(c50, surf_C, (col  )*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c51, surf_C, (col+1)*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c52, surf_C, (col+2)*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c53, surf_C, (col+3)*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c54, surf_C, (col+4)*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c55, surf_C, (col+5)*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c56, surf_C, (col+6)*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c57, surf_C, (col+7)*sizeof(float), row+5 , cudaBoundaryModeZero);
surf2Dwrite(c60, surf_C, (col  )*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c61, surf_C, (col+1)*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c62, surf_C, (col+2)*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c63, surf_C, (col+3)*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c64, surf_C, (col+4)*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c65, surf_C, (col+5)*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c66, surf_C, (col+6)*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c67, surf_C, (col+7)*sizeof(float), row+6 , cudaBoundaryModeZero);
surf2Dwrite(c70, surf_C, (col  )*sizeof(float), row+7 , cudaBoundaryModeZero);
surf2Dwrite(c71, surf_C, (col+1)*sizeof(float), row+7 , cudaBoundaryModeZero);
surf2Dwrite(c72, surf_C, (col+2)*sizeof(float), row+7 , cudaBoundaryModeZero);
surf2Dwrite(c73, surf_C, (col+3)*sizeof(float), row+7 , cudaBoundaryModeZero);
surf2Dwrite(c74, surf_C, (col+4)*sizeof(float), row+7 , cudaBoundaryModeZero);
surf2Dwrite(c75, surf_C, (col+5)*sizeof(float), row+7 , cudaBoundaryModeZero);
surf2Dwrite(c76, surf_C, (col+6)*sizeof(float), row+7 , cudaBoundaryModeZero);
surf2Dwrite(c77, surf_C, (col+7)*sizeof(float), row+7 , cudaBoundaryModeZero);
}
/*
 * Host driver: builds two N x N random matrices, multiplies them on the GPU
 * through the texture/surface-based matMul kernel, times the launch with CUDA
 * events, and writes the result to "gpu.out" (one value per line, row-major).
 *
 * Fixes over the original: texture address modes are configured before the
 * textures are bound; all array/texture/surface API calls are error-checked;
 * the kernel launch is checked via cudaGetLastError; fopen is checked; the
 * output index uses the runtime width n (was the compile-time N); events,
 * cudaArrays and host buffers are released before exit.
 */
int main(void)
{
    cudaError_t err = cudaSuccess;
    int m = N;
    int n = N;
    int k = N;
    // Host matrices (row-major).
    float *A = (float*)malloc(m*k*sizeof(float));
    float *B = (float*)malloc(k*n*sizeof(float));
    float *C = (float*)malloc(m*n*sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        printf("allocate host error!\n");
        return 1;
    }
    // Random inputs roughly in [-1, 1].
    for (int i = 0; i < m*k; ++i) {
        A[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < k*n; ++i) {
        B[i] = rand()/(float)RAND_MAX - rand()/(float)RAND_MAX;
    }
    // Linear device buffers. NOTE(review): the kernel reads only through the
    // textures bound below; these are kept for the kernel's (unused) pointer
    // arguments and the commented-out non-surface read-back path.
    float *dev_A = NULL;
    float *dev_B = NULL;
    float *dev_C = NULL;
    err = cudaMalloc((void**)&dev_A, m*k*sizeof(float));
    err_handling(&err, "allocate devecie error A!");
    err = cudaMalloc((void**)&dev_B, k*n*sizeof(float));
    err_handling(&err, "allocate devecie error B!");
    err = cudaMalloc((void**)&dev_C, m*n*sizeof(float));
    err_handling(&err, "allocate devecie error C!");
    err = cudaMemcpy(dev_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to A error!");
    err = cudaMemcpy(dev_B, B, k*n*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to B error!");
    // CUDA arrays backing the input textures and the output surface.
    cudaChannelFormatDesc ADesc = cudaCreateChannelDesc<float>();
    cudaChannelFormatDesc BDesc = cudaCreateChannelDesc<float>();
    cudaChannelFormatDesc CDesc = cudaCreateChannelDesc<float>();
    cudaArray *A_array, *B_array, *C_array;
    err = cudaMallocArray(&A_array, &ADesc, k, m);
    err_handling(&err, "allocate array A error!");
    err = cudaMallocArray(&B_array, &BDesc, n, k);
    err_handling(&err, "allocate array B error!");
    err = cudaMallocArray(&C_array, &CDesc, n, m, cudaArraySurfaceLoadStore);
    err_handling(&err, "allocate array C error!");
    err = cudaMemcpyToArray(A_array, 0, 0, A, m*k*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to array A error!");
    err = cudaMemcpyToArray(B_array, 0, 0, B, k*n*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to array B error!");
    // Configure border addressing before binding: the kernel relies on
    // out-of-range texture reads returning zero for partial tiles.
    tex_A.addressMode[0] = cudaAddressModeBorder;
    tex_A.addressMode[1] = cudaAddressModeBorder;
    tex_B.addressMode[0] = cudaAddressModeBorder;
    tex_B.addressMode[1] = cudaAddressModeBorder;
    err = cudaBindTextureToArray(tex_A, A_array);
    err_handling(&err, "bind texture A error!");
    err = cudaBindTextureToArray(tex_B, B_array);
    err_handling(&err, "bind texture B error!");
    err = cudaBindSurfaceToArray(surf_C, C_array);
    err_handling(&err, "bind surface C error!");
    // One block covers a TILE_WIDTH x TILE_WIDTH output tile; each thread
    // computes WIDTH_PER_THREAD x WIDTH_PER_THREAD elements.
    dim3 dimGrid((n-1)/TILE_WIDTH+1, (m-1)/TILE_WIDTH+1, 1);
    dim3 dimBlock(TILE_WIDTH/WIDTH_PER_THREAD, TILE_WIDTH/WIDTH_PER_THREAD, 1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, m, k, n);
    // Launch-configuration errors only surface through cudaGetLastError.
    err = cudaGetLastError();
    err_handling(&err, "kernel launch error!");
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float time_elapsed = 0;
    cudaEventElapsedTime(&time_elapsed, start, stop);
    printf("%fms\n", time_elapsed);
    err = cudaMemcpyFromArray(C, C_array, 0, 0, m*n*sizeof(float), cudaMemcpyDeviceToHost);
    //err = cudaMemcpy(C, dev_C, m*n*sizeof(float), cudaMemcpyDeviceToHost);
    err_handling(&err, "memcpy to host C error!");
    FILE *fp = fopen("gpu.out", "w");
    if (fp == NULL) {
        printf("open output file error!\n");
        return 1;
    }
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            // Index with the runtime width n (was the compile-time N), so the
            // dump stays correct if n is ever decoupled from N.
            fprintf(fp, "%f\n", C[i*n+j]);
        }
    }
    fclose(fp);
    // Release GPU resources that the original leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaUnbindTexture(tex_A);
    cudaUnbindTexture(tex_B);
    cudaFreeArray(A_array);
    cudaFreeArray(B_array);
    cudaFreeArray(C_array);
    err = cudaFree(dev_A);
    err_handling(&err, "mem free A error!");
    err = cudaFree(dev_B);
    err_handling(&err, "mem free B error!");
    err = cudaFree(dev_C);
    err_handling(&err, "mem free C error!");
    free(A);
    free(B);
    free(C);
    err = cudaDeviceReset();
    err_handling(&err, "device reset error!");
    return 0;
}
|
39eaf2801033e0e942d211e34ae45be8fd15469a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
// Global average pooling for NCHW input: each block reduces one (n, c) plane
// of `sz` = H*W*D elements to its mean, grid-striding over the NC planes.
// Reduction uses a CUB-style block reduce; only lane 0 writes the result.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
  typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int j = blockIdx.x; j < NC; j += gridDim.x) {
    T sum(0);
    for (int k = threadIdx.x; k < sz; k += blockDim.x) {
      sum += data[j * sz + k];
    }
    // Keep the reduced value in T (was float), so non-float instantiations
    // do not silently lose precision.
    T totalsum = BlockReduce(temp_storage).Sum(sum);
    if (threadIdx.x == 0) {
      out[j] = totalsum / sz;
    }
    // Protect temp_storage before the next plane reuses it.
    __syncthreads();
  }
}
// Backward of global average pooling: every element of a plane receives an
// equal 1/sz share of that plane's upstream gradient dx.
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
  CUDA_1D_KERNEL_LOOP(i, NC * sz) {
    const int plane = i / sz;
    out[i] = dx[plane] / sz;
  }
}
// Global max pooling for NCHW input: each block reduces one (n, c) plane of
// `sz` = H*W*D elements to its maximum, grid-striding over the NC planes.
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
  typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int j = blockIdx.x; j < NC; j += gridDim.x) {
    // Renamed from `max` to avoid shadowing std::max / ::max.
    T maxval(-FLT_MAX);
    for (int k = threadIdx.x; k < sz; k += blockDim.x) {
      maxval = data[j * sz + k] > maxval ? data[j * sz + k] : maxval;
    }
    // Reduce in T (was float) so non-float instantiations keep precision.
    T totalmax = BlockReduce(temp_storage).Reduce(maxval, hipcub::Max());
    if (threadIdx.x == 0) {
      out[j] = totalmax;
    }
    // Protect temp_storage before the next plane reuses it.
    __syncthreads();
  }
}
// Backward of global max pooling: the plane's gradient dx flows to every
// position whose input value `in` equals the pooled maximum `x`; all other
// positions receive zero.
template <typename T>
__global__ void global_maxpool_backward_NCHW(
    const int NC,
    const int sz,
    const T* dx,
    T* out,
    const T* x,
    const T* in) {
  CUDA_1D_KERNEL_LOOP(i, NC * sz) {
    const int plane = i / sz;
    out[i] = (in[i] == x[plane]) ? dx[plane] : T(0);
  }
}
// Fill a cuDNN tensor descriptor for a 4d (size == 4) or 5d tensor. 4d
// tensors use the format-based setter; higher-rank tensors get an explicit
// dims/strides Nd descriptor derived from the storage order.
template <typename T>
void setTensorDescriptor(
    const int size,
    const StorageOrder order,
    const int N,
    const int C,
    const int H,
    const int W,
    const int D,
    cudnnTensorDescriptor_t& desc) {
  if (size == 4) {
    CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
        desc,
        GetCudnnTensorFormat(order),
        cudnnTypeWrapper<T>::type,
        N,
        C,
        H,
        W));
    return;
  }
  vector<int> dims = {N, C, H, W, D};
  vector<int> strides;
  if (order == NCHW) {
    strides = {C * H * W * D, H * W * D, W * D, D, 1};
  } else {
    strides = {H * W * D * C, 1, W * D * C, D * C, C};
  }
  CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
      desc,
      cudnnTypeWrapper<T>::type,
      size > 3 ? size : 4,
      dims.data(),
      strides.data()));
}
} // namespace
// cuDNN-backed forward pooling operator, shared by the MaxPool* and
// AveragePool* registrations below. Supports 4d and 5d tensors in NCHW or
// NHWC order with float or float16 data. For float NCHW *global* pooling it
// bypasses cuDNN and uses the custom reduction kernels above, which work
// around a cuDNN global-pooling performance bug.
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
// Creates the cuDNN descriptors and picks the pooling mode from the
// operator type name; validates kernel rank and padding symmetry.
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
OPERATOR_NEEDS_FEATURE(kernel_.size() >=2 && kernel_.size() <=3,
"Cudnn pooling only supports 4d and 5d tensor");
// cuDNN requires symmetric left/right padding unless we are emulating
// Caffe's legacy pooling behavior.
if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
for (int i = 0; i < kernel_.size(); ++i) {
OPERATOR_NEEDS_FEATURE(
pads_[i] == pads_[kernel_.size() + i],
"The current padding scheme leads to unequal padding on the left "
"and right, which is not supported by cudnn.");
}
}
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
// The optional "deterministic" argument selects the deterministic max
// pooling algorithm (cuDNN >= 6 only).
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
// Releases the cuDNN descriptors created in the constructor.
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
// Runs the pooling forward pass for input type T (cuDNN compute type M).
// Returns true on success; enforces on failure.
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
// Decode dimensions according to the storage order; absent trailing
// spatial dimensions default to 1.
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
hipLaunchKernelGGL(( global_maxpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
}
}
// Descriptors are cached on the input shape: only rebuild them when the
// shape changes across calls.
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
// Dispatches on the input data type (float or float16).
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
// Cached input shape used to detect when descriptors must be rebuilt.
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
// cuDNN-backed pooling gradient operator. Inputs: X (forward input),
// Y (forward output), dY (upstream gradient); output: dX. Mirrors
// CuDNNPoolOp, including the custom float/NCHW global-pooling fast path.
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
// Creates the cuDNN descriptors and picks the pooling mode from the exact
// gradient operator type name.
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
// Releases the cuDNN descriptors created in the constructor.
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
// Runs the pooling backward pass for input type T (cuDNN compute type M).
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
// cuDNN pooling support only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// Decode dimensions according to the storage order; absent trailing
// spatial dimensions default to 1.
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
// Kernel args: upstream grad dY, output dX, pooled maxima Y,
// original input X.
hipLaunchKernelGGL(( global_maxpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
// Recompute pads from the actual spatial dims before configuring cuDNN.
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
// Descriptors are cached on the input shape: only rebuild them when the
// shape changes across calls.
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
// Dispatches on the input data type; dX is also resized in DoRunWithType.
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
// Cached input shape used to detect when descriptors must be rebuilt.
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
// Register the cuDNN pooling operators (forward and gradient) under the
// generic names as well as every dimensionality-specific alias; the shared
// op classes pick max vs. average behavior from the operator type string.
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
| 39eaf2801033e0e942d211e34ae45be8fd15469a.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <cub/cub.cuh>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
// Global average pooling for NCHW input: each block reduces one (n, c) plane
// of `sz` = H*W*D elements to its mean, grid-striding over the NC planes.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
  typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int j = blockIdx.x; j < NC; j += gridDim.x) {
    T sum(0);
    for (int k = threadIdx.x; k < sz; k += blockDim.x) {
      sum += data[j * sz + k];
    }
    // Keep the reduced value in T (was float), so non-float instantiations
    // do not silently lose precision.
    T totalsum = BlockReduce(temp_storage).Sum(sum);
    if (threadIdx.x == 0) {
      out[j] = totalsum / sz;
    }
    // Protect temp_storage before the next plane reuses it.
    __syncthreads();
  }
}
// Backward of global average pooling: every element of a plane receives an
// equal 1/sz share of that plane's upstream gradient dx.
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
  CUDA_1D_KERNEL_LOOP(i, NC * sz) {
    const int plane = i / sz;
    out[i] = dx[plane] / sz;
  }
}
// Global max pooling for NCHW input: each block reduces one (n, c) plane of
// `sz` = H*W*D elements to its maximum, grid-striding over the NC planes.
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
  typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int j = blockIdx.x; j < NC; j += gridDim.x) {
    // Renamed from `max` to avoid shadowing std::max / ::max.
    T maxval(-FLT_MAX);
    for (int k = threadIdx.x; k < sz; k += blockDim.x) {
      maxval = data[j * sz + k] > maxval ? data[j * sz + k] : maxval;
    }
    // Reduce in T (was float) so non-float instantiations keep precision.
    T totalmax = BlockReduce(temp_storage).Reduce(maxval, cub::Max());
    if (threadIdx.x == 0) {
      out[j] = totalmax;
    }
    // Protect temp_storage before the next plane reuses it.
    __syncthreads();
  }
}
// Backward of global max pooling: the plane's gradient dx flows to every
// position whose input value `in` equals the pooled maximum `x`; all other
// positions receive zero.
template <typename T>
__global__ void global_maxpool_backward_NCHW(
    const int NC,
    const int sz,
    const T* dx,
    T* out,
    const T* x,
    const T* in) {
  CUDA_1D_KERNEL_LOOP(i, NC * sz) {
    const int plane = i / sz;
    out[i] = (in[i] == x[plane]) ? dx[plane] : T(0);
  }
}
// Fill a cuDNN tensor descriptor for a 4d (size == 4) or 5d tensor. 4d
// tensors use the format-based setter; higher-rank tensors get an explicit
// dims/strides Nd descriptor derived from the storage order.
template <typename T>
void setTensorDescriptor(
    const int size,
    const StorageOrder order,
    const int N,
    const int C,
    const int H,
    const int W,
    const int D,
    cudnnTensorDescriptor_t& desc) {
  if (size == 4) {
    CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
        desc,
        GetCudnnTensorFormat(order),
        cudnnTypeWrapper<T>::type,
        N,
        C,
        H,
        W));
    return;
  }
  vector<int> dims = {N, C, H, W, D};
  vector<int> strides;
  if (order == NCHW) {
    strides = {C * H * W * D, H * W * D, W * D, D, 1};
  } else {
    strides = {H * W * D * C, 1, W * D * C, D * C, C};
  }
  CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
      desc,
      cudnnTypeWrapper<T>::type,
      size > 3 ? size : 4,
      dims.data(),
      strides.data()));
}
} // namespace
// cuDNN-backed pooling operator (Max / Average, 2 or 3 spatial dims).
// For float + NCHW + global pooling it bypasses cuDNN and uses the
// hand-written CUB kernels above, which are faster for that case.
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
 public:
  CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws),
        cudnn_wrapper_(&context_) {
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
    CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
    OPERATOR_NEEDS_FEATURE(kernel_.size() >=2 && kernel_.size() <=3,
        "Cudnn pooling only supports 4d and 5d tensor");
    // cuDNN requires symmetric left/right padding; the Caffe legacy pooling
    // scheme is the one sanctioned exception (checked again at run time).
    if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
      for (int i = 0; i < kernel_.size(); ++i) {
        OPERATOR_NEEDS_FEATURE(
            pads_[i] == pads_[kernel_.size() + i],
            "The current padding scheme leads to unequal padding on the left "
            "and right, which is not supported by cudnn.");
      }
    }
    // Figure out the pooling descriptor. Dispatch on the operator-type
    // prefix so MaxPool / MaxPool1D / MaxPool2D / MaxPool3D all match.
    if (operator_def.type().substr(0, 7) == "MaxPool") {
      bool deterministic =
          OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
      mode_ =
          deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
      mode_ = CUDNN_POOLING_MAX;
#endif
    } else if (operator_def.type().substr(0, 11) == "AveragePool") {
      mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
    } else {
      LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
    }
  }

  ~CuDNNPoolOp() {
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
    CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
  }

  // T: storage type of the tensors; M: math/compute type (float for fp16).
  template <typename T, typename M>
  bool DoRunWithType() {
    auto& X = Input(0);
    auto* Y = Output(0);
    int N = 0, C = 0, H = 0, W = 0, D = 0;
    int H_out = 0, W_out = 0, D_out = 0;
    // cuDNN pooling support only 2 and 3 spatial dimensions.
    CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
    // Decode N/C/H/W/D and the output spatial sizes from the layout;
    // missing trailing spatial dims default to 1.
    switch (order_) {
      case StorageOrder::NHWC:
        N = X.dim32(0);
        H = X.dim32(1);
        W = X.ndim() > 3 ? X.dim32(2) : 1;
        D = X.ndim() > 4 ? X.dim32(3) : 1;
        C = X.dim32(X.ndim() - 1);
        ConvPoolOpBase::SetOutputSize(X, Y, C);
        H_out = Y->dim32(1);
        W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
        D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
        break;
      case StorageOrder::NCHW:
        N = X.dim32(0);
        C = X.dim32(1);
        H = X.dim32(2);
        W = X.ndim() > 3 ? X.dim32(3) : 1;
        D = X.ndim() > 4 ? X.dim32(4) : 1;
        ConvPoolOpBase::SetOutputSize(X, Y, C);
        H_out = Y->dim32(2);
        W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
        D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
        break;
      default:
        LOG(FATAL) << "Unknown storage order: " << order_;
    }
    // Fast path for global pooling, as cudnn is slow. But only
    // on float, because fp16 not supported for CUB.
    if (std::is_same<T, float>::value) {
      if (order_ == StorageOrder::NCHW && global_pooling_) {
        if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
          global_avgpool_kernel_NCHW<float>
              <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
                 CAFFE_CUDA_NUM_THREADS,
                 0,
                 context_.cuda_stream()>>>(
                  N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
          return true;
        }
        // NOTE(review): this checks only CUDNN_POOLING_MAX, so the
        // deterministic mode (CUDNN_POOLING_MAX_DETERMINISTIC) falls
        // through to cuDNN here, while the gradient op's fast path accepts
        // both — confirm whether that asymmetry is intentional.
        if (mode_ == CUDNN_POOLING_MAX) {
          global_maxpool_kernel_NCHW<float>
              <<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
                 CAFFE_CUDA_NUM_THREADS,
                 0,
                 context_.cuda_stream()>>>(
                  N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
          return true;
        }
      }
    }
    // Descriptors are cached across runs; only rebuild when the input
    // shape actually changed.
    if (cudnn_input_dims_ != X.dims()) {
      // Dimensions changed; we will need to re-initialize things.
      VLOG(1) << "Changing the cudnn descriptor configurations.";
      cudnn_input_dims_ = X.dims();
      setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
      setTensorDescriptor<T>(
          Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
      for (int i = 0; i < kernel_.size(); ++i) {
        if (pads_[i] != pads_[kernel_.size() + i]) {
          CAFFE_ENFORCE(
              legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
              "Cudnn pooling only supports even padding on both sides, with "
              "the only exception of the caffe legacy pooling case where we "
              "try to preserve backward compatibility with Caffe.");
        }
      }
      if (kernel_.size() == 2) {
        CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
            pooling_desc_,
            mode_,
            CUDNN_NOT_PROPAGATE_NAN,
            kernel_h(),
            kernel_w(),
            pad_t(),
            pad_l(),
            stride_h(),
            stride_w()));
      } else {
        CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
            pooling_desc_,
            mode_,
            CUDNN_NOT_PROPAGATE_NAN,
            kernel_.size(),
            kernel_.data(),
            pads_.data(),
            stride_.data()));
      }
    }
    // Carry out the pooling computation.
    const T* Xdata = X.template data<T>();
    T* Ydata = Y->template mutable_data<T>();
    CUDNN_ENFORCE(cudnnPoolingForward(
        cudnn_wrapper_.inline_cudnn_handle(),
        pooling_desc_,
        cudnnTypeWrapper<T>::kOne(),
        bottom_desc_,
        Xdata,
        cudnnTypeWrapper<T>::kZero(),
        top_desc_,
        Ydata));
    return true;
  }

  bool RunOnDevice() final {
    auto& X = Input(0);
    auto* Y = Output(0);
    // fp16 tensors still compute in float math (M = float).
    if (X.IsType<float>()) {
      return DoRunWithType<float, float>();
    } else if (X.IsType<float16>()) {
      return DoRunWithType<float16, float>();
    } else {
      LOG(FATAL) << "Unsupported input types";
    }
    return true;
  }

 protected:
  vector<TIndex> cudnn_input_dims_;  // cached input shape for descriptor reuse
  CuDNNWrapper cudnn_wrapper_;
  cudnnTensorDescriptor_t bottom_desc_;
  cudnnTensorDescriptor_t top_desc_;
  cudnnPoolingDescriptor_t pooling_desc_;
  cudnnPoolingMode_t mode_;

 private:
};
// Gradient of CuDNNPoolOp. Inputs: X (forward input), Y (forward output),
// dY (upstream gradient); output: dX. Mirrors the forward op's fast path
// for float + NCHW + global pooling.
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
 public:
  CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
      : ConvPoolOpBase<CUDAContext>(operator_def, ws),
        cudnn_wrapper_(&context_) {
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
    CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
    CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
    // Figure out the pooling descriptor. Unlike the forward op, gradient
    // types are matched exactly rather than by prefix.
    if (operator_def.type() == "MaxPoolGradient" ||
        operator_def.type() == "MaxPool1DGradient" ||
        operator_def.type() == "MaxPool2DGradient" ||
        operator_def.type() == "MaxPool3DGradient") {
      bool deterministic =
          OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
      mode_ =
          deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
      mode_ = CUDNN_POOLING_MAX;
#endif
    } else if (
        operator_def.type() == "AveragePoolGradient" ||
        operator_def.type() == "AveragePool1DGradient" ||
        operator_def.type() == "AveragePool2DGradient" ||
        operator_def.type() == "AveragePool3DGradient") {
      mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
    } else {
      LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
    }
  }

  ~CuDNNPoolGradientOp() {
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
    CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
    CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
  }

  // T: storage type; M: math/compute type (float for fp16).
  template <typename T, typename M>
  bool DoRunWithType() {
    auto& X = Input(0);
    auto& Y = Input(1);
    auto& dY = Input(2);
    auto* dX = Output(0);
    // cuDNN pooling support only 2 and 3 spatial dimensions.
    CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
    // NOTE(review): RunOnDevice already did ResizeLike(X); this repeat is
    // redundant but harmless.
    dX->ResizeLike(X);
    int N = 0, C = 0, H = 0, W = 0, D = 0;
    int H_out = 0, W_out = 0, D_out = 0;
    // Decode shapes; output sizes come from the stored forward output Y.
    switch (order_) {
      case StorageOrder::NHWC:
        N = X.dim32(0);
        H = X.dim32(1);
        W = X.ndim() > 3 ? X.dim32(2) : 1;
        D = X.ndim() > 4 ? X.dim32(3) : 1;
        C = X.dim32(X.ndim() - 1);
        H_out = Y.dim32(1);
        W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
        D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
        break;
      case StorageOrder::NCHW:
        N = X.dim32(0);
        C = X.dim32(1);
        H = X.dim32(2);
        W = X.ndim() > 3 ? X.dim32(3) : 1;
        D = X.ndim() > 4 ? X.dim32(4) : 1;
        H_out = Y.dim32(2);
        W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
        D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
        break;
      default:
        LOG(FATAL) << "Unknown storage order: " << order_;
    }
    // Fast path for global pooling, as cudnn is slow. But only
    // on float, because fp16 not supported for CUB.
    if (std::is_same<T, float>::value) {
      if (order_ == StorageOrder::NCHW && global_pooling_) {
        if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
          global_avgpool_backward_NCHW<float>
              <<<CAFFE_GET_BLOCKS(dX->size()),
                 CAFFE_CUDA_NUM_THREADS,
                 0,
                 context_.cuda_stream()>>>(
                  N * C,
                  H * W * D,
                  dY.data<float>(),
                  dX->mutable_data<float>());
          return true;
        }
#if CUDNN_VERSION_MIN(6, 0, 0)
        if (mode_ == CUDNN_POOLING_MAX ||
            mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
        if (mode_ == CUDNN_POOLING_MAX) {
#endif
          // Kernel parameter mapping: dx=dY, out=dX, x=Y (forward maxima),
          // in=X (forward input).
          global_maxpool_backward_NCHW<float>
              <<<CAFFE_GET_BLOCKS(dX->size()),
                 CAFFE_CUDA_NUM_THREADS,
                 0,
                 context_.cuda_stream()>>>(
                  N * C,
                  H * W * D,
                  dY.data<float>(),
                  dX->mutable_data<float>(),
                  Y.data<float>(),
                  X.data<float>());
          return true;
        }
      }
    }
    // Recompute pads (the gradient op does not go through SetOutputSize,
    // which is where the forward op computes them).
    if (kernel_.size() == 1) {
      ConvPoolOpBase<CUDAContext>::ComputePads({H});
    } else if (kernel_.size() == 2) {
      ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
    } else if (kernel_.size() == 3) {
      ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
    } else {
      CAFFE_THROW("Unsupported kernel size :", kernel_.size());
    }
    // Descriptors are cached; rebuild only on input-shape change.
    if (cudnn_input_dims_ != X.dims()) {
      // Dimensions changed; we will need to re-initialize things.
      VLOG(1) << "Changing the cudnn descriptor configurations.";
      cudnn_input_dims_ = X.dims();
      setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
      setTensorDescriptor<T>(
          Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
      for (int i = 0; i < kernel_.size(); ++i) {
        if (pads_[i] != pads_[kernel_.size() + i]) {
          CAFFE_ENFORCE(
              legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
              "Cudnn pooling only supports even padding on both sides, with "
              "the only exception of the caffe legacy pooling case where we "
              "try to preserve backward compatibility with Caffe.");
        }
      }
      if (kernel_.size() == 2) {
        CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
            pooling_desc_,
            mode_,
            CUDNN_NOT_PROPAGATE_NAN,
            kernel_h(),
            kernel_w(),
            pad_t(),
            pad_l(),
            stride_h(),
            stride_w()));
      } else {
        CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
            pooling_desc_,
            mode_,
            CUDNN_NOT_PROPAGATE_NAN,
            kernel_.size(),
            kernel_.data(),
            pads_.data(),
            stride_.data()));
      }
    }
    // Carry out the pooling computation.
    const T* Xdata = X.template data<T>();
    const T* Ydata = Y.template data<T>();
    const T* dYdata = dY.template data<T>();
    T* dXdata = dX->template mutable_data<T>();
    CUDNN_ENFORCE(cudnnPoolingBackward(
        cudnn_wrapper_.inline_cudnn_handle(),
        pooling_desc_,
        cudnnTypeWrapper<T>::kOne(),
        top_desc_,
        Ydata,
        top_desc_,
        dYdata,
        bottom_desc_,
        Xdata,
        cudnnTypeWrapper<T>::kZero(),
        bottom_desc_,
        dXdata));
    return true;
  }

  bool RunOnDevice() final {
    auto& X = Input(0);
    auto& Y = Input(1);
    auto& dY = Input(2);
    auto* dX = Output(0);
    dX->ResizeLike(X);
    if (X.IsType<float>()) {
      return DoRunWithType<float, float>();
    } else if (X.IsType<float16>()) {
      return DoRunWithType<float16, float>();
    } else {
      LOG(FATAL) << "Unsupported input types";
    }
    return true;
  }

 protected:
  vector<TIndex> cudnn_input_dims_;  // cached input shape for descriptor reuse
  CuDNNWrapper cudnn_wrapper_;
  cudnnTensorDescriptor_t bottom_desc_;
  cudnnTensorDescriptor_t top_desc_;
  cudnnPoolingDescriptor_t pooling_desc_;
  cudnnPoolingMode_t mode_;
};
namespace {
// Register the cuDNN engine for every pooling variant. The same two
// operator classes serve 1d/2d/3d: they dispatch internally on
// kernel_.size() and on the operator-type string.
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
|
a8046bb1d291e544d87e5486c9cacf81c998b999.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef USE_CUDNN
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/cudnn_batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass of cuDNN batch norm (HIP build). When Ftype is FLOAT16 the
// parameter/statistics buffers are kept in float (fp32 stats for fp16
// math); otherwise they are Ftype. In-place runs (top[0] == bottom[0])
// write into private_top_ first so Backward can still see the original
// bottom data via private_bottom_.
template <typename Ftype, typename Btype>
void CuDNNBatchNormLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
  Ftype* top_data = top[0] == bottom[0] ?
      private_top_->mutable_gpu_data<Ftype>() : top[0]->mutable_gpu_data<Ftype>();
  double epsilon = this->eps_;
  const void* scale_data;
  const void* bias_data;
  void* global_mean;
  void* global_var;
  // NOTE(review): save_mean/save_inv_var are left unassigned in the TEST
  // phase; they are only passed to the ForwardTraining call below, so this
  // is safe today, but nullptr-initializing them would be cheap insurance.
  void* save_mean;
  void* save_inv_var;
  if (is_type<Ftype>(FLOAT16)) {
    if (this->phase_ == TRAIN) {
      global_mean = this->blobs_[0]->template mutable_gpu_data<float>();
      global_var = this->blobs_[1]->template mutable_gpu_data<float>();
      save_mean = save_mean_->template mutable_gpu_data<float>();
      save_inv_var = save_inv_var_->template mutable_gpu_data<float>();
    } else {
      global_mean = (void *) this->blobs_[0]->template gpu_data<float>();
      global_var = (void *) this->blobs_[1]->template gpu_data<float>();
    }
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<float>();
      bias_data = this->blobs_[4]->template gpu_data<float>();
    } else {
      // No learned scale/bias: feed constant gamma=1, beta=0 buffers.
      scale_data = scale_ones_->template gpu_data<float>();
      bias_data = bias_zeros_->template gpu_data<float>();
    }
  } else {
    if (this->phase_ == TRAIN) {
      global_mean = this->blobs_[0]->template mutable_gpu_data<Ftype>();
      global_var = this->blobs_[1]->template mutable_gpu_data<Ftype>();
      save_mean = save_mean_->template mutable_gpu_data<Ftype>();
      save_inv_var = save_inv_var_->template mutable_gpu_data<Ftype>();
    } else {
      global_mean = (void *) this->blobs_[0]->template gpu_data<Ftype>();
      global_var = (void *) this->blobs_[1]->template gpu_data<Ftype>();
    }
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<Ftype>();
      bias_data = this->blobs_[4]->template gpu_data<Ftype>();
    } else {
      scale_data = scale_ones_->template gpu_data<Ftype>();
      bias_data = bias_zeros_->template gpu_data<Ftype>();
    }
  }
  if (this->phase_ == TRAIN) {
    // First iteration overwrites the running stats entirely (factor = 1).
    double factor = 1. - this->moving_average_fraction_;
    if (this->iter() == 0) {
      factor = 1.0;
    }
    CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(Caffe::cudnn_handle(0), mode_,
        cudnn::dataType<Ftype>::one, cudnn::dataType<Ftype>::zero,
        fwd_bottom_desc_, bottom_data, fwd_top_desc_, top_data,
        fwd_scale_bias_mean_var_desc_, scale_data, bias_data,
        factor, global_mean, global_var, epsilon, save_mean, save_inv_var));
  } else if (this->phase_ == TEST) {
    // NOTE(review): training uses mode_ but inference hardcodes
    // CUDNN_BATCHNORM_SPATIAL — presumably because the persistent spatial
    // mode is training-only; confirm against the cuDNN docs for mode_.
    CUDNN_CHECK(cudnnBatchNormalizationForwardInference(Caffe::cudnn_handle(0),
        CUDNN_BATCHNORM_SPATIAL,
        cudnn::dataType<Ftype>::one, cudnn::dataType<Ftype>::zero,
        fwd_bottom_desc_, bottom_data, fwd_top_desc_, top_data,
        fwd_scale_bias_mean_var_desc_, scale_data, bias_data,
        global_mean, global_var, epsilon));
  } else {
    LOG(FATAL) << "Unknown phase";
  }
  CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream()));
  if (top[0] == bottom[0]) {
    // In-place: stash the original input for Backward, then publish the
    // result from the private buffer into top.
    private_bottom_->CopyDataFrom(*bottom[0]);
    top[0]->CopyDataFrom(*private_top_);
  }
}
// Backward pass of cuDNN batch norm (HIP build). Uses the saved batch
// mean / inverse variance from the training forward pass. Buffer type
// selection mirrors Forward_gpu (float buffers for FLOAT16 Btype).
template <typename Ftype, typename Btype>
void CuDNNBatchNormLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
    const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
  const Btype* top_diff = top[0]->gpu_diff<Btype>();
  // In-place runs read the input stashed by Forward_gpu.
  const Btype* bottom_data = top[0] == bottom[0] ?
      private_bottom_->gpu_data<Btype>() : bottom[0]->gpu_data<Btype>();
  Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
  double epsilon = this->eps_;
  const void* save_mean;
  const void* save_inv_var;
  const void* scale_data;
  void* scale_diff;
  void* bias_diff;
  if (is_type<Btype>(FLOAT16)) {
    save_mean = save_mean_->template gpu_data<float>();
    save_inv_var = save_inv_var_->template gpu_data<float>();
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<float>();
      scale_diff = this->blobs_[3]->template mutable_gpu_diff<float>();
      bias_diff = this->blobs_[4]->template mutable_gpu_diff<float>();
    } else {
      // No learned scale/bias: route the (unused) param diffs into the
      // constant buffers' diff storage.
      scale_data = scale_ones_->template gpu_data<float>();
      scale_diff = scale_ones_->template mutable_gpu_diff<float>();
      bias_diff = bias_zeros_->template mutable_gpu_diff<float>();
    }
  } else {
    save_mean = save_mean_->template gpu_data<Btype>();
    save_inv_var = save_inv_var_->template gpu_data<Btype>();
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<Btype>();
      scale_diff = this->blobs_[3]->template mutable_gpu_diff<Btype>();
      bias_diff = this->blobs_[4]->template mutable_gpu_diff<Btype>();
    } else {
      scale_data = scale_ones_->template gpu_data<Btype>();
      scale_diff = scale_ones_->template mutable_gpu_diff<Btype>();
      bias_diff = bias_zeros_->template mutable_gpu_diff<Btype>();
    }
  }
  if (top[0] == bottom[0]) {
    // copy diff from top to private_top
    private_top_->CopyDiffFrom(*top[0]);
    top_diff = private_top_->gpu_diff<Btype>();
  }
  // Scaling factors: data diff is overwritten (beta = zero), param diffs
  // are accumulated (beta = one) per the cudnnBatchNormalizationBackward
  // alpha/beta argument order.
  CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(0), mode_,
      cudnn::dataType<Btype>::one, cudnn::dataType<Btype>::zero,
      cudnn::dataType<Btype>::one, cudnn::dataType<Btype>::one,
      bwd_bottom_desc_, bottom_data, bwd_bottom_desc_, top_diff, bwd_bottom_desc_, bottom_diff,
      bwd_scale_bias_mean_var_desc_, scale_data, scale_diff, bias_diff,
      epsilon, save_mean, save_inv_var));
  // NOTE(review): Forward syncs Caffe::thread_stream() (no argument) while
  // this syncs thread_stream(0) — confirm both resolve to the same stream.
  CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream(0)));
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNBatchNormLayer);
} // namespace caffe
#endif
| a8046bb1d291e544d87e5486c9cacf81c998b999.cu | #ifdef USE_CUDNN
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/cudnn_batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Forward pass of cuDNN batch norm (CUDA build; identical in structure to
// the HIP variant). fp16 Ftype keeps parameters/statistics in float; an
// in-place run (top[0] == bottom[0]) goes through private buffers so
// Backward still sees the original input.
template <typename Ftype, typename Btype>
void CuDNNBatchNormLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
    const vector<Blob*>& top) {
  const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
  Ftype* top_data = top[0] == bottom[0] ?
      private_top_->mutable_gpu_data<Ftype>() : top[0]->mutable_gpu_data<Ftype>();
  double epsilon = this->eps_;
  const void* scale_data;
  const void* bias_data;
  void* global_mean;
  void* global_var;
  // NOTE(review): save_mean/save_inv_var stay unassigned in the TEST phase;
  // safe because they are only passed to the training call below.
  void* save_mean;
  void* save_inv_var;
  if (is_type<Ftype>(FLOAT16)) {
    if (this->phase_ == TRAIN) {
      global_mean = this->blobs_[0]->template mutable_gpu_data<float>();
      global_var = this->blobs_[1]->template mutable_gpu_data<float>();
      save_mean = save_mean_->template mutable_gpu_data<float>();
      save_inv_var = save_inv_var_->template mutable_gpu_data<float>();
    } else {
      global_mean = (void *) this->blobs_[0]->template gpu_data<float>();
      global_var = (void *) this->blobs_[1]->template gpu_data<float>();
    }
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<float>();
      bias_data = this->blobs_[4]->template gpu_data<float>();
    } else {
      // No learned scale/bias: constant gamma=1, beta=0 buffers.
      scale_data = scale_ones_->template gpu_data<float>();
      bias_data = bias_zeros_->template gpu_data<float>();
    }
  } else {
    if (this->phase_ == TRAIN) {
      global_mean = this->blobs_[0]->template mutable_gpu_data<Ftype>();
      global_var = this->blobs_[1]->template mutable_gpu_data<Ftype>();
      save_mean = save_mean_->template mutable_gpu_data<Ftype>();
      save_inv_var = save_inv_var_->template mutable_gpu_data<Ftype>();
    } else {
      global_mean = (void *) this->blobs_[0]->template gpu_data<Ftype>();
      global_var = (void *) this->blobs_[1]->template gpu_data<Ftype>();
    }
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<Ftype>();
      bias_data = this->blobs_[4]->template gpu_data<Ftype>();
    } else {
      scale_data = scale_ones_->template gpu_data<Ftype>();
      bias_data = bias_zeros_->template gpu_data<Ftype>();
    }
  }
  if (this->phase_ == TRAIN) {
    // First iteration overwrites the running stats entirely (factor = 1).
    double factor = 1. - this->moving_average_fraction_;
    if (this->iter() == 0) {
      factor = 1.0;
    }
    CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(Caffe::cudnn_handle(0), mode_,
        cudnn::dataType<Ftype>::one, cudnn::dataType<Ftype>::zero,
        fwd_bottom_desc_, bottom_data, fwd_top_desc_, top_data,
        fwd_scale_bias_mean_var_desc_, scale_data, bias_data,
        factor, global_mean, global_var, epsilon, save_mean, save_inv_var));
  } else if (this->phase_ == TEST) {
    // NOTE(review): training uses mode_ but inference hardcodes
    // CUDNN_BATCHNORM_SPATIAL — confirm intent (persistent mode is
    // training-only in cuDNN).
    CUDNN_CHECK(cudnnBatchNormalizationForwardInference(Caffe::cudnn_handle(0),
        CUDNN_BATCHNORM_SPATIAL,
        cudnn::dataType<Ftype>::one, cudnn::dataType<Ftype>::zero,
        fwd_bottom_desc_, bottom_data, fwd_top_desc_, top_data,
        fwd_scale_bias_mean_var_desc_, scale_data, bias_data,
        global_mean, global_var, epsilon));
  } else {
    LOG(FATAL) << "Unknown phase";
  }
  CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream()));
  if (top[0] == bottom[0]) {
    // In-place: stash the input for Backward, publish result into top.
    private_bottom_->CopyDataFrom(*bottom[0]);
    top[0]->CopyDataFrom(*private_top_);
  }
}
// Backward pass of cuDNN batch norm (CUDA build; mirrors the HIP variant).
// Uses the batch statistics saved by the training forward pass.
template <typename Ftype, typename Btype>
void CuDNNBatchNormLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
    const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
  const Btype* top_diff = top[0]->gpu_diff<Btype>();
  // In-place runs read the input stashed by Forward_gpu.
  const Btype* bottom_data = top[0] == bottom[0] ?
      private_bottom_->gpu_data<Btype>() : bottom[0]->gpu_data<Btype>();
  Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
  double epsilon = this->eps_;
  const void* save_mean;
  const void* save_inv_var;
  const void* scale_data;
  void* scale_diff;
  void* bias_diff;
  if (is_type<Btype>(FLOAT16)) {
    save_mean = save_mean_->template gpu_data<float>();
    save_inv_var = save_inv_var_->template gpu_data<float>();
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<float>();
      scale_diff = this->blobs_[3]->template mutable_gpu_diff<float>();
      bias_diff = this->blobs_[4]->template mutable_gpu_diff<float>();
    } else {
      // No learned scale/bias: route unused param diffs into the constant
      // buffers' diff storage.
      scale_data = scale_ones_->template gpu_data<float>();
      scale_diff = scale_ones_->template mutable_gpu_diff<float>();
      bias_diff = bias_zeros_->template mutable_gpu_diff<float>();
    }
  } else {
    save_mean = save_mean_->template gpu_data<Btype>();
    save_inv_var = save_inv_var_->template gpu_data<Btype>();
    if (this->scale_bias_) {
      scale_data = this->blobs_[3]->template gpu_data<Btype>();
      scale_diff = this->blobs_[3]->template mutable_gpu_diff<Btype>();
      bias_diff = this->blobs_[4]->template mutable_gpu_diff<Btype>();
    } else {
      scale_data = scale_ones_->template gpu_data<Btype>();
      scale_diff = scale_ones_->template mutable_gpu_diff<Btype>();
      bias_diff = bias_zeros_->template mutable_gpu_diff<Btype>();
    }
  }
  if (top[0] == bottom[0]) {
    // copy diff from top to private_top
    private_top_->CopyDiffFrom(*top[0]);
    top_diff = private_top_->gpu_diff<Btype>();
  }
  // Data diff overwritten (beta = zero); param diffs accumulated (beta =
  // one) per the cudnnBatchNormalizationBackward alpha/beta ordering.
  CUDNN_CHECK(cudnnBatchNormalizationBackward(Caffe::cudnn_handle(0), mode_,
      cudnn::dataType<Btype>::one, cudnn::dataType<Btype>::zero,
      cudnn::dataType<Btype>::one, cudnn::dataType<Btype>::one,
      bwd_bottom_desc_, bottom_data, bwd_bottom_desc_, top_diff, bwd_bottom_desc_, bottom_diff,
      bwd_scale_bias_mean_var_desc_, scale_data, scale_diff, bias_diff,
      epsilon, save_mean, save_inv_var));
  // NOTE(review): Forward syncs Caffe::thread_stream() (no argument) while
  // this syncs thread_stream(0) — confirm both resolve to the same stream.
  CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream(0)));
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(CuDNNBatchNormLayer);
} // namespace caffe
#endif
|
1ef0fe0cea200f9a2d5ec1e272e3da05ff1d5dbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/fluid/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
// vol2col: unfolds a 3d volume [C, depth, height, width] into the column
// buffer [C, kd, kh, kw, od, oh, ow]. Each thread (grid-stride loop over
// num_kernels = C * od * oh * ow) owns one output location of one channel
// and copies its kd*kh*kw input patch, writing zeros for padded positions.
template <class T>
__global__ void vol2col(int num_kernels, const T* data_vol, int depth,
                        int height, int width, int dilation_d, int dilation_h,
                        int dilation_w, int filter_depth, int filter_height,
                        int filter_width, int stride_depth, int stride_height,
                        int stride_width, int padding_depth, int padding_height,
                        int padding_width, int output_detph, int output_height,
                        int output_width, T* data_col) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
       index += blockDim.x * gridDim.x) {
    // Decode (channel, d_out, h_out, w_out) from the flat index.
    int w_out = index % output_width;
    int h_out = (index / output_width) % output_height;
    int d_out = (index / output_width / output_height) % output_detph;
    int channel_in = index / output_width / output_height / output_detph;
    int channel_out = channel_in * filter_depth * filter_height * filter_width;
    // Top-left-front corner of the receptive field (may be negative due
    // to padding).
    int w_in = w_out * stride_width - padding_width;
    int h_in = h_out * stride_height - padding_height;
    int d_in = d_out * stride_depth - padding_depth;
    // Rebase both pointers so the inner loops use offsets relative to the
    // patch corner / first column entry.
    data_col += ((channel_out * output_detph + d_out) * output_height + h_out) *
                    output_width +
                w_out;
    data_vol += ((channel_in * depth + d_in) * height + h_in) * width + w_in;
    for (int k = 0; k < filter_depth; ++k) {
      for (int i = 0; i < filter_height; ++i) {
        for (int j = 0; j < filter_width; ++j) {
          int d = d_in + k * dilation_d;
          int h = h_in + i * dilation_h;
          int w = w_in + j * dilation_w;
          int col_idx = (k * dilation_d * height + i * dilation_h) * width +
                        j * dilation_w;
          // Out-of-bounds (padding) positions contribute zero.
          *data_col = (d >= 0 && d < depth && h >= 0 && h < height && w >= 0 &&
                       w < width)
                          ? data_vol[col_idx]
                          : 0;
          // Successive filter taps are one full output volume apart in col.
          data_col += output_detph * output_height * output_width;
        }
      }
    }
  }
}
/*
* im = [input_channels,intpu_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
// Host-side launcher for the vol2col kernel. Validates that the provided
// output dims match the standard conv-size formula
// (in + 2*pad - (dilation*(k-1)+1)) / stride + 1 before launching.
template <class T>
class Vol2ColFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& vol,
                  const std::vector<int>& dilations,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings,
                  framework::Tensor* col) const {
    PADDLE_ENFORCE(vol.dims().size() == 4);
    PADDLE_ENFORCE(col->dims().size() == 7);

    int input_channels = vol.dims()[0];
    int input_depth = vol.dims()[1];
    int input_height = vol.dims()[2];
    int input_width = vol.dims()[3];
    int filter_depth = col->dims()[1];
    int filter_height = col->dims()[2];
    int filter_width = col->dims()[3];
    int output_depth = col->dims()[4];
    int output_height = col->dims()[5];
    int output_width = col->dims()[6];

    PADDLE_ENFORCE_EQ((input_depth + 2 * paddings[0] -
                       ((dilations[0] * (filter_depth - 1) + 1))) /
                              strides[0] +
                          1,
                      output_depth,
                      "input_depth and output_depth are "
                      "Mismatching.");
    PADDLE_ENFORCE_EQ((input_height + 2 * paddings[1] -
                       ((dilations[1] * (filter_height - 1) + 1))) /
                              strides[1] +
                          1,
                      output_height,
                      "input_height and output_height are "
                      "Mismatching.");
    PADDLE_ENFORCE_EQ((input_width + 2 * paddings[2] -
                       ((dilations[2] * (filter_width - 1) + 1))) /
                              strides[2] +
                          1,
                      output_width,
                      "input_width and output_width are "
                      "Mismatching.");

    // One thread per (channel, output voxel) pair.
    int num_outputs =
        input_channels * output_depth * output_height * output_width;

    const int threads = 1024;
    const int blocks = (num_outputs + 1024 - 1) / 1024;  // ceil-div
    hipLaunchKernelGGL(( vol2col<T>), dim3(blocks), dim3(threads), 0, context.stream(),
        num_outputs, vol.data<T>(), input_depth, input_height, input_width,
        dilations[0], dilations[1], dilations[2], filter_depth, filter_height,
        filter_width, strides[0], strides[1], strides[2], paddings[0],
        paddings[1], paddings[2], output_depth, output_height, output_width,
        col->data<T>());
  }
};
// col2vol: folds the column buffer back into the 3d volume (the adjoint
// of vol2col). Each thread owns one input voxel and GATHERS every column
// entry that maps onto it — a race-free alternative to scattering with
// atomics from the column side.
template <class T>
__global__ void col2vol(int num_kernels, const T* data_col, int depth,
                        int height, int width, int dilation_d, int dilation_h,
                        int dilation_w, int filter_depth, int filter_height,
                        int filter_width, int stride_depth, int stride_height,
                        int stride_width, int padding_depth, int padding_height,
                        int padding_width, int output_detph, int output_height,
                        int output_width, T* data_vol) {
  // Effective (dilated) filter extents.
  const int d_filter_depth = dilation_d * (filter_depth - 1) + 1;
  const int d_filter_height = dilation_h * (filter_height - 1) + 1;
  const int d_filter_width = dilation_w * (filter_width - 1) + 1;

  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
       index += blockDim.x * gridDim.x) {
    T src_val = 0;
    // Padded coordinates of this voxel.
    int w = index % width + padding_width;
    int h = (index / width) % height + padding_height;
    int d = (index / width / height) % depth + padding_depth;
    int c = index / width / height / depth;
    // compute the start and end of the output
    // (the range of output positions whose receptive field covers (d,h,w))
    int w_col_start =
        (w < d_filter_width) ? 0 : (w - d_filter_width) / stride_width + 1;
    int w_col_end = min(w / stride_width + 1, output_width);
    int h_col_start =
        (h < d_filter_height) ? 0 : (h - d_filter_height) / stride_height + 1;
    int h_col_end = min(h / stride_height + 1, output_height);
    int d_col_start =
        (d < d_filter_depth) ? 0 : (d - d_filter_depth) / stride_depth + 1;
    int d_col_end = min(d / stride_depth + 1, output_detph);

    for (int d_col = d_col_start; d_col < d_col_end; ++d_col) {
      for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
        for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
          int d_off = (d - d_col * stride_depth);
          int h_off = (h - h_col * stride_height);
          int w_off = (w - w_col * stride_width);
          // Only taps landing exactly on the dilation grid contribute.
          if (d_off % dilation_d == 0 && h_off % dilation_h == 0 &&
              w_off % dilation_w == 0) {
            d_off /= dilation_d;
            h_off /= dilation_h;
            w_off /= dilation_w;

            int data_col_index =
                (((((c * filter_depth + d_off) * filter_height + h_off) *
                       filter_width +
                   w_off)));
            data_col_index =
                ((data_col_index * output_detph + d_col) * output_height +
                 h_col) *
                    output_width +
                w_col;
            src_val += data_col[data_col_index];
          }
        }
      }
    }
    data_vol[index] = src_val;
  }
}
/*
* im = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
// Host-side launcher for the col2vol kernel. Validates the output dims
// against the conv-size formula, then launches one thread per input voxel.
template <class T>
class Col2VolFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  const framework::Tensor& col,
                  const std::vector<int>& dilations,
                  const std::vector<int>& strides,
                  const std::vector<int>& paddings,
                  framework::Tensor* vol) const {
    PADDLE_ENFORCE(vol->dims().size() == 4);
    PADDLE_ENFORCE(col.dims().size() == 7);

    int input_channels = vol->dims()[0];
    int input_depth = vol->dims()[1];
    int input_height = vol->dims()[2];
    int input_width = vol->dims()[3];
    int filter_depth = col.dims()[1];
    int filter_height = col.dims()[2];
    int filter_width = col.dims()[3];
    int output_depth = col.dims()[4];
    int output_height = col.dims()[5];
    int output_width = col.dims()[6];

    PADDLE_ENFORCE_EQ((input_depth + 2 * paddings[0] -
                       ((dilations[0] * (filter_depth - 1) + 1))) /
                              strides[0] +
                          1,
                      output_depth,
                      "input_depth and output_depth are "
                      "Mismatching.");
    PADDLE_ENFORCE_EQ((input_height + 2 * paddings[1] -
                       ((dilations[1] * (filter_height - 1) + 1))) /
                              strides[1] +
                          1,
                      output_height,
                      "input_height and output_height are "
                      "Mismatching.");
    PADDLE_ENFORCE_EQ((input_width + 2 * paddings[2] -
                       ((dilations[2] * (filter_width - 1) + 1))) /
                              strides[2] +
                          1,
                      output_width,
                      "input_width and output_width are "
                      "Mismatching.");

    // One thread per input voxel (gather formulation — see col2vol).
    int num_kernels = input_channels * input_depth * input_height * input_width;

    const int threads = 1024;
    const int blocks = (num_kernels + 1024 - 1) / 1024;  // ceil-div
    hipLaunchKernelGGL(( col2vol<T>), dim3(blocks), dim3(threads), 0, context.stream(),
        num_kernels, col.data<T>(), input_depth, input_height, input_width,
        dilations[0], dilations[1], dilations[2], filter_depth, filter_height,
        filter_width, strides[0], strides[1], strides[2], paddings[0],
        paddings[1], paddings[2], output_depth, output_height, output_width,
        vol->data<T>());
  }
};
// Explicit instantiations: only float and double are supported on CUDA.
template class Vol2ColFunctor<platform::CUDADeviceContext, float>;
template class Vol2ColFunctor<platform::CUDADeviceContext, double>;
template class Col2VolFunctor<platform::CUDADeviceContext, float>;
template class Col2VolFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 1ef0fe0cea200f9a2d5ec1e272e3da05ff1d5dbc.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/math/vol2col.h"
#include "paddle/fluid/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <class T>
__global__ void vol2col(int num_kernels, const T* data_vol, int depth,
int height, int width, int dilation_d, int dilation_h,
int dilation_w, int filter_depth, int filter_height,
int filter_width, int stride_depth, int stride_height,
int stride_width, int padding_depth, int padding_height,
int padding_width, int output_detph, int output_height,
int output_width, T* data_col) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
int w_out = index % output_width;
int h_out = (index / output_width) % output_height;
int d_out = (index / output_width / output_height) % output_detph;
int channel_in = index / output_width / output_height / output_detph;
int channel_out = channel_in * filter_depth * filter_height * filter_width;
int w_in = w_out * stride_width - padding_width;
int h_in = h_out * stride_height - padding_height;
int d_in = d_out * stride_depth - padding_depth;
data_col += ((channel_out * output_detph + d_out) * output_height + h_out) *
output_width +
w_out;
data_vol += ((channel_in * depth + d_in) * height + h_in) * width + w_in;
for (int k = 0; k < filter_depth; ++k) {
for (int i = 0; i < filter_height; ++i) {
for (int j = 0; j < filter_width; ++j) {
int d = d_in + k * dilation_d;
int h = h_in + i * dilation_h;
int w = w_in + j * dilation_w;
int col_idx = (k * dilation_d * height + i * dilation_h) * width +
j * dilation_w;
*data_col = (d >= 0 && d < depth && h >= 0 && h < height && w >= 0 &&
w < width)
? data_vol[col_idx]
: 0;
data_col += output_detph * output_height * output_width;
}
}
}
}
}
/*
* im = [input_channels,intpu_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <class T>
class Vol2ColFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& vol,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings,
framework::Tensor* col) const {
PADDLE_ENFORCE(vol.dims().size() == 4);
PADDLE_ENFORCE(col->dims().size() == 7);
int input_channels = vol.dims()[0];
int input_depth = vol.dims()[1];
int input_height = vol.dims()[2];
int input_width = vol.dims()[3];
int filter_depth = col->dims()[1];
int filter_height = col->dims()[2];
int filter_width = col->dims()[3];
int output_depth = col->dims()[4];
int output_height = col->dims()[5];
int output_width = col->dims()[6];
PADDLE_ENFORCE_EQ((input_depth + 2 * paddings[0] -
((dilations[0] * (filter_depth - 1) + 1))) /
strides[0] +
1,
output_depth,
"input_depth and output_depth are "
"Mismatching.");
PADDLE_ENFORCE_EQ((input_height + 2 * paddings[1] -
((dilations[1] * (filter_height - 1) + 1))) /
strides[1] +
1,
output_height,
"input_height and output_height are "
"Mismatching.");
PADDLE_ENFORCE_EQ((input_width + 2 * paddings[2] -
((dilations[2] * (filter_width - 1) + 1))) /
strides[2] +
1,
output_width,
"input_width and output_width are "
"Mismatching.");
int num_outputs =
input_channels * output_depth * output_height * output_width;
const int threads = 1024;
const int blocks = (num_outputs + 1024 - 1) / 1024;
vol2col<T><<<blocks, threads, 0, context.stream()>>>(
num_outputs, vol.data<T>(), input_depth, input_height, input_width,
dilations[0], dilations[1], dilations[2], filter_depth, filter_height,
filter_width, strides[0], strides[1], strides[2], paddings[0],
paddings[1], paddings[2], output_depth, output_height, output_width,
col->data<T>());
}
};
template <class T>
__global__ void col2vol(int num_kernels, const T* data_col, int depth,
int height, int width, int dilation_d, int dilation_h,
int dilation_w, int filter_depth, int filter_height,
int filter_width, int stride_depth, int stride_height,
int stride_width, int padding_depth, int padding_height,
int padding_width, int output_detph, int output_height,
int output_width, T* data_vol) {
const int d_filter_depth = dilation_d * (filter_depth - 1) + 1;
const int d_filter_height = dilation_h * (filter_height - 1) + 1;
const int d_filter_width = dilation_w * (filter_width - 1) + 1;
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num_kernels;
index += blockDim.x * gridDim.x) {
T src_val = 0;
int w = index % width + padding_width;
int h = (index / width) % height + padding_height;
int d = (index / width / height) % depth + padding_depth;
int c = index / width / height / depth;
// compute the start and end of the output
int w_col_start =
(w < d_filter_width) ? 0 : (w - d_filter_width) / stride_width + 1;
int w_col_end = min(w / stride_width + 1, output_width);
int h_col_start =
(h < d_filter_height) ? 0 : (h - d_filter_height) / stride_height + 1;
int h_col_end = min(h / stride_height + 1, output_height);
int d_col_start =
(d < d_filter_depth) ? 0 : (d - d_filter_depth) / stride_depth + 1;
int d_col_end = min(d / stride_depth + 1, output_detph);
for (int d_col = d_col_start; d_col < d_col_end; ++d_col) {
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int d_off = (d - d_col * stride_depth);
int h_off = (h - h_col * stride_height);
int w_off = (w - w_col * stride_width);
if (d_off % dilation_d == 0 && h_off % dilation_h == 0 &&
w_off % dilation_w == 0) {
d_off /= dilation_d;
h_off /= dilation_h;
w_off /= dilation_w;
int data_col_index =
(((((c * filter_depth + d_off) * filter_height + h_off) *
filter_width +
w_off)));
data_col_index =
((data_col_index * output_detph + d_col) * output_height +
h_col) *
output_width +
w_col;
src_val += data_col[data_col_index];
}
}
}
}
data_vol[index] = src_val;
}
}
/*
* im = [input_channels, input_depth, input_height, input_width]
* col =
* [input_channels, filter_depth, filter_height, filter_width,
* output_depth, output_height, output_width]
*/
template <class T>
class Col2VolFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& col,
const std::vector<int>& dilations,
const std::vector<int>& strides,
const std::vector<int>& paddings,
framework::Tensor* vol) const {
PADDLE_ENFORCE(vol->dims().size() == 4);
PADDLE_ENFORCE(col.dims().size() == 7);
int input_channels = vol->dims()[0];
int input_depth = vol->dims()[1];
int input_height = vol->dims()[2];
int input_width = vol->dims()[3];
int filter_depth = col.dims()[1];
int filter_height = col.dims()[2];
int filter_width = col.dims()[3];
int output_depth = col.dims()[4];
int output_height = col.dims()[5];
int output_width = col.dims()[6];
PADDLE_ENFORCE_EQ((input_depth + 2 * paddings[0] -
((dilations[0] * (filter_depth - 1) + 1))) /
strides[0] +
1,
output_depth,
"input_depth and output_depth are "
"Mismatching.");
PADDLE_ENFORCE_EQ((input_height + 2 * paddings[1] -
((dilations[1] * (filter_height - 1) + 1))) /
strides[1] +
1,
output_height,
"input_height and output_height are "
"Mismatching.");
PADDLE_ENFORCE_EQ((input_width + 2 * paddings[2] -
((dilations[2] * (filter_width - 1) + 1))) /
strides[2] +
1,
output_width,
"input_width and output_width are "
"Mismatching.");
int num_kernels = input_channels * input_depth * input_height * input_width;
const int threads = 1024;
const int blocks = (num_kernels + 1024 - 1) / 1024;
col2vol<T><<<blocks, threads, 0, context.stream()>>>(
num_kernels, col.data<T>(), input_depth, input_height, input_width,
dilations[0], dilations[1], dilations[2], filter_depth, filter_height,
filter_width, strides[0], strides[1], strides[2], paddings[0],
paddings[1], paddings[2], output_depth, output_height, output_width,
vol->data<T>());
}
};
template class Vol2ColFunctor<platform::CUDADeviceContext, float>;
template class Vol2ColFunctor<platform::CUDADeviceContext, double>;
template class Col2VolFunctor<platform::CUDADeviceContext, float>;
template class Col2VolFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
4f33b1f31a6f2023b66ed61937edca55fd6c9b2a.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHBlas.h>
#include <THH/THHGeneral.h>
#include <TH/THHalf.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPBlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
hipblasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return HIPBLAS_OP_T;
else if (trans == 'n') return HIPBLAS_OP_N;
else if (trans == 'c') return HIPBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return HIPBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if TORCH_HIP_VERSION < 9200 && TORCH_HIP_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if TORCH_HIP_VERSION < 8000
# define HIP_R_16F HIPBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#if TORCH_HIP_VERSION >= 9010 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#endif // TORCH_HIP_VERSION < 11000
THCublasCheck(hipblasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, HIP_R_16F, (int)lda, strideA,
b, HIP_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, HIP_R_16F, (int)ldc, strideC,
(int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // TORCH_HIP_VERSION < 11000
#endif // __HIP_PLATFORM_HCC__
}
#endif // TORCH_HIP_VERSION or __HIP_PLATFORM_HCC__
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
}
#endif // __HIP_PLATFORM_HCC__
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
| 4f33b1f31a6f2023b66ed61937edca55fd6c9b2a.cu | #include <THC/THCBlas.h>
#include <THC/THCGeneral.h>
#include <TH/THHalf.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDABlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
cublasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return CUBLAS_OP_T;
else if (trans == 'n') return CUBLAS_OP_N;
else if (trans == 'c') return CUBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return CUBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if CUDA_VERSION < 9200 && CUDA_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if CUDA_VERSION < 8000
# define CUDA_R_16F CUBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#if CUDA_VERSION >= 9010 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
#if defined(CUDA_VERSION) && CUDA_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#endif // CUDA_VERSION < 11000
THCublasCheck(cublasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16F, (int)lda, strideA,
b, CUDA_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16F, (int)ldc, strideC,
(int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#if defined(CUDA_VERSION) && CUDA_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
#endif // __HIP_PLATFORM_HCC__
}
#endif // CUDA_VERSION or __HIP_PLATFORM_HCC__
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
}
#endif // __HIP_PLATFORM_HCC__
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
|
7bd883dbd64547ce7b71191ff71ca454ce39896a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 512
__global__ void add_number(int* a, int* b, int* c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
int main(int argc, char **argv){
int *a, *b, *c; // Host copies of variables
int *d_a, *d_b, *d_c; // Device copies of variables
int size = N * sizeof(int);
// Allocation of device's memory
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Allocation of space for variables on host
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
// Setting up input variables
for(int i = 0; i < N; i++){
*(a+i) = i;
*(b+i) = i;
}
// Copy inputs to device memory
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launching kernel on GPU
hipLaunchKernelGGL(( add_number), dim3(1),dim3(N), 0, 0, d_a, d_b, d_c);
// Copy results back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
for(int i = 1; i < N; i++){
printf("%d\n", *(c+i));
}
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 7bd883dbd64547ce7b71191ff71ca454ce39896a.cu | #include <cuda.h>
#include <stdio.h>
#define N 512
__global__ void add_number(int* a, int* b, int* c){
c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
int main(int argc, char **argv){
int *a, *b, *c; // Host copies of variables
int *d_a, *d_b, *d_c; // Device copies of variables
int size = N * sizeof(int);
// Allocation of device's memory
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Allocation of space for variables on host
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
// Setting up input variables
for(int i = 0; i < N; i++){
*(a+i) = i;
*(b+i) = i;
}
// Copy inputs to device memory
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launching kernel on GPU
add_number<<<1,N>>>(d_a, d_b, d_c);
// Copy results back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
for(int i = 1; i < N; i++){
printf("%d\n", *(c+i));
}
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
constraint_store.hip | // !!! This is a file automatically generated by hipify!!!
#include "constraint_store.h"
#include "propagator.h"
#include "constraint.h"
#include "aminoacid.h"
#include "logic_variables.h"
using namespace std;
using namespace Propagator;
//#define CSTORE_DBG_ISOLV
prop_func
func[ c_type_size ] = {
prop_c_sang,
prop_c_k_angle_shuffle,
prop_c_all_dist,
prop_c_k_rang,
prop_c_cg,
prop_c_mang,
prop_c_dist
};
ConstraintStore::ConstraintStore() :
_dbg ( "#log: Constraint Store - " ),
_not_init ( true ),
_constraint_queue ( NULL ),
_dom_events ( NULL ) {
}//-
ConstraintStore::~ConstraintStore() {
if ( !_constraint_queue ) free ( _constraint_queue );
if ( !_dom_events ) free ( _dom_events );
if ( !_already_set_cons ) free ( _already_set_cons );
if ( !_already_set_cons_fix ) free ( _already_set_cons_fix );
}//-
void
ConstraintStore::init() {
_dom_events = (int*) malloc( 2 * sizeof(int) );
_constraint_queue = (int*) malloc( gh_params.num_cons * sizeof(int) );
_already_set_cons = (int*) malloc( gh_params.num_cons * sizeof(int) );
_already_set_cons_fix = (int*) malloc( gh_params.num_cons * sizeof(int) );
}//init
bool
ConstraintStore::ISOLV ( AminoAcid* v ) {
#ifdef CSTORE_DBG_ISOLV
cout << _dbg < "ISOLV: enter..." << endl;
#endif
/// Init constraint store (first time)
if ( _not_init ) { init(); _not_init = false; }
/// Init iteration
_v_id = v->get_id();
_first_iteration = true;
_dom_size = g_logicvars.cp_variables[ _v_id ]->get_domain_size();
_q_size = gh_params.constraint_events[ _v_id ][ all_events ].size();
memset( _already_set_cons, 0, gh_params.num_cons * sizeof(int) );
memset( _already_set_cons_fix, 0, gh_params.num_cons * sizeof(int) );
/// Check whether constraints are at their fix-point
check_memcpy ( _constraint_queue, &gh_params.constraint_events[ _v_id ][ all_events ][ 0 ], &_q_size );
if ( !_q_size ) return true;
bool more_constraints = true;
while ( more_constraints ) {
#ifdef CSTORE_DBG_ISOLV
cout << "ISOLV: while: - # propagators:\t" << n_blocks << endl;
getchar();
#endif
more_constraints = false;
if ( !_first_iteration ) memset( _already_set_cons, 0, gh_params.num_cons * sizeof( int ) );
/// Propagation
if ( !propagation() ) { return false; }
/// Update queue
update_queue ();
}//while
#ifdef CSTORE_DBG_ISOLV
cout << _dbg << "ISOLV: ...exit" << endl;
#endif
return true;
}//ISOLVPAR
bool
ConstraintStore::propagation () {
int c_id, c_idx;
/// Loop throughout the queue of constraints
for ( int i = 0; i < _q_size; i++ ) {
c_id = _constraint_queue[ i ];
c_idx = gh_params.constraint_descriptions_idx[ c_id ];
func[ gh_params.constraint_descriptions[ c_idx ] ] ( _v_id, c_id, c_idx );
}//i
/// Copy events on host and check failure: SYNCHRONIZATION POINT
HANDLE_ERROR( hipMemcpy( _dom_events, gd_params.domain_events, sizeof(int), hipMemcpyDeviceToHost ) );
if ( _dom_events[ 0 ] == failed_event ) {
#ifdef CSTORE_DBG_ISOLV
cout << _dbg << "ISOLV: fail propagation" << endl;
#endif
return false;
}
return true;
}//propagation
void
ConstraintStore::update_queue () {
}//update_queue
void
ConstraintStore::check_memcpy ( int* queue_to, int* queue_from, int* size ) {
int fix_val, q_idx = 0, length = *size;
for ( int i = 0; i < length; i++ ) {
if ( _already_set_cons_fix[ queue_from[ i ] ] ) continue;
fix_val = g_constraints[ queue_from[ i ] ]->is_fix();
if ( fix_val == fix_prop ) {
/// Propagator is at a fix point: do not propagate it
(*size)--;
continue;
}
if ( fix_val == single_prop ) {
/// Propagator will be fix in the current run: do not propate it more than this loop
_already_set_cons_fix[ queue_from[ i ] ] = 1;
}
queue_to[ q_idx++ ] = queue_from[ i ];
}
}//check_memcpy
| constraint_store.cu | #include "constraint_store.h"
#include "propagator.h"
#include "constraint.h"
#include "aminoacid.h"
#include "logic_variables.h"
using namespace std;
using namespace Propagator;
//#define CSTORE_DBG_ISOLV
prop_func
func[ c_type_size ] = {
prop_c_sang,
prop_c_k_angle_shuffle,
prop_c_all_dist,
prop_c_k_rang,
prop_c_cg,
prop_c_mang,
prop_c_dist
};
ConstraintStore::ConstraintStore() :
_dbg ( "#log: Constraint Store - " ),
_not_init ( true ),
_constraint_queue ( NULL ),
_dom_events ( NULL ) {
}//-
ConstraintStore::~ConstraintStore() {
if ( !_constraint_queue ) free ( _constraint_queue );
if ( !_dom_events ) free ( _dom_events );
if ( !_already_set_cons ) free ( _already_set_cons );
if ( !_already_set_cons_fix ) free ( _already_set_cons_fix );
}//-
void
ConstraintStore::init() {
_dom_events = (int*) malloc( 2 * sizeof(int) );
_constraint_queue = (int*) malloc( gh_params.num_cons * sizeof(int) );
_already_set_cons = (int*) malloc( gh_params.num_cons * sizeof(int) );
_already_set_cons_fix = (int*) malloc( gh_params.num_cons * sizeof(int) );
}//init
bool
ConstraintStore::ISOLV ( AminoAcid* v ) {
#ifdef CSTORE_DBG_ISOLV
cout << _dbg < "ISOLV: enter..." << endl;
#endif
/// Init constraint store (first time)
if ( _not_init ) { init(); _not_init = false; }
/// Init iteration
_v_id = v->get_id();
_first_iteration = true;
_dom_size = g_logicvars.cp_variables[ _v_id ]->get_domain_size();
_q_size = gh_params.constraint_events[ _v_id ][ all_events ].size();
memset( _already_set_cons, 0, gh_params.num_cons * sizeof(int) );
memset( _already_set_cons_fix, 0, gh_params.num_cons * sizeof(int) );
/// Check whether constraints are at their fix-point
check_memcpy ( _constraint_queue, &gh_params.constraint_events[ _v_id ][ all_events ][ 0 ], &_q_size );
if ( !_q_size ) return true;
bool more_constraints = true;
while ( more_constraints ) {
#ifdef CSTORE_DBG_ISOLV
cout << "ISOLV: while: - # propagators:\t" << n_blocks << endl;
getchar();
#endif
more_constraints = false;
if ( !_first_iteration ) memset( _already_set_cons, 0, gh_params.num_cons * sizeof( int ) );
/// Propagation
if ( !propagation() ) { return false; }
/// Update queue
update_queue ();
}//while
#ifdef CSTORE_DBG_ISOLV
cout << _dbg << "ISOLV: ...exit" << endl;
#endif
return true;
}//ISOLVPAR
bool
ConstraintStore::propagation () {
int c_id, c_idx;
/// Loop throughout the queue of constraints
for ( int i = 0; i < _q_size; i++ ) {
c_id = _constraint_queue[ i ];
c_idx = gh_params.constraint_descriptions_idx[ c_id ];
func[ gh_params.constraint_descriptions[ c_idx ] ] ( _v_id, c_id, c_idx );
}//i
/// Copy events on host and check failure: SYNCHRONIZATION POINT
HANDLE_ERROR( cudaMemcpy( _dom_events, gd_params.domain_events, sizeof(int), cudaMemcpyDeviceToHost ) );
if ( _dom_events[ 0 ] == failed_event ) {
#ifdef CSTORE_DBG_ISOLV
cout << _dbg << "ISOLV: fail propagation" << endl;
#endif
return false;
}
return true;
}//propagation
void
ConstraintStore::update_queue () {
}//update_queue
void
ConstraintStore::check_memcpy ( int* queue_to, int* queue_from, int* size ) {
int fix_val, q_idx = 0, length = *size;
for ( int i = 0; i < length; i++ ) {
if ( _already_set_cons_fix[ queue_from[ i ] ] ) continue;
fix_val = g_constraints[ queue_from[ i ] ]->is_fix();
if ( fix_val == fix_prop ) {
/// Propagator is at a fix point: do not propagate it
(*size)--;
continue;
}
if ( fix_val == single_prop ) {
/// Propagator will be fix in the current run: do not propate it more than this loop
_already_set_cons_fix[ queue_from[ i ] ] = 1;
}
queue_to[ q_idx++ ] = queue_from[ i ];
}
}//check_memcpy
|
f646f0db025373c24f66d518da6e780359f3ba60.hip | // !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <kyle.r.lutz@gmail.com>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec;
size_t rotate_distance = PERF_N / 2;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
d_vec = h_vec;
t.start();
// there is no thrust::rotate() so we implement it manually with copy()
thrust::device_vector<int> tmp(d_vec.begin(), d_vec.begin() + rotate_distance);
thrust::copy(d_vec.begin() + rotate_distance, d_vec.end(), d_vec.begin());
thrust::copy(tmp.begin(), tmp.end(), d_vec.begin() + rotate_distance);
hipDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
| f646f0db025373c24f66d518da6e780359f3ba60.cu | //---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <kyle.r.lutz@gmail.com>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec;
size_t rotate_distance = PERF_N / 2;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
d_vec = h_vec;
t.start();
// there is no thrust::rotate() so we implement it manually with copy()
thrust::device_vector<int> tmp(d_vec.begin(), d_vec.begin() + rotate_distance);
thrust::copy(d_vec.begin() + rotate_distance, d_vec.end(), d_vec.begin());
thrust::copy(tmp.begin(), tmp.end(), d_vec.begin() + rotate_distance);
cudaDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
|
f9ef8a2b31ae15015ab7c012543e3c81b61fe1e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernElementWiseMax(const size_t numPoints, double* dest, double* src) {
// Called to standardize arrays to be a power of two
// Assumes a 2D grid of 1D blocks
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
if(i < numPoints) {
if(dest[i] < src[i]) {
dest[i] = src[i];
}
}
} | f9ef8a2b31ae15015ab7c012543e3c81b61fe1e9.cu | #include "includes.h"
__global__ void kernElementWiseMax(const size_t numPoints, double* dest, double* src) {
// Called to standardize arrays to be a power of two
// Assumes a 2D grid of 1D blocks
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
if(i < numPoints) {
if(dest[i] < src[i]) {
dest[i] = src[i];
}
}
} |
8c134fbbf65947330233d5412f59f1c02809d253.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* This code is released into the public domain.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
///////////////////////////////////////////////////////////////////////////////////////////
// Definitions and helper utilities
// Block width for CUDA kernels
#define BW 128
#ifdef USE_GFLAGS
#ifndef _WIN32
#define gflags google
#endif
#else
// Constant versions of gflags
#define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value)
#define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value)
#define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value)
#define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value)
#define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value))
#endif
/**
* Computes ceil(x / y) for integral nonnegative values.
*/
__global__ void FillOnes(float *vec, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = 1.0f;
} | 8c134fbbf65947330233d5412f59f1c02809d253.cu | #include "includes.h"
/*
* This code is released into the public domain.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
///////////////////////////////////////////////////////////////////////////////////////////
// Definitions and helper utilities
// Block width for CUDA kernels
#define BW 128
#ifdef USE_GFLAGS
#ifndef _WIN32
#define gflags google
#endif
#else
// Constant versions of gflags
#define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value)
#define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value)
#define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value)
#define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value)
#define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value))
#endif
/**
* Computes ceil(x / y) for integral nonnegative values.
*/
__global__ void FillOnes(float *vec, int size)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = 1.0f;
} |
4f8bf72307c8874e676082dfef21b613a1526163.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../inc/sufsort_kernel.cuh"
#include "../inc/segscan.cuh"
#include "../inc/radix_split.h"
#include <time.h>
#include <iostream>
#include <fstream>
#include <thrust/count.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <moderngpu.cuh>
#include <kernels/merge.cuh>
#define BLOCK_ID (gridDim.y * blockIdx.x + blockIdx.y)
#define THREAD_ID (threadIdx.x)
#define TID (BLOCK_ID * blockDim.x + THREAD_ID)
#define SEG_SORT_MAX 58720786
#define __TIMING_DETAIL__
typedef thrust::device_ptr<uint32> thrust_uint_p;
using namespace mgpu;
enum strategy_t
{
ALL_SEG_SORT,
M_SEG_SORT,
M_RADIX_SORT
};
//strategy used for sorting different types of h-groups
strategy_t strategy;
mgpu::ContextPtr context;
uint32 *h_mark;
//the thresold for small type groups
uint32 r1_thresh;
uint32 r2_thresh;
float init_sort = 0.0;
float ugroup_sort = 0.0;
float group_process = 0.0;
float stype_sort = 0.0;
float mtype_sort = 0.0;
float ltype_sort = 0.0;
float seg_sort = 0.0;
float isa_time = 0.0;
float get2ndkey = 0.0;
/*
float time;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("scatter and get_first_key time is %f\n", time);
hipEventDestroy(start);
hipEventDestroy(stop);
*/
void cudaCheckError(int line)
{
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if(err != hipSuccess)
printf("Last cuda error is %d at %d\n", err, line);
}
__global__ void compose_keys_kernel(uint32 *d_sa, uint64_t *d_tmp, uint32 *d_1stkey, uint32 *d_2ndkey, uint32 size, uint32 h_order);
/**
* wrapper function of b40c radix sort utility
*
* sort entries according to d_keys
*
*/
template<typename T>
void gpu_sort(T *d_keys, uint32 *d_values, uint32 size)
{
//b40c::radix_sort::Enactor enactor;
//b40c::util::DoubleBuffer<uint32, uint32> sort_storage(d_keys, d_values);
//enactor.Sort(sort_storage, size);
thrust::device_ptr<T> d_key_ptr = thrust::device_pointer_cast(d_keys);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_values);
thrust::sort_by_key(d_key_ptr, d_key_ptr+size, d_value_ptr);
return;
//MergesortPairs<T, uint32>(d_keys, d_values, size, *context);
}
//========================================================================================
__global__ void getSAfromISA(uint32 *d_isa, uint32 *d_sa, uint32 string_size)
{
uint32 tid = TID;
if(tid >= string_size) return;
int rank = d_isa[tid];
d_sa[rank] = tid;
}
__global__ void transform_init(uint32 *d_mark, uint32 *d_rank, uint32 string_size)
{
uint32 tid = (TID << 2);
if (tid >= string_size)
return;
uint4 mark4 = *(uint4*)(d_mark+tid);
uint4* d_rank_ptr = (uint4*)(d_rank+tid);
uint4 rank;
rank.x = tid & (0-mark4.x);
rank.y = (tid + 1) & (0-mark4.y);
//if(tid + 2 < string_size)
rank.z = (tid + 2) & (0-mark4.z);
//if(tid + 3 < string_size)
rank.w = (tid + 3) & (0-mark4.w);
*d_rank_ptr = rank;
}
__global__ void transform_init1(uint32 *d_rank, uint32 *d_mark, uint32 *d_index, uint32 index_size)
{
uint32 tid = TID;
if (tid >= index_size)
return;
d_rank[tid] = d_index[tid]*d_mark[tid];
}
int transform(uint32 *d_mark, uint32 *d_rank, uint32 *d_temp, uint32 string_size)
{
int numunique;
//thrust approach
thrust::device_ptr<uint32> dev_rank = thrust::device_pointer_cast(d_rank);
thrust::sequence(thrust::device, dev_rank, dev_rank + string_size);
thrust::device_ptr<uint32> dev_mark = thrust::device_pointer_cast(d_mark);
thrust::device_ptr<uint32> dev_temp = thrust::device_pointer_cast(d_temp);
thrust::multiplies<int> op;
thrust::transform(thrust::device, dev_mark, dev_mark + string_size, dev_rank, dev_rank, op);
numunique = thrust::count(thrust::device, dev_mark, dev_mark+string_size, 1);
thrust::inclusive_scan(thrust::device, dev_mark, dev_mark + string_size, dev_temp);
thrust::inclusive_scan_by_key(thrust::device, dev_temp, dev_temp + string_size, dev_rank, dev_rank);
return numunique;
}
int transform1(uint32 *d_mark, uint32 *d_c_index, uint32 *d_rank, uint32 *d_temp, uint32 index_size)
{
int numunique;
//thrust approach
dim3 h_dimBlock(BLOCK_SIZE,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(index_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( transform_init1), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_rank, d_mark, d_c_index, index_size);
thrust::device_ptr<uint32> dev_mark = thrust::device_pointer_cast(d_mark);
numunique = thrust::count(dev_mark, dev_mark+index_size, 1);
thrust::device_ptr<uint32> dev_rank = thrust::device_pointer_cast(d_rank);
thrust::device_ptr<uint32> dev_temp = thrust::device_pointer_cast(d_temp);
thrust::inclusive_scan(dev_mark, dev_mark + index_size, dev_temp);
thrust::inclusive_scan_by_key(dev_temp, dev_temp + index_size, dev_rank, dev_rank);
return numunique;
}
__global__ void get_gt1_pos(uint32 *d_segstart_mark, uint32 *d_index, uint32 *d_seg_start, uint32 gt1_size)
{
uint32 tid = TID;
if(tid >= gt1_size)
return;
if(d_segstart_mark[tid]==1)
{
d_seg_start[tid] = d_index[tid];
}
else
d_seg_start[tid] = 0;
}
__global__ void get_segend_mark(uint32 *d_segstart_mark, uint32 *d_segend_mark, uint32 gt1_size)
{
uint32 tid = TID;
if(tid >= gt1_size)
return;
if(d_segstart_mark[tid]==1 && tid)
d_segend_mark[tid-1] = 1;
else if(tid == gt1_size-1)
d_segend_mark[tid] = 1;
}
__global__ void get_seg_len(uint32 *d_segstart, uint32 *d_seglen, uint32 numseg)
{
uint32 tid = TID;
if(tid >= numseg)
return;
d_seglen[tid] = d_seglen[tid] - d_segstart[tid]+1;
}
bool update_isa_stage1( uint32 *d_sa,
uint64_t *d_key64,
uint32 *d_isa_in,
uint32 *d_isa_out,
uint32 *d_globalIdx,
uint32 *d_isa_tmp,
uint32 string_size,
bool &sorted,
uint32 &num_unique,
uint32 &num_seg,
uint32 &index_size,
uint32 h_order,
uint32 init_order,
uint32 end_order)
{
float time;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
dim3 h_dimBlock(BLOCK_SIZE,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(CEIL(string_size, 4), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipEventRecord(start, 0);
uint32 last_rank[] = {0xffffffff, 0, 0xffffffff};
mem_host2device(last_rank, d_isa_tmp+string_size, sizeof(uint32)*3);
//mark the start position of each segment to 1
if(h_order == init_order)
{
if(init_order == 8)
{
hipLaunchKernelGGL(( neighbour_comparison_long1), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_key64, string_size);
hipLaunchKernelGGL(( neighbour_comparison_long2), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_key64, string_size);
}
else if(init_order == 4)
{
hipLaunchKernelGGL(( neighbour_comparison_int1), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, (uint32*)d_key64, string_size);
hipLaunchKernelGGL(( neighbour_comparison_int2), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, (uint32*)d_key64, string_size);
}
else if(init_order == 1)
{
hipLaunchKernelGGL(( neighbour_comparison_char1), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, (uint8*)d_key64, string_size);
hipLaunchKernelGGL(( neighbour_comparison_char2), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, (uint8*)d_key64, string_size);
}
}
else
{
hipLaunchKernelGGL(( neighbour_comparison_long1), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_key64, string_size);
hipLaunchKernelGGL(( neighbour_comparison_long2), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_key64, string_size);
}
uint32 *d_temp = (uint32*)d_key64;
//in: d_isa_temp (mark)
//out: d_isa_out (rank)
num_unique = transform(d_isa_tmp, d_isa_out, d_temp, string_size);
//printf("number of unique ranks: %u\n", num_unique);
scatter(d_sa, d_isa_out, d_isa_in, string_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
isa_time += time;
if (num_unique >= string_size)
return true;
if(h_order != end_order)
return false;
/////////////////////////////////////////
hipEventRecord(start, 0);
//compact global index to get compacted segment index
uint32 *d_gt1mark = (uint32*)d_key64;
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL(string_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( mark_gt1_segment), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_gt1mark, d_globalIdx, string_size);
thrust_uint_p dev_index = thrust::device_pointer_cast(d_globalIdx);
thrust_uint_p dev_mark = thrust::device_pointer_cast(d_isa_tmp);
thrust_uint_p dev_stencil = thrust::device_pointer_cast(d_gt1mark);
//compact global index get indices for gt1 segment
thrust_uint_p new_end = thrust::remove_if(dev_index, dev_index + string_size, dev_stencil, thrust::identity<uint>());
thrust::remove_if(dev_mark, dev_mark + string_size, dev_stencil, thrust::identity<uint>());
if(strategy == ALL_SEG_SORT)
{
thrust_uint_p dev_sa = thrust::device_pointer_cast(d_sa);
thrust::remove_if(dev_sa, dev_sa + string_size, dev_stencil, thrust::identity<uint>());
}
index_size = new_end - dev_index;
uint32 *d_seg_start = d_gt1mark;
thrust_uint_p dev_start = thrust::device_pointer_cast(d_seg_start);
thrust_uint_p end = thrust::copy_if(dev_index, dev_index + index_size, dev_mark, dev_start, thrust::identity<uint>());
num_seg = end-dev_start;
uint32 *d_seg_len = d_seg_start + string_size;
hipMemset(d_isa_out, 0, index_size*sizeof(uint32));
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL(index_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( get_segend_mark), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_isa_out, index_size);
//hipMemcpy(d_isa_in, d_seg_len, index_size*sizeof(uint32), hipMemcpyDeviceToDevice);
thrust_uint_p dev_end_mark = thrust::device_pointer_cast(d_isa_out);
thrust_uint_p dev_c_seglen = thrust::device_pointer_cast(d_seg_len);
end = thrust::copy_if(dev_index, dev_index + index_size, dev_end_mark, dev_c_seglen, thrust::identity<uint>());
if(num_seg != end-dev_c_seglen)
printf("error in thrust::copy_if, %d\n", end-dev_c_seglen);
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL(num_seg, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( get_seg_len), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_seg_start, d_seg_len, num_seg);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
group_process += time;
hipEventDestroy(start);
hipEventDestroy(stop);
return false;
}
__global__ void scatter1(uint32 *d_sa, uint32 *d_rank, uint32 *d_index, uint32 *d_isa, uint32 index_size)
{
uint32 tid = TID;
if(tid >= index_size)
return;
int index = d_index[tid];
int sa = d_sa[index];
d_isa[sa] = d_rank[tid];
}
// Variant of scatter1 for the ALL_SEG_SORT strategy, where d_sa is already
// compacted: the suffix position is read directly at tid (no d_index hop).
__global__ void scatter2(uint32 *d_sa, uint32 *d_rank, uint32 *d_index, uint32 *d_isa, uint32 index_size)
{
	const uint32 tid = TID;
	if (tid < index_size)
	{
		int sa = d_sa[tid];
		d_isa[sa] = d_rank[tid];
	}
}
//TODO: d_isa_tmp may be remove finally (only reuse >1 segment pos here)
/* Stage-two ISA update: after the per-segment sorts have reordered d_sa
 * inside each unsorted group, recompute ranks, scatter them into the ISA
 * (d_isa_in), and rebuild the compacted index plus the (start, len)
 * description of the segments that are still ambiguous.
 *
 * Returns true when every suffix has a unique rank (sorting is finished).
 * num_seg and index_size are rewritten in place for the next doubling round.
 * Timing is accumulated into the globals isa_time and group_process. */
bool update_isa_stage2(
uint32 *d_sa,
uint32 *d_isa_in,
uint32 *d_isa_out,
uint32 *d_isa_tmp,
uint32 *d_block_start,
uint32 *d_block_len,
uint32 *d_c_index,
int *bound,
uint32 string_size,
uint32 &num_seg,
uint32 &index_size)
{
float time;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//assume we have a compacted index here (init. is global index, or computed for > 1 segments)
//get seg_index
//mark accord. to compacted index, get compacted mark, also use d_blk_start
//segmented scan mark to get rank
//scatter using compacted index and seg_rank
//for each rank value, record the pos of the segment end for it, using d_blk_len.
//compacted rank to get segment_start for next iteration
//compute new segment_len for next iteration
//sort new segment_len and segment start.
//... the following steps are the same.
hipEventRecord(start, 0);
dim3 h_dimBlock(BLOCK_SIZE,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(index_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
// Mark the head of every run of equal keys (writes 1s into d_isa_tmp).
if(strategy == ALL_SEG_SORT)
hipLaunchKernelGGL(( neighbour_compare2), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_c_index, d_isa_out, d_isa_tmp, index_size);
else
hipLaunchKernelGGL(( neighbour_compare), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_c_index, d_isa_out, d_isa_tmp, index_size);
// Turn the head marks into ranks (head index propagated across its run);
// returns the number of distinct ranks among the index_size entries.
int num_unique = transform1(d_isa_tmp, d_c_index, d_isa_out, d_block_len, index_size);
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL(index_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
// Scatter the new ranks into the ISA (scatter2 reads d_sa directly when
// d_sa is itself compacted under ALL_SEG_SORT; scatter1 goes via d_c_index).
if(strategy == ALL_SEG_SORT)
hipLaunchKernelGGL(( scatter2), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_isa_out, d_c_index, d_isa_in, index_size);
else
hipLaunchKernelGGL(( scatter1), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_isa_out, d_c_index, d_isa_in, index_size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
isa_time += time;
if (num_unique >= index_size)
return true;
//printf("num_unique and index_size is %d, %d\n", num_unique, index_size);
hipEventRecord(start, 0);
// d_block_start is reused as scratch for the "segment longer than 1" mask.
uint32 *d_gt1mark = d_block_start;
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL(index_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( mark_gt1_segment2), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_gt1mark, index_size);
thrust_uint_p dev_stencil = thrust::device_pointer_cast(d_gt1mark);
thrust_uint_p dev_index = thrust::device_pointer_cast(d_c_index);
thrust_uint_p dev_mark = thrust::device_pointer_cast(d_isa_tmp);
//compact global index get indices for gt1 segment
thrust_uint_p new_end = thrust::remove_if(dev_index, dev_index + index_size, dev_stencil, thrust::identity<uint>());
//compact seg start mark (d_isa_tmp) get start_mark for gt1 segment
thrust::remove_if(dev_mark, dev_mark + index_size, dev_stencil, thrust::identity<uint>());
if(strategy == ALL_SEG_SORT)
{
// ALL_SEG_SORT also keeps d_sa compacted to the still-ambiguous suffixes.
thrust_uint_p dev_sa = thrust::device_pointer_cast(d_sa);
thrust::remove_if(dev_sa, dev_sa + string_size, dev_stencil, thrust::identity<uint>());
}
index_size = new_end - dev_index;
// Compact the segment-start positions for the next round.
thrust_uint_p dev_start = thrust::device_pointer_cast(d_block_start);
thrust_uint_p end = thrust::copy_if(dev_index, dev_index + index_size, dev_mark, dev_start, thrust::identity<uint>());
num_seg = end - dev_start;
hipMemset(d_isa_out, 0, index_size*sizeof(uint32));
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL(index_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
// Derive end-of-segment marks from the start marks, then compact the
// segment end positions into d_block_len (converted to lengths below).
hipLaunchKernelGGL(( get_segend_mark), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_tmp, d_isa_out, index_size);
thrust_uint_p dev_end = thrust::device_pointer_cast(d_block_len);
thrust_uint_p dev_segend_mark = thrust::device_pointer_cast(d_isa_out);
end = thrust::copy_if(dev_index, dev_index + index_size, dev_segend_mark, dev_end, thrust::identity<uint>());
if(num_seg != end - dev_end)
printf("error %d\n", __LINE__);
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL(num_seg, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
// In-place: d_block_len[i] = end_pos - start_pos + 1.
hipLaunchKernelGGL(( get_seg_len), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_block_start, d_block_len, num_seg);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
group_process += time;
hipEventDestroy(start);
hipEventDestroy(stop);
return false;
}
/* One round of prefix doubling (stage 1).
 *
 * On the first round (h_order == init_order) the sort keys are the raw
 * init_order-byte prefixes generated from the reference string d_ref;
 * on later rounds each key is the 64-bit pair (rank[i], rank[i+h_order/2])
 * composed from the current ISA. After sorting (key, SA) pairs, the ISA is
 * refreshed via update_isa_stage1.
 *
 * Returns the flag from update_isa_stage1: true when all suffixes already
 * have unique ranks. Timing is accumulated into the global init_sort. */
bool prefix_doubling_sort(
uint64_t *d_key64,
uint32 *d_sa,
uint32 *d_isa_in,
uint32 *d_isa_out,
uint32 *d_ref,
uint32 *d_index,
uint32 *d_isa_tmp,
uint32 h_order,
uint32 init_order,
uint32 end_order,
uint32 string_size,
bool &sorted,
uint32 &num_unique,
uint32 &num_seg,
uint32 &index_size)
{
float time;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//generate bucket
if(h_order == init_order)
{
// First round: build keys straight from the packed input text.
// The "+2" padding matches the sentinel words appended in main().
uint32 size_d_ref = CEIL(string_size, 4);
dim3 h_dimBlock(BLOCK_SIZE,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL((size_d_ref+2), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
if(init_order == 8)
{
hipLaunchKernelGGL(( generate_8bucket), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_ref, d_key64, (size_d_ref+2));
}
else if(init_order == 4)
{
hipLaunchKernelGGL(( generate_4bucket), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_ref, (uint32*)d_key64, (size_d_ref+2));
}
else if(init_order == 1)
{
hipLaunchKernelGGL(( generate_1bucket), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_ref, (uint8*) d_key64, (size_d_ref+2));
}
else
{
cout << "init_order error, currently not supported" << endl;
exit(-1);
}
}
else
{
// Later rounds: key = (rank[sa], rank[sa + h_order/2]) from the ISA.
dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1);
dim3 blocks_per_grid(1, 1, 1);
blocks_per_grid.x = CEIL(CEIL(string_size, 4), threads_per_block.x);
hipLaunchKernelGGL(( compose_keys_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_sa, d_key64, d_isa_out, d_isa_in+h_order/2, string_size, h_order/2);
}
// Key-value sort; the key width depends on the round (8/4/1 bytes on the
// first round, 8 bytes afterwards).
if(h_order == init_order)
{
if(init_order == 8)
{
thrust::device_ptr<uint64_t> d_key_ptr = thrust::device_pointer_cast(d_key64);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
}
else if(init_order == 4)
{
thrust::device_ptr<uint32> d_key_ptr = thrust::device_pointer_cast((uint32*)d_key64);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
}
else if(init_order == 1)
{
thrust::device_ptr<uint8> d_key_ptr = thrust::device_pointer_cast((uint8*)d_key64);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
}
}
else
{
thrust::device_ptr<uint64_t> d_key_ptr = thrust::device_pointer_cast(d_key64);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
}
//494ms for sprot34
/*
if(1)
{
thrust::device_ptr<uint64_t> d_key_ptr = thrust::device_pointer_cast(d_key64);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
}
else
{
gpu_sort<uint64_t>(d_key64, d_sa, string_size);
}*/
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
init_sort += time;
hipEventDestroy(start);
hipEventDestroy(stop);
//update isa
bool flag = update_isa_stage1(d_sa, d_key64, d_isa_in, d_isa_out, d_index, d_isa_tmp,
string_size, sorted, num_unique, num_seg,
index_size, h_order, init_order, end_order);
/*
bool update_isa_stage1( uint32 *d_sa,
uint64_t *d_key64,
uint32 *d_isa_in,
uint32 *d_isa_out,
uint32 *d_globalIdx,
uint32 *d_isa_tmp,
uint32 string_size,
bool &sorted,
uint32 &num_unique,
uint32 &num_seg,
uint32 &index_size,
uint32 h_order,
uint32 init_order,
uint32 end_order);
*/
return flag;
}
/* Stage 1 driver: run prefix-doubling rounds with h_order doubling from
 * init_order up to end_order (inclusive). Stops early when a round reports
 * that all ranks are unique; in either case h_order is left one doubling
 * past the last round executed, ready for stage 2. */
void sufsort_stage1(
uint64_t *d_key64,
uint32 *d_sa,
uint32 *&d_isa_in,
uint32 *&d_isa_out,
uint32 *d_ref,
uint32 *d_index,
uint32 *d_isa_tmp,
uint32 &h_order,
uint32 init_order,
uint32 end_order,
uint32 string_size,
bool &sorted,
uint32 &num_unique,
uint32 &num_seg,
uint32 &index_size)
{
	h_order = init_order;
	while (h_order <= end_order)
	{
		const bool all_unique = prefix_doubling_sort(
			d_key64, d_sa, d_isa_in, d_isa_out, d_ref, d_index, d_isa_tmp,
			h_order, init_order, end_order, string_size,
			sorted, num_unique, num_seg, index_size);
		h_order *= 2;
		if (all_unique)
			break;
	}
}
/* One stage-two round: sort every still-unsorted segment by its second key
 * (rank at offset h_order/2), dispatching each segment to a sorter chosen
 * by its length, then refresh the ISA via update_isa_stage2.
 *
 * Strategies:
 *   ALL_SEG_SORT  - everything through MGPU segmented sort (chunked below
 *                   SEG_SORT_MAX elements per call);
 *   M_SEG_SORT    - medium segments via MGPU segmented sort, small via
 *                   bitonic kernels, huge (> r2_thresh, bucket 12) via
 *                   per-segment thrust sorts;
 *   M_RADIX_SORT  - medium segments via block-level radix sort instead.
 * bound[i] holds the first segment index whose length reaches 2^(i+1)
 * (filled by find_boundary_kernel_init; -1 means "bucket empty").
 * d_key64's storage is reused as the (block_start, block_len) arrays.
 * Returns the flag from update_isa_stage2 (true == fully sorted). */
bool stage_two_sort (
uint64_t *d_key64,
uint32 *d_sa,
uint32 *d_isa_in,
uint32 *d_isa_out,
uint32 *d_isa_tmp,
uint32 *d_index,
uint32 h_order,
uint32 string_size,
uint32 &num_seg,
uint32 &index_size,
uint32 digit_count,
uint32 *d_digits,
uint32 *d_tmp_store,
int *bound)
{
uint32* d_block_start = (uint32*)d_key64;
uint32* d_block_len = d_block_start + string_size;
float time;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//try different alternatives in papers
//1. all use segmented sort (mgpu)
//2. large part merge sort, median seg sort, small bitonic sort
//3. median radix sort
if(strategy != ALL_SEG_SORT)
{
hipEventRecord(start, 0);
// Order segments by length so equal-size segments are contiguous, then
// compute the bucket boundaries bound[0..13].
thrust::device_ptr<uint32> d_key_ptr = thrust::device_pointer_cast(d_block_len);
thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_block_start);
thrust::sort_by_key(d_key_ptr, d_key_ptr+num_seg, d_value_ptr);
dim3 h_dimBlock(BLOCK_SIZE,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(num_seg, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
int *d_bound;
hipMalloc((void**)&d_bound, 16*sizeof(int));
hipMemset(d_bound, -1, 16*sizeof(int));
hipLaunchKernelGGL(( find_boundary_kernel_init), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_block_len, d_bound, num_seg, r2_thresh);
mem_device2host(d_bound, bound, sizeof(int)*16);
hipFree(d_bound);
bound[13] = num_seg;
// Back-fill empty buckets so each bound[i] is a valid boundary.
for(int i=12; i>=0; i--)
{
if(bound[i]==-1)
bound[i] = bound[i+1];
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
group_process += time;
/*
time = 0.0;
hipEventRecord(start, 0);
if(strategy == M_RADIX_SORT)
{
int logthresh = (int)(log(r1_thresh)/log(2));
if(bound[logthresh] != -1 && num_seg-bound[logthresh]>0)
{
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = num_seg - bound[logthresh];
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( get_second_keys_stage_two), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_isa_in+h_order/2, d_isa_out,
d_block_start+bound[logthresh], d_block_len+bound[logthresh], numBlocks);
}
}
else
{
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
get2ndkey += time;
*/
time = 0.0;
hipEventRecord(start, 0);
//>65535
// L-type (huge) segments: one thrust key-value sort per segment, driven
// from the host with the segment table copied back.
if(bound[12] != -1 && num_seg-bound[12] > 0)
{
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = num_seg - bound[12];
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( get_second_keys_stage_two), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_isa_in+h_order/2, d_isa_out, d_block_start+bound[12], d_block_len+bound[12], numBlocks);
uint32 *h_block_start = (uint32*)malloc((num_seg-bound[12])*sizeof(uint32));
uint32 *h_block_len = (uint32*)malloc((num_seg-bound[12])*sizeof(uint32));
hipMemcpy(h_block_start, d_block_start+bound[12], (num_seg-bound[12])*sizeof(uint32), hipMemcpyDeviceToHost);
hipMemcpy(h_block_len, d_block_len+bound[12], (num_seg-bound[12])*sizeof(uint32), hipMemcpyDeviceToHost);
for (uint32 i = 0; i < num_seg-bound[12]; i++)
{
gpu_sort<uint32>(d_isa_out+h_block_start[i], d_sa+h_block_start[i], h_block_len[i]);
}
free(h_block_start);
free(h_block_len);
}
else
{
bound[12] = num_seg;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
ltype_sort += time;
time = 0.0;
hipEventRecord(start, 0);
// M-type (medium) segments.
if(strategy == M_RADIX_SORT)
{
int logthresh = (int)(log(r1_thresh)/log(2));
if(bound[logthresh] != -1 && bound[12]-bound[logthresh]>0)
{
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = bound[12] - bound[logthresh];
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( get_second_keys_stage_two), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_isa_in+h_order/2, d_isa_out,
d_block_start+bound[logthresh], d_block_len+bound[logthresh], numBlocks);
}
if(bound[11] != -1 && bound[12] - bound[11] > 0)
{
//>2048 <65536
// Multi-block LSD radix sort, 5 bits per pass over 30 bits.
uint32 num_thread = NUM_THREAD_SEG_SORT;
uint32 block_count = bound[12] - bound[11];
uint32 *d_block_start_ptr = d_block_start +bound[11];
uint32 *d_block_len_ptr = d_block_len + bound[11];
//Partition *d_par_ptr = d_par + s_type_par_bound;
uint32 num_block = block_count < NUM_BLOCK_SEG_SORT ? block_count : NUM_BLOCK_SEG_SORT;
uint32 work_per_block = block_count/num_block + (block_count%num_block?1:0);
uint32 num_interval_for_pass2 = work_per_block/NUM_WARPS + (work_per_block%NUM_WARPS?1:0);
for (uint32 bit = 0; bit < 30; bit += 5)
{
HANDLE_ERROR(hipMemset(d_digits, 0, digit_count));
hipLaunchKernelGGL(( multiblock_radixsort_pass1), dim3(num_block), dim3(num_thread), 0, 0, d_isa_out, d_digits+32, d_block_start_ptr, d_block_len_ptr, bit, block_count);
hipLaunchKernelGGL(( multiblock_radixsort_pass2), dim3(num_block), dim3(num_thread), 0, 0, d_digits+32, d_block_len_ptr, num_interval_for_pass2, block_count);
hipLaunchKernelGGL(( multiblock_radixsort_pass3), dim3(num_block), dim3(num_thread), 0, 0, d_digits+32, d_isa_out, d_sa, d_block_start_ptr, d_block_len_ptr, d_tmp_store, bit, block_count);
}
}
else
{
bound[11] = bound[12];
}
if(logthresh < 11)
{
if(bound[logthresh] != -1 && bound[11] - bound[logthresh] > 0)
{
//S-type segment key-value sort
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = bound[11] - bound[logthresh];
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
for (uint32 bit = 0; bit < 30; bit +=5)
hipLaunchKernelGGL(( single_block_radixsort1), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_out, d_sa, d_block_start+bound[logthresh], d_block_len+bound[logthresh], bit, bound[11] - bound[logthresh]);
//bitonic_sort_kernel_gt256_isa<<<h_dimGrid, h_dimBlock>>>(d_block_len+bound[8], d_block_start+bound[8], d_sa, d_isa_in, d_isa_out, bound[11] - bound[8], h_order>>1);
}
else
{
bound[logthresh] = bound[11];
}
}
}
else
{
// M_SEG_SORT: medium segments through MGPU segmented key-value sort.
int logthresh = (int)(log(r1_thresh)/log(2));
if(logthresh < 8 || logthresh > 11)
printf("error\n");
if(bound[logthresh] != -1 && bound[12] - bound[logthresh] > 0)
{
unsigned int num_segment = bound[12] - bound[logthresh];
uint32 *d_len = (d_block_len + bound[logthresh]);
uint32 *d_pos;
hipMalloc((void**)&d_pos, num_segment*sizeof(uint32));
thrust::device_ptr<uint32> d_len_ptr = thrust::device_pointer_cast(d_len);
thrust::device_ptr<uint32> d_pos_ptr = thrust::device_pointer_cast(d_pos);
thrust::exclusive_scan(d_len_ptr, d_len_ptr+num_segment, d_pos_ptr);
unsigned int num_ele = thrust::reduce(d_len_ptr, d_len_ptr+num_segment);
if(num_ele >= SEG_SORT_MAX)
{
printf("the length exceeds the maximum length for MGPU segmented sort, please use radix sort for m-type groups!\n");
exit(0);
}
uint32 *d_keys, *d_vals;
hipMalloc((void**)&d_keys, num_ele*sizeof(uint32));
hipMalloc((void**)&d_vals, num_ele*sizeof(uint32));
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = num_segment;
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
// Gather (key,value) pairs into the packed buffers, segment-sort, then
// scatter the sorted pairs back into d_sa / d_isa_out.
hipLaunchKernelGGL(( get_pair_for_seg_sort), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_isa_in+h_order/2, d_keys, d_vals,
d_pos, d_block_start+bound[logthresh], d_block_len + bound[logthresh], numBlocks);
SegSortPairsFromIndices((int*)d_keys, (int*)d_vals, num_ele, (const int*)(d_pos+1), num_segment-1, *context);
cudaCheckError(__LINE__);
hipLaunchKernelGGL(( set_pair_for_seg_sort), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa, d_isa_out, d_keys, d_vals, d_pos,
d_block_start+bound[logthresh], d_block_len + bound[logthresh], numBlocks);
hipFree(d_pos);
hipFree(d_keys);
hipFree(d_vals);
}
else
{
bound[logthresh] = bound[12];
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
mtype_sort += time;
time = 0.0;
hipEventRecord(start, 0);
//TODO: separate > WARP_SIZE and < WARP_SIZE segment
// S-type (small) segments: in-shared-memory bitonic sorts, batching
// several segments of length (2^i, 2^(i+1)] into one thread block.
for(int i=log(r1_thresh)/log(2)-1; i>=1; i--)
{
//sort segment with length: 2^i-2^(i+1)
if(bound[i] != -1 && bound[i+1]-bound[i] > 0)
{
if(r1_thresh == 256)
{
int segnum = 0x01<<(7-i);
//int seglen = 0x01<<(i+1);
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL((bound[i+1]-bound[i]), segnum);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( bitonic_sort_kernel_gt2n_isa), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0,
d_block_len+bound[i],
d_block_start+bound[i],
d_sa,
d_isa_in,
d_isa_out,
bound[i+1]-bound[i],
i+1,
h_order>>1);
}
else
{
int logthresh = (int)(log(r1_thresh)/log(2));
int segnum = 0x01<<(logthresh-1-i);
//int seglen = 0x01<<(i+1);
int round = r1_thresh/BLOCK_SIZE;
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL((bound[i+1]-bound[i]), segnum);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( bitonic_sort_kernel_gt2n_isa1), dim3(h_dimGrid), dim3(h_dimBlock), 2*r1_thresh*sizeof(uint32), 0,
d_block_len+bound[i],
d_block_start+bound[i],
d_sa,
d_isa_in,
d_isa_out,
bound[i+1]-bound[i],
i+1,
h_order>>1,
round,
logthresh);
}
}
else
bound[i] = bound[i+1];
}
//1-2
if(bound[0] != -1 && bound[1]-bound[0] > 0)
{
h_dimGrid.x = h_dimGrid.y = 1;
numBlocks = CEIL((bound[1]-bound[0]), h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
//bitonic_sort_kernel2<<<h_dimGrid, h_dimBlock>>>(d_block_start+bound[0], d_sa, d_isa_out, bound[1]-bound[0]);
hipLaunchKernelGGL(( bitonic_sort_kernel2_isa), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_block_start+bound[0], d_sa, d_isa_in, d_isa_out, bound[1]-bound[0], h_order/2);
}
else
bound[0] = bound[1];
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
stype_sort += time;
}
else
{
// ALL_SEG_SORT: segment-sort the whole compacted index, split into
// chunks below SEG_SORT_MAX that end on a segment boundary (found by
// scanning the host copy of the start marks backwards).
hipEventRecord(start, 0);
hipMemcpy(h_mark, d_isa_tmp, index_size*sizeof(uint32), hipMemcpyDeviceToHost);
uint32 *d_keys, *d_vals;
d_keys = d_isa_out;
d_vals = d_sa;
//the maximum array length MGPU_SEG_SORT can processes
int max_length = SEG_SORT_MAX;
int idx_start = 0;
int idx_end = 0;
int seg_start = 0;
while(1)
{
idx_start = idx_end;
idx_end += max_length;
if(idx_end > index_size)
idx_end = index_size;
else
{
for(; idx_end>idx_start; idx_end--)
{
if(h_mark[idx_end] == 1)
break;
}
}
int length = idx_end-idx_start;
dim3 h_dimBlock(BLOCK_SIZE,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(length, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
hipLaunchKernelGGL(( get_keys), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_sa+idx_start, d_isa_in+h_order/2, d_keys+idx_start, length, string_size);
thrust::device_ptr<uint32> dev_mark = thrust::device_pointer_cast(d_isa_tmp+idx_start);
unsigned int numseg = thrust::reduce(dev_mark, dev_mark+length);
uint32 *d_pos;
hipMalloc((void**)&d_pos, numseg*sizeof(uint32));
thrust::device_ptr<uint32> d_len_ptr = thrust::device_pointer_cast(d_block_len+seg_start);
thrust::device_ptr<uint32> d_pos_ptr = thrust::device_pointer_cast(d_pos);
thrust::inclusive_scan(d_len_ptr, d_len_ptr+numseg, d_pos_ptr);
SegSortPairsFromIndices(d_keys+idx_start, d_vals+idx_start, length, (const int*)d_pos, numseg-1, *context);
seg_start += numseg;
hipFree(d_pos);
if(idx_end == index_size)
{
break;
}
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
seg_sort += time;
//sort segment position according to segment length (replace with bucket?)
bool flag = update_isa_stage2(d_sa, d_isa_in, d_isa_out, d_isa_tmp, d_block_start,
d_block_len, d_index, bound, string_size, num_seg, index_size);
hipEventDestroy(start);
hipEventDestroy(stop);
return flag;
}
/* Stage 2 driver: keep doubling h_order and sorting the remaining unsorted
 * segments (stage_two_sort) until every suffix has a unique rank or the
 * order covers the whole string. For ALL_SEG_SORT the SA is finally derived
 * from the ISA, since that strategy compacts d_sa during the rounds. */
void sufsort_stage2(uint64_t *d_key64,
uint32 *d_sa,
uint32 *d_isa_in,
uint32 *d_isa_out,
uint32 *d_index,
uint32 *d_isa_tmp,
uint8 *h_buffer,
uint32 h_order,
uint32 string_size,
uint32 &num_seg,
uint32 &index_size)
{
	int bound[16];
	//allocate memory for segmented sort
	uint32 digit_count = sizeof(uint32)*16*NUM_LIMIT*32;
	// The scratch buffers are only needed (and only allocated) by the
	// M_RADIX_SORT strategy. Initialise them to NULL so the other
	// strategies pass well-defined (unused) pointers to stage_two_sort
	// instead of reading indeterminate locals, which is undefined behaviour.
	uint32 *d_digits = NULL;
	uint32 *d_tmp_store = NULL;
	if(strategy == M_RADIX_SORT)
	{
		d_digits = (uint32*)allocate_device_memory(digit_count);
		d_tmp_store = (uint32*)allocate_device_memory(sizeof(uint32) * NUM_BLOCK_SEG_SORT * MAX_SEG_NUM *2);
	}
	for (; h_order < string_size; h_order *= 2)
	{
		bool flag = stage_two_sort(d_key64, d_sa, d_isa_in, d_isa_out, d_isa_tmp, d_index, h_order, string_size, num_seg, index_size, digit_count, d_digits, d_tmp_store, bound);
		if(flag) break;
		//check_h_order_correctness(d_sa, h_buffer, string_size, h_order);
	}
	if(strategy == ALL_SEG_SORT)
	{
		// ALL_SEG_SORT only maintains the ISA; invert it to obtain the SA.
		dim3 h_dimBlock(BLOCK_SIZE,1,1);
		dim3 h_dimGrid(1,1,1);
		int numBlocks = CEIL(string_size, h_dimBlock.x);
		THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
		hipLaunchKernelGGL(( getSAfromISA), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_isa_in, d_sa, string_size);
	}
	if(strategy == M_RADIX_SORT)
	{
		free_device_memory(d_digits);
		free_device_memory(d_tmp_store);
	}
}
/* Entry point.
 * Usage: sufsort <filename> <init_h0> [strategy] [r1_thresh] [r2_thresh] [end_order]
 *   init_h0   - initial prefix length (1, 4 or 8 bytes)
 *   strategy  - 0 ALL_SEG_SORT, 1 M_SEG_SORT, 2 M_RADIX_SORT (default 1)
 *   r1_thresh - small/medium segment split, multiple of 256 (default 256)
 *   r2_thresh - medium/large segment split (default 65535)
 *   end_order - last prefix length handled by stage 1 (default init_h0)
 * Reads the input file, runs the two sorting stages on the GPU and prints
 * a timing breakdown. */
int main(int argc, char** argv)
{
	if(argc < 3)
	{
		printf("usage: sufsort <filename> <init_h0>\n");
		exit(-1);
	}
	FILE * pFile;
	long size;
	size_t result;
	// Echo only the arguments actually supplied: the original code printed
	// argv[3..5] unconditionally, but indexing argv past argc is undefined
	// behaviour (and argv[argc] itself is NULL).
	for (int i = 1; i < argc; i++)
		printf("%s ", argv[i]);
	printf("\n");
	uint32 h_order = atoi(argv[2]);
	int end_order = h_order;
	if(argc >= 4)
		strategy = (strategy_t)atoi(argv[3]);
	else
		strategy = (strategy_t)1;
	if(argc >= 5)
		r1_thresh = atoi(argv[4]);
	else
		r1_thresh = 256;
	if(argc >= 6)
		r2_thresh = atoi(argv[5]);
	else
		r2_thresh = 65535;
	if(argc >= 7)
		end_order = atoi(argv[6]);
	if(h_order != 8 && h_order != 4 && h_order != 1)
	{
		perror ("init h_order not supported, use 1, 4 or 8\n");
		exit(1);
	}
	if(r1_thresh%256 != 0)
	{
		perror ("error, R1 threshold should be mutiple of 256\n");
		exit(1);
	}
	// Open in binary mode so ftell() agrees with the number of bytes fread()
	// returns on every platform (text mode would translate line endings).
	pFile = fopen (argv[1],"rb");
	if (pFile==NULL) { perror ("Error opening file\n"); exit(1); }
	fseek (pFile, 0, SEEK_END);
	size=ftell(pFile);
	rewind (pFile);
	if (size <= 0) { fputs ("Empty or unreadable input file\n", stderr); exit(1); }
	uint8 *h_buffer = (uint8*)malloc((size+4)*sizeof(uint8));
	if (h_buffer == NULL) {fputs ("Memory error",stderr); exit (2);}
	// copy the file into the buffer:
	result = fread (h_buffer,1, size, pFile);
	// Cast avoids the signed/unsigned comparison between size_t and long.
	if (result != (size_t)size) {fputs ("Reading error",stderr); exit (3);}
	// The sort needs a terminating 0 sentinel; append one if missing.
	if(h_buffer[size-1] != 0)
	{
		h_buffer[size] = 0;
		size+=1;
	}
	fclose(pFile);
	uint32 ch_per_uint32 = 4;
	uint32 size_d_ref = CEIL(size, ch_per_uint32);
	uint32 ext_size = (size_d_ref+2)*ch_per_uint32;
	uint32 num_unique = 0;
	bool sorted = false;
	/* Grow the buffer to the padded size and zero the tail so the bucket
	 * kernels can safely read past the logical end of the string. Check the
	 * realloc result instead of overwriting h_buffer (a failed realloc
	 * returns NULL and would have leaked/clobbered the original pointer). */
	uint8 *h_grown = (uint8*)realloc(h_buffer, ext_size);
	if (h_grown == NULL) {fputs ("Memory error",stderr); exit (2);}
	h_buffer = h_grown;
	for (uint32 i = size; i < ext_size; i++)
		h_buffer[i] = 0;
	// NOTE(review): device ordinal 3 is hard-coded here — confirm this
	// matches the target machine or expose it as a command-line option.
	context = CreateCudaDevice(3);
	h_mark = (uint32*)malloc(sizeof(uint32)*ext_size);
	uint32* d_sa = (uint32*)allocate_device_memory(sizeof(uint32)*ext_size);
	uint32* d_isa_in = (uint32*)allocate_device_memory(sizeof(uint32) * ext_size);
	uint32* d_isa_out = (uint32*)allocate_device_memory(sizeof(uint32) * ext_size);
	uint64_t* d_key = (uint64_t*)allocate_device_memory(sizeof(uint64_t) * ext_size);
	uint32* d_index = (uint32*)allocate_device_memory(sizeof(uint32) * ext_size);
	uint32 *d_isa_tmp = (uint32*)allocate_device_memory(sizeof(uint32)*(size+20));
	//input is stored in d_isa_in
	mem_host2device(h_buffer, d_isa_in, ext_size);
	float time;
	hipEvent_t start;
	hipEvent_t stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	uint32 num_seg, index_size;
	// Stage 1: initial bucket sort plus prefix doubling up to end_order.
	sufsort_stage1(d_key, d_sa, d_isa_in, d_isa_out,
		d_isa_in, d_index, d_isa_tmp, h_order,
		h_order, end_order, size, sorted, num_unique,
		num_seg, index_size);
	hipEventRecord(start, 0);
	// Stage 2: segment-wise sorting of the groups still unsorted.
	if(!sorted)
	{
		sufsort_stage2(d_key, d_sa, d_isa_in, d_isa_out, d_index, d_isa_tmp, h_buffer, h_order, size, num_seg, index_size);
	}
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&time, start, stop);
	ugroup_sort += time;
	hipEventDestroy(start);
	hipEventDestroy(stop);
	printf("initial sorting time is %f (ms)\n", init_sort);
	printf("unsorted group sorting time is %f (ms)\n", ugroup_sort);
	if(strategy != ALL_SEG_SORT)
	{
		printf("s-type, m-type and l-type sorting time are %f, %f, %f\n", stype_sort, mtype_sort, ltype_sort);
		printf("get sorting key time is %f\n", get2ndkey);
	}
	else
	{
		printf("segmented sorting time is %f (ms)\n", seg_sort);
	}
	printf("group processing time is %f (ms)\n", group_process);
	printf("deriving ISA time is %f (ms)\n", isa_time);
	printf("total suffix sorting time is %f (ms)\n", init_sort+stype_sort+mtype_sort+ltype_sort+get2ndkey+isa_time+group_process);
	fprintf(stderr, "%f\t%f\t%f\t%f\t%f\t%f\n", init_sort, stype_sort, mtype_sort, ltype_sort, isa_time, group_process);
	hipError_t err = hipGetLastError();
	if(err != hipSuccess)
		printf("last cudaerr is %d\n", err);
	//check_h_order_correctness(d_sa, h_buffer, size, size);
	printf("----------------------------------------------------------------\n");
	//free memory
	free(h_buffer);
	free(h_mark);
	free_device_memory(d_sa);
	free_device_memory(d_index);
	free_device_memory(d_isa_in);
	free_device_memory(d_isa_out);
	free_device_memory(d_key);
	free_device_memory(d_isa_tmp);
	return 0;
}
| 4f8bf72307c8874e676082dfef21b613a1526163.cu | #include "../inc/sufsort_kernel.cuh"
#include "../inc/segscan.cuh"
#include "../inc/radix_split.h"
#include <time.h>
#include <iostream>
#include <fstream>
#include <thrust/count.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <moderngpu.cuh>
#include <kernels/merge.cuh>
#define BLOCK_ID (gridDim.y * blockIdx.x + blockIdx.y)
#define THREAD_ID (threadIdx.x)
#define TID (BLOCK_ID * blockDim.x + THREAD_ID)
#define SEG_SORT_MAX 58720786
#define __TIMING_DETAIL__
typedef thrust::device_ptr<uint32> thrust_uint_p;
using namespace mgpu;
enum strategy_t
{
ALL_SEG_SORT,
M_SEG_SORT,
M_RADIX_SORT
};
//strategy used for sorting different types of h-groups
strategy_t strategy;
mgpu::ContextPtr context;
uint32 *h_mark;
//the thresold for small type groups
uint32 r1_thresh;
uint32 r2_thresh;
float init_sort = 0.0;
float ugroup_sort = 0.0;
float group_process = 0.0;
float stype_sort = 0.0;
float mtype_sort = 0.0;
float ltype_sort = 0.0;
float seg_sort = 0.0;
float isa_time = 0.0;
float get2ndkey = 0.0;
/*
float time;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("scatter and get_first_key time is %f\n", time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
*/
/* Report the most recent CUDA error, tagged with the caller's line number.
 * Drains all pending device work first so asynchronous kernel failures are
 * surfaced here at the call site rather than at some later API call. */
void cudaCheckError(int line)
{
	// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported equivalent with identical semantics.
	cudaDeviceSynchronize();
	cudaError_t err = cudaGetLastError();
	if(err != cudaSuccess)
		printf("Last cuda error is %d at %d\n", err, line);
}
__global__ void compose_keys_kernel(uint32 *d_sa, uint64_t *d_tmp, uint32 *d_1stkey, uint32 *d_2ndkey, uint32 size, uint32 h_order);
/**
 * Device key-value sort used throughout the pipeline.
 *
 * Reorders d_values so they follow d_keys in ascending order, entirely on
 * the GPU. (The earlier b40c and MGPU merge-sort back ends were retired in
 * favour of thrust::sort_by_key.)
 */
template<typename T>
void gpu_sort(T *d_keys, uint32 *d_values, uint32 size)
{
	thrust::device_ptr<T> keys_begin = thrust::device_pointer_cast(d_keys);
	thrust::device_ptr<uint32> values_begin = thrust::device_pointer_cast(d_values);
	thrust::sort_by_key(keys_begin, keys_begin + size, values_begin);
}
//========================================================================================
// Invert the ISA into the SA: suffix tid has rank d_isa[tid],
// so SA[that rank] = tid. One thread per suffix.
__global__ void getSAfromISA(uint32 *d_isa, uint32 *d_sa, uint32 string_size)
{
	const uint32 tid = TID;
	if (tid < string_size)
	{
		int rank = d_isa[tid];
		d_sa[rank] = tid;
	}
}
/* Seed the rank array from the segment-head marks, 4 elements per thread
 * via uint4 vector accesses: rank[i] = i when mark[i] == 1, else 0.
 * (0 - mark) yields an all-ones mask for mark == 1 and zero otherwise,
 * so the AND is a branchless select.
 * NOTE(review): the guard only checks the first of the four lanes, so the
 * last thread may touch indices up to tid+3 past string_size — presumably
 * safe because the buffers are allocated with the padded ext_size (see the
 * commented-out per-lane checks); confirm callers never pass tighter
 * allocations. */
__global__ void transform_init(uint32 *d_mark, uint32 *d_rank, uint32 string_size)
{
uint32 tid = (TID << 2);
if (tid >= string_size)
return;
uint4 mark4 = *(uint4*)(d_mark+tid);
uint4* d_rank_ptr = (uint4*)(d_rank+tid);
uint4 rank;
rank.x = tid & (0-mark4.x);
rank.y = (tid + 1) & (0-mark4.y);
//if(tid + 2 < string_size)
rank.z = (tid + 2) & (0-mark4.z);
//if(tid + 3 < string_size)
rank.w = (tid + 3) & (0-mark4.w);
*d_rank_ptr = rank;
}
// Seed the rank array for a compacted index: entry tid gets its global
// index d_index[tid] when it is a segment head (mark == 1), and 0 otherwise.
__global__ void transform_init1(uint32 *d_rank, uint32 *d_mark, uint32 *d_index, uint32 index_size)
{
	const uint32 tid = TID;
	if (tid < index_size)
		d_rank[tid] = d_mark[tid] * d_index[tid];
}
/* Turn segment-head marks into ranks over the full string.
 *
 * d_mark[i] is 1 at the first element of each run of equal keys, 0 inside a
 * run. The rank of every element is defined as the index of its run's head.
 * Steps: fill d_rank with 0..n-1; multiply by the mark so only heads keep
 * their index (others become 0); inclusive-scan the marks into d_temp to
 * obtain a per-run segment id; then an inclusive sum-scan keyed by that id
 * propagates the head's index through its run (valid because every non-head
 * contribution is 0).
 * Returns the number of runs, i.e. the count of distinct ranks. */
int transform(uint32 *d_mark, uint32 *d_rank, uint32 *d_temp, uint32 string_size)
{
int numunique;
//thrust approach
thrust::device_ptr<uint32> dev_rank = thrust::device_pointer_cast(d_rank);
thrust::sequence(thrust::device, dev_rank, dev_rank + string_size);
thrust::device_ptr<uint32> dev_mark = thrust::device_pointer_cast(d_mark);
thrust::device_ptr<uint32> dev_temp = thrust::device_pointer_cast(d_temp);
thrust::multiplies<int> op;
thrust::transform(thrust::device, dev_mark, dev_mark + string_size, dev_rank, dev_rank, op);
numunique = thrust::count(thrust::device, dev_mark, dev_mark+string_size, 1);
thrust::inclusive_scan(thrust::device, dev_mark, dev_mark + string_size, dev_temp);
thrust::inclusive_scan_by_key(thrust::device, dev_temp, dev_temp + string_size, dev_rank, dev_rank);
return numunique;
}
/* Compacted-index counterpart of transform(): derive ranks for the
 * index_size entries described by d_c_index.
 *
 * transform_init1 seeds d_rank[i] = d_c_index[i] * d_mark[i], so only
 * segment heads carry their global index. The inclusive scan of the marks
 * (into d_temp) labels each segment, and the keyed sum-scan propagates the
 * head's global index through its segment (non-heads contribute 0).
 * Returns the number of segment heads, i.e. distinct ranks. */
int transform1(uint32 *d_mark, uint32 *d_c_index, uint32 *d_rank, uint32 *d_temp, uint32 index_size)
{
int numunique;
//thrust approach
dim3 h_dimBlock(BLOCK_SIZE,1,1);
dim3 h_dimGrid(1,1,1);
int numBlocks = CEIL(index_size, h_dimBlock.x);
THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
transform_init1<<<h_dimGrid, h_dimBlock>>>(d_rank, d_mark, d_c_index, index_size);
thrust::device_ptr<uint32> dev_mark = thrust::device_pointer_cast(d_mark);
numunique = thrust::count(dev_mark, dev_mark+index_size, 1);
thrust::device_ptr<uint32> dev_rank = thrust::device_pointer_cast(d_rank);
thrust::device_ptr<uint32> dev_temp = thrust::device_pointer_cast(d_temp);
thrust::inclusive_scan(dev_mark, dev_mark + index_size, dev_temp);
thrust::inclusive_scan_by_key(dev_temp, dev_temp + index_size, dev_rank, dev_rank);
return numunique;
}
// For each entry of the compacted (>1-length) segment list: segment heads
// keep their global index in d_seg_start, every other slot is zeroed.
__global__ void get_gt1_pos(uint32 *d_segstart_mark, uint32 *d_index, uint32 *d_seg_start, uint32 gt1_size)
{
	const uint32 tid = TID;
	if (tid >= gt1_size)
		return;
	d_seg_start[tid] = (d_segstart_mark[tid] == 1) ? d_index[tid] : 0;
}
/* Derive end-of-segment marks from start-of-segment marks: the element just
 * before each segment head (other than the first) is a segment end, and the
 * last element of the array closes the final segment.
 * NOTE(review): if the very last element were itself a segment head with
 * tid > 0, the first branch would win and position gt1_size-1 would get no
 * end mark — presumably impossible here because length-1 segments are
 * filtered out before this kernel runs; confirm against the callers. */
__global__ void get_segend_mark(uint32 *d_segstart_mark, uint32 *d_segend_mark, uint32 gt1_size)
{
uint32 tid = TID;
if(tid >= gt1_size)
return;
if(d_segstart_mark[tid]==1 && tid)
d_segend_mark[tid-1] = 1;
else if(tid == gt1_size-1)
d_segend_mark[tid] = 1;
}
// Convert segment descriptors in place: on entry d_seglen[tid] holds the
// inclusive end position of segment tid; on exit it holds the length
// (end - start + 1).
__global__ void get_seg_len(uint32 *d_segstart, uint32 *d_seglen, uint32 numseg)
{
	const uint32 tid = TID;
	if (tid < numseg)
		d_seglen[tid] = d_seglen[tid] - d_segstart[tid] + 1;
}
// Update the ISA (inverse suffix array) after an initial-stage sort round.
//  1. Compare neighbouring sorted keys to mark segment starts in d_isa_tmp
//     (key width depends on the round: 1/4/8 bytes initially, 64-bit after).
//  2. Segmented-scan the marks into new ranks (transform) and scatter them
//     into d_isa_in via the suffix array.
//  3. On the last doubling round only (h_order == end_order), compact the
//     positions that still share a rank (segments of length > 1) into
//     d_globalIdx and build per-segment start/length arrays for stage two,
//     reusing d_key64 as scratch.
// Returns true when every suffix already has a unique rank (fully sorted).
// NOTE(review): `sorted` is never written here; callers appear to rely on
// the return value instead — confirm before removing the parameter.
bool update_isa_stage1( uint32 *d_sa,
			uint64_t *d_key64,
			uint32 *d_isa_in,
			uint32 *d_isa_out,
			uint32 *d_globalIdx,
			uint32 *d_isa_tmp,
			uint32 string_size,
			bool &sorted,
			uint32 &num_unique,
			uint32 &num_seg,
			uint32 &index_size,
			uint32 h_order,
			uint32 init_order,
			uint32 end_order)
{
	float time;
	cudaEvent_t start;
	cudaEvent_t stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	dim3 h_dimBlock(BLOCK_SIZE,1,1);
	dim3 h_dimGrid(1,1,1);
	int numBlocks = CEIL(CEIL(string_size, 4), h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	cudaEventRecord(start, 0);
	// Sentinel values appended past the end of the mark buffer so the
	// neighbour-comparison kernels can read one element beyond the string.
	uint32 last_rank[] = {0xffffffff, 0, 0xffffffff};
	mem_host2device(last_rank, d_isa_tmp+string_size, sizeof(uint32)*3);
	//mark the start position of each segment to 1
	if(h_order == init_order)
	{
		if(init_order == 8)
		{
			neighbour_comparison_long1<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_key64, string_size);
			neighbour_comparison_long2<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_key64, string_size);
		}
		else if(init_order == 4)
		{
			neighbour_comparison_int1<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, (uint32*)d_key64, string_size);
			neighbour_comparison_int2<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, (uint32*)d_key64, string_size);
		}
		else if(init_order == 1)
		{
			neighbour_comparison_char1<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, (uint8*)d_key64, string_size);
			neighbour_comparison_char2<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, (uint8*)d_key64, string_size);
		}
	}
	else
	{
		// Later rounds always use 64-bit composed keys.
		neighbour_comparison_long1<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_key64, string_size);
		neighbour_comparison_long2<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_key64, string_size);
	}
	// The sort keys are dead from here on; reuse their buffer as scratch.
	uint32 *d_temp = (uint32*)d_key64;
	//in: d_isa_tmp (mark)
	//out: d_isa_out (rank)
	num_unique = transform(d_isa_tmp, d_isa_out, d_temp, string_size);
	scatter(d_sa, d_isa_out, d_isa_in, string_size);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	isa_time += time;
	if (num_unique >= string_size)
	{
		// Fully sorted. Destroy the timing events before returning — the
		// original code leaked them on this early-exit path.
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
		return true;
	}
	if(h_order != end_order)
	{
		// Not the final stage-one round: no stage-two bookkeeping needed yet.
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
		return false;
	}
	/////////////////////////////////////////
	cudaEventRecord(start, 0);
	//compact global index to get compacted segment index
	uint32 *d_gt1mark = (uint32*)d_key64;
	h_dimGrid.x = h_dimGrid.y = 1;
	numBlocks = CEIL(string_size, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	mark_gt1_segment<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_gt1mark, d_globalIdx, string_size);
	thrust_uint_p dev_index = thrust::device_pointer_cast(d_globalIdx);
	thrust_uint_p dev_mark = thrust::device_pointer_cast(d_isa_tmp);
	thrust_uint_p dev_stencil = thrust::device_pointer_cast(d_gt1mark);
	//compact global index get indices for gt1 segment
	thrust_uint_p new_end = thrust::remove_if(dev_index, dev_index + string_size, dev_stencil, thrust::identity<uint>());
	thrust::remove_if(dev_mark, dev_mark + string_size, dev_stencil, thrust::identity<uint>());
	if(strategy == ALL_SEG_SORT)
	{
		// Keep d_sa aligned with the compacted index stream.
		thrust_uint_p dev_sa = thrust::device_pointer_cast(d_sa);
		thrust::remove_if(dev_sa, dev_sa + string_size, dev_stencil, thrust::identity<uint>());
	}
	index_size = new_end - dev_index;
	uint32 *d_seg_start = d_gt1mark;
	thrust_uint_p dev_start = thrust::device_pointer_cast(d_seg_start);
	thrust_uint_p end = thrust::copy_if(dev_index, dev_index + index_size, dev_mark, dev_start, thrust::identity<uint>());
	num_seg = end-dev_start;
	uint32 *d_seg_len = d_seg_start + string_size;
	cudaMemset(d_isa_out, 0, index_size*sizeof(uint32));
	h_dimGrid.x = h_dimGrid.y = 1;
	numBlocks = CEIL(index_size, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	get_segend_mark<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_isa_out, index_size);
	thrust_uint_p dev_end_mark = thrust::device_pointer_cast(d_isa_out);
	thrust_uint_p dev_c_seglen = thrust::device_pointer_cast(d_seg_len);
	// Store each segment's end position first, then turn it into a length.
	end = thrust::copy_if(dev_index, dev_index + index_size, dev_end_mark, dev_c_seglen, thrust::identity<uint>());
	if(num_seg != end-dev_c_seglen)
		printf("error in thrust::copy_if, %d\n", end-dev_c_seglen);
	h_dimGrid.x = h_dimGrid.y = 1;
	numBlocks = CEIL(num_seg, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	get_seg_len<<<h_dimGrid, h_dimBlock>>>(d_seg_start, d_seg_len, num_seg);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	group_process += time;
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return false;
}
// Scatter new ranks into the ISA: each compacted position tid maps through
// d_index to its slot in the (uncompacted) suffix array, and the suffix
// found there receives the rank computed for tid.
__global__ void scatter1(uint32 *d_sa, uint32 *d_rank, uint32 *d_index, uint32 *d_isa, uint32 index_size)
{
	uint32 tid = TID;
	if (tid >= index_size)
		return;
	uint32 suffix = d_sa[d_index[tid]];
	d_isa[suffix] = d_rank[tid];
}
// Variant of scatter1 for ALL_SEG_SORT mode: d_sa is already compacted, so
// tid indexes it directly. d_index is kept in the signature for call-site
// symmetry with scatter1 but is intentionally unused here.
__global__ void scatter2(uint32 *d_sa, uint32 *d_rank, uint32 *d_index, uint32 *d_isa, uint32 index_size)
{
	uint32 tid = TID;
	if (tid >= index_size)
		return;
	d_isa[d_sa[tid]] = d_rank[tid];
}
//TODO: d_isa_tmp may be remove finally (only reuse >1 segment pos here)
// Stage-two ISA update: the same idea as update_isa_stage1 but operating on
// the compacted index of still-unsorted positions.
//  1. Compare neighbouring ranks to mark segment starts in d_isa_tmp.
//  2. Derive new ranks (transform1) and scatter them into d_isa_in.
//  3. Re-compact the segments that remain longer than 1 into d_c_index /
//     d_block_start / d_block_len for the next doubling round.
// Returns true once every compacted position has a unique rank.
bool update_isa_stage2(
		uint32 *d_sa,
		uint32 *d_isa_in,
		uint32 *d_isa_out,
		uint32 *d_isa_tmp,
		uint32 *d_block_start,
		uint32 *d_block_len,
		uint32 *d_c_index,
		int *bound,
		uint32 string_size,
		uint32 &num_seg,
		uint32 &index_size)
{
	float time;
	cudaEvent_t start;
	cudaEvent_t stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	dim3 h_dimBlock(BLOCK_SIZE,1,1);
	dim3 h_dimGrid(1,1,1);
	int numBlocks = CEIL(index_size, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	// Mark segment starts among the compacted positions.
	if(strategy == ALL_SEG_SORT)
		neighbour_compare2<<<h_dimGrid, h_dimBlock>>>(d_c_index, d_isa_out, d_isa_tmp, index_size);
	else
		neighbour_compare<<<h_dimGrid, h_dimBlock>>>(d_c_index, d_isa_out, d_isa_tmp, index_size);
	int num_unique = transform1(d_isa_tmp, d_c_index, d_isa_out, d_block_len, index_size);
	h_dimGrid.x = h_dimGrid.y = 1;
	numBlocks = CEIL(index_size, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	if(strategy == ALL_SEG_SORT)
		scatter2<<<h_dimGrid, h_dimBlock>>>(d_sa, d_isa_out, d_c_index, d_isa_in, index_size);
	else
		scatter1<<<h_dimGrid, h_dimBlock>>>(d_sa, d_isa_out, d_c_index, d_isa_in, index_size);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	isa_time += time;
	if (num_unique >= index_size)
	{
		// All remaining positions got unique ranks — done. Destroy the
		// timing events before returning (the original code leaked them on
		// this early-exit path).
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
		return true;
	}
	cudaEventRecord(start, 0);
	uint32 *d_gt1mark = d_block_start;
	h_dimGrid.x = h_dimGrid.y = 1;
	numBlocks = CEIL(index_size, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	mark_gt1_segment2<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_gt1mark, index_size);
	thrust_uint_p dev_stencil = thrust::device_pointer_cast(d_gt1mark);
	thrust_uint_p dev_index = thrust::device_pointer_cast(d_c_index);
	thrust_uint_p dev_mark = thrust::device_pointer_cast(d_isa_tmp);
	//compact global index get indices for gt1 segment
	thrust_uint_p new_end = thrust::remove_if(dev_index, dev_index + index_size, dev_stencil, thrust::identity<uint>());
	//compact seg start mark (d_isa_tmp) get start_mark for gt1 segment
	thrust::remove_if(dev_mark, dev_mark + index_size, dev_stencil, thrust::identity<uint>());
	if(strategy == ALL_SEG_SORT)
	{
		// NOTE(review): this compacts string_size elements of d_sa while the
		// streams above use index_size — verify intentional (the stencil is
		// only index_size entries long).
		thrust_uint_p dev_sa = thrust::device_pointer_cast(d_sa);
		thrust::remove_if(dev_sa, dev_sa + string_size, dev_stencil, thrust::identity<uint>());
	}
	index_size = new_end - dev_index;
	thrust_uint_p dev_start = thrust::device_pointer_cast(d_block_start);
	thrust_uint_p end = thrust::copy_if(dev_index, dev_index + index_size, dev_mark, dev_start, thrust::identity<uint>());
	num_seg = end - dev_start;
	cudaMemset(d_isa_out, 0, index_size*sizeof(uint32));
	h_dimGrid.x = h_dimGrid.y = 1;
	numBlocks = CEIL(index_size, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	get_segend_mark<<<h_dimGrid, h_dimBlock>>>(d_isa_tmp, d_isa_out, index_size);
	thrust_uint_p dev_end = thrust::device_pointer_cast(d_block_len);
	thrust_uint_p dev_segend_mark = thrust::device_pointer_cast(d_isa_out);
	// Store segment end positions, then convert them to lengths in place.
	end = thrust::copy_if(dev_index, dev_index + index_size, dev_segend_mark, dev_end, thrust::identity<uint>());
	if(num_seg != end - dev_end)
		printf("error %d\n", __LINE__);
	h_dimGrid.x = h_dimGrid.y = 1;
	numBlocks = CEIL(num_seg, h_dimBlock.x);
	THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
	get_seg_len<<<h_dimGrid, h_dimBlock>>>(d_block_start, d_block_len, num_seg);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	group_process += time;
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return false;
}
// One prefix-doubling round at width h_order:
//  1. Build sort keys: on the first round the keys are generated directly
//     from the packed reference text (1/4/8 bytes per suffix depending on
//     init_order); on later rounds a 64-bit key is composed from rank[i]
//     and rank[i + h_order/2].
//  2. Sort (key, suffix) pairs with thrust::sort_by_key.
//  3. Rebuild the ISA via update_isa_stage1.
// Returns true when update_isa_stage1 reports that every rank is unique.
// NOTE(review): `sorted` is passed through to update_isa_stage1 untouched;
// callers seem to rely on the return value instead — confirm.
bool prefix_doubling_sort(
	uint64_t *d_key64,
	uint32 *d_sa,
	uint32 *d_isa_in,
	uint32 *d_isa_out,
	uint32 *d_ref,
	uint32 *d_index,
	uint32 *d_isa_tmp,
	uint32 h_order,
	uint32 init_order,
	uint32 end_order,
	uint32 string_size,
	bool &sorted,
	uint32 &num_unique,
	uint32 &num_seg,
	uint32 &index_size)
{
	float time;
	cudaEvent_t start;
	cudaEvent_t stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	//generate bucket
	if(h_order == init_order)
	{
		// Initial round: derive buckets straight from the packed reference
		// string (4 chars per uint32, plus two words of padding at the end).
		uint32 size_d_ref = CEIL(string_size, 4);
		dim3 h_dimBlock(BLOCK_SIZE,1,1);
		dim3 h_dimGrid(1,1,1);
		int numBlocks = CEIL((size_d_ref+2), h_dimBlock.x);
		THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
		if(init_order == 8)
		{
			generate_8bucket<<<h_dimGrid, h_dimBlock>>>(d_sa, d_ref, d_key64, (size_d_ref+2));
		}
		else if(init_order == 4)
		{
			generate_4bucket<<<h_dimGrid, h_dimBlock>>>(d_sa, d_ref, (uint32*)d_key64, (size_d_ref+2));
		}
		else if(init_order == 1)
		{
			generate_1bucket<<<h_dimGrid, h_dimBlock>>>(d_sa, d_ref, (uint8*) d_key64, (size_d_ref+2));
		}
		else
		{
			cout << "init_order error, currently not supported" << endl;
			exit(-1);
		}
	}
	else
	{
		// Later rounds: 64-bit key = (rank at i, rank at i + h_order/2).
		dim3 threads_per_block(THREADS_PER_BLOCK, 1, 1);
		dim3 blocks_per_grid(1, 1, 1);
		blocks_per_grid.x = CEIL(CEIL(string_size, 4), threads_per_block.x);
		compose_keys_kernel<<<blocks_per_grid, threads_per_block>>>(d_sa, d_key64, d_isa_out, d_isa_in+h_order/2, string_size, h_order/2);
	}
	// Sort suffix indices by key; the key width matches what was generated
	// above (8/4/1 bytes on the first round, 64-bit afterwards).
	if(h_order == init_order)
	{
		if(init_order == 8)
		{
			thrust::device_ptr<uint64_t> d_key_ptr = thrust::device_pointer_cast(d_key64);
			thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
			thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
		}
		else if(init_order == 4)
		{
			thrust::device_ptr<uint32> d_key_ptr = thrust::device_pointer_cast((uint32*)d_key64);
			thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
			thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
		}
		else if(init_order == 1)
		{
			thrust::device_ptr<uint8> d_key_ptr = thrust::device_pointer_cast((uint8*)d_key64);
			thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
			thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
		}
	}
	else
	{
		thrust::device_ptr<uint64_t> d_key_ptr = thrust::device_pointer_cast(d_key64);
		thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
		thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
	}
	//494ms for sprot34
	/*
	if(1)
	{
	thrust::device_ptr<uint64_t> d_key_ptr = thrust::device_pointer_cast(d_key64);
	thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_sa);
	thrust::sort_by_key(d_key_ptr, d_key_ptr+string_size, d_value_ptr);
	}
	else
	{
	gpu_sort<uint64_t>(d_key64, d_sa, string_size);
	}*/
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	init_sort += time;
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	//update isa (also compacts unsorted segments on the final round)
	bool flag = update_isa_stage1(d_sa, d_key64, d_isa_in, d_isa_out, d_index, d_isa_tmp,
			string_size, sorted, num_unique, num_seg,
			index_size, h_order, init_order, end_order);
	return flag;
}
// Stage one: run prefix-doubling rounds from init_order up to end_order.
// Each round sorts suffixes by their first h_order characters and rebuilds
// the ISA; prefix_doubling_sort returns true once every suffix rank is
// unique, in which case h_order is doubled a final time (callers expect the
// post-round order in the reference parameter) and the loop stops early.
void sufsort_stage1(
	uint64_t *d_key64,
	uint32 *d_sa,
	uint32 *&d_isa_in,
	uint32 *&d_isa_out,
	uint32 *d_ref,
	uint32 *d_index,
	uint32 *d_isa_tmp,
	uint32 &h_order,
	uint32 init_order,
	uint32 end_order,
	uint32 string_size,
	bool &sorted,
	uint32 &num_unique,
	uint32 &num_seg,
	uint32 &index_size)
{
	h_order = init_order;
	while (h_order <= end_order)
	{
		bool all_unique = prefix_doubling_sort(
			d_key64, d_sa, d_isa_in, d_isa_out, d_ref, d_index, d_isa_tmp,
			h_order, init_order, end_order, string_size,
			sorted, num_unique, num_seg, index_size);
		h_order *= 2;
		if (all_unique)
			break;
	}
}
// Sort each still-unsorted segment by the next h_order characters (second
// keys taken from the ISA), then update the ISA for the next doubling round.
// Two regimes:
//  * strategy != ALL_SEG_SORT: segments are sorted by length so size
//    classes are contiguous, bound[] records the class boundaries, and each
//    class is dispatched to a different sorter (per-segment device sort for
//    the largest, multi-block / single-block radix or MGPU segmented sort
//    for the middle, bitonic kernels for the small ones).
//  * strategy == ALL_SEG_SORT: all segments go through MGPU segmented sort
//    in chunks bounded by SEG_SORT_MAX.
// d_key64 is reused as scratch for the per-segment start/length arrays.
// Returns true when update_isa_stage2 reports that all ranks are unique.
bool stage_two_sort (
	uint64_t *d_key64,
	uint32 *d_sa,
	uint32 *d_isa_in,
	uint32 *d_isa_out,
	uint32 *d_isa_tmp,
	uint32 *d_index,
	uint32 h_order,
	uint32 string_size,
	uint32 &num_seg,
	uint32 &index_size,
	uint32 digit_count,
	uint32 *d_digits,
	uint32 *d_tmp_store,
	int *bound)
{
	// Segment bookkeeping lives in the (otherwise unused) key buffer.
	uint32* d_block_start = (uint32*)d_key64;
	uint32* d_block_len = d_block_start + string_size;
	float time;
	cudaEvent_t start;
	cudaEvent_t stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	//try different alternatives in papers
	//1. all use segmented sort (mgpu)
	//2. large part merge sort, median seg sort, small bitonic sort
	//3. median radix sort
	if(strategy != ALL_SEG_SORT)
	{
		cudaEventRecord(start, 0);
		// Order segments by length so each size class occupies a contiguous
		// range of [d_block_start, d_block_len].
		thrust::device_ptr<uint32> d_key_ptr = thrust::device_pointer_cast(d_block_len);
		thrust::device_ptr<uint32> d_value_ptr = thrust::device_pointer_cast(d_block_start);
		thrust::sort_by_key(d_key_ptr, d_key_ptr+num_seg, d_value_ptr);
		dim3 h_dimBlock(BLOCK_SIZE,1,1);
		dim3 h_dimGrid(1,1,1);
		int numBlocks = CEIL(num_seg, h_dimBlock.x);
		THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
		int *d_bound;
		cudaMalloc((void**)&d_bound, 16*sizeof(int));
		cudaMemset(d_bound, -1, 16*sizeof(int));
		// bound[i] marks size-class boundaries within the length-sorted
		// segment list; -1 means "class empty" and is patched below.
		// NOTE(review): exact class<->index mapping is defined by
		// find_boundary_kernel_init (not visible here) — confirm before
		// relying on it.
		find_boundary_kernel_init<<<h_dimGrid, h_dimBlock>>>(d_block_len, d_bound, num_seg, r2_thresh);
		mem_device2host(d_bound, bound, sizeof(int)*16);
		cudaFree(d_bound);
		bound[13] = num_seg;
		// Fill empty classes so each bound[i] is a valid range endpoint.
		for(int i=12; i>=0; i--)
		{
			if(bound[i]==-1)
				bound[i] = bound[i+1];
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&time, start, stop);
		group_process += time;
		/*
		time = 0.0;
		cudaEventRecord(start, 0);
		if(strategy == M_RADIX_SORT)
		{
		int logthresh = (int)(log(r1_thresh)/log(2));
		if(bound[logthresh] != -1 && num_seg-bound[logthresh]>0)
		{
		h_dimGrid.x = h_dimGrid.y = 1;
		numBlocks = num_seg - bound[logthresh];
		THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
		get_second_keys_stage_two<<<h_dimGrid, h_dimBlock>>>(d_sa, d_isa_in+h_order/2, d_isa_out,
		d_block_start+bound[logthresh], d_block_len+bound[logthresh], numBlocks);
		}
		}
		else
		{
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&time, start, stop);
		get2ndkey += time;
		*/
		time = 0.0;
		cudaEventRecord(start, 0);
		//>65535
		// L-type (largest) segments: each gets a full device-wide sort.
		if(bound[12] != -1 && num_seg-bound[12] > 0)
		{
			h_dimGrid.x = h_dimGrid.y = 1;
			numBlocks = num_seg - bound[12];
			THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
			get_second_keys_stage_two<<<h_dimGrid, h_dimBlock>>>(d_sa, d_isa_in+h_order/2, d_isa_out, d_block_start+bound[12], d_block_len+bound[12], numBlocks);
			uint32 *h_block_start = (uint32*)malloc((num_seg-bound[12])*sizeof(uint32));
			uint32 *h_block_len = (uint32*)malloc((num_seg-bound[12])*sizeof(uint32));
			cudaMemcpy(h_block_start, d_block_start+bound[12], (num_seg-bound[12])*sizeof(uint32), cudaMemcpyDeviceToHost);
			cudaMemcpy(h_block_len, d_block_len+bound[12], (num_seg-bound[12])*sizeof(uint32), cudaMemcpyDeviceToHost);
			for (uint32 i = 0; i < num_seg-bound[12]; i++)
			{
				gpu_sort<uint32>(d_isa_out+h_block_start[i], d_sa+h_block_start[i], h_block_len[i]);
			}
			free(h_block_start);
			free(h_block_len);
		}
		else
		{
			bound[12] = num_seg;
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&time, start, stop);
		ltype_sort += time;
		time = 0.0;
		cudaEventRecord(start, 0);
		// M-type (medium) segments: radix sort or MGPU segmented sort,
		// depending on the configured strategy.
		if(strategy == M_RADIX_SORT)
		{
			int logthresh = (int)(log(r1_thresh)/log(2));
			if(bound[logthresh] != -1 && bound[12]-bound[logthresh]>0)
			{
				h_dimGrid.x = h_dimGrid.y = 1;
				numBlocks = bound[12] - bound[logthresh];
				THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
				get_second_keys_stage_two<<<h_dimGrid, h_dimBlock>>>(d_sa, d_isa_in+h_order/2, d_isa_out,
					d_block_start+bound[logthresh], d_block_len+bound[logthresh], numBlocks);
			}
			if(bound[11] != -1 && bound[12] - bound[11] > 0)
			{
				//>2048 <65536
				// Multi-block LSD radix sort over 30 bits, 5 bits per pass.
				uint32 num_thread = NUM_THREAD_SEG_SORT;
				uint32 block_count = bound[12] - bound[11];
				uint32 *d_block_start_ptr = d_block_start +bound[11];
				uint32 *d_block_len_ptr = d_block_len + bound[11];
				//Partition *d_par_ptr = d_par + s_type_par_bound;
				uint32 num_block = block_count < NUM_BLOCK_SEG_SORT ? block_count : NUM_BLOCK_SEG_SORT;
				uint32 work_per_block = block_count/num_block + (block_count%num_block?1:0);
				uint32 num_interval_for_pass2 = work_per_block/NUM_WARPS + (work_per_block%NUM_WARPS?1:0);
				for (uint32 bit = 0; bit < 30; bit += 5)
				{
					HANDLE_ERROR(cudaMemset(d_digits, 0, digit_count));
					multiblock_radixsort_pass1<<<num_block, num_thread>>>(d_isa_out, d_digits+32, d_block_start_ptr, d_block_len_ptr, bit, block_count);
					multiblock_radixsort_pass2<<<num_block, num_thread>>>(d_digits+32, d_block_len_ptr, num_interval_for_pass2, block_count);
					multiblock_radixsort_pass3<<<num_block, num_thread>>>(d_digits+32, d_isa_out, d_sa, d_block_start_ptr, d_block_len_ptr, d_tmp_store, bit, block_count);
				}
			}
			else
			{
				bound[11] = bound[12];
			}
			if(logthresh < 11)
			{
				if(bound[logthresh] != -1 && bound[11] - bound[logthresh] > 0)
				{
					//S-type segment key-value sort
					h_dimGrid.x = h_dimGrid.y = 1;
					numBlocks = bound[11] - bound[logthresh];
					THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
					for (uint32 bit = 0; bit < 30; bit +=5)
						single_block_radixsort1<<<h_dimGrid, h_dimBlock>>>(d_isa_out, d_sa, d_block_start+bound[logthresh], d_block_len+bound[logthresh], bit, bound[11] - bound[logthresh]);
					//bitonic_sort_kernel_gt256_isa<<<h_dimGrid, h_dimBlock>>>(d_block_len+bound[8], d_block_start+bound[8], d_sa, d_isa_in, d_isa_out, bound[11] - bound[8], h_order>>1);
				}
				else
				{
					bound[logthresh] = bound[11];
				}
			}
		}
		else
		{
			// M-type via MGPU segmented sort: gather (key, value) pairs for
			// all segments in the class, sort them in one call, scatter back.
			int logthresh = (int)(log(r1_thresh)/log(2));
			if(logthresh < 8 || logthresh > 11)
				printf("error\n");
			if(bound[logthresh] != -1 && bound[12] - bound[logthresh] > 0)
			{
				unsigned int num_segment = bound[12] - bound[logthresh];
				uint32 *d_len = (d_block_len + bound[logthresh]);
				uint32 *d_pos;
				cudaMalloc((void**)&d_pos, num_segment*sizeof(uint32));
				thrust::device_ptr<uint32> d_len_ptr = thrust::device_pointer_cast(d_len);
				thrust::device_ptr<uint32> d_pos_ptr = thrust::device_pointer_cast(d_pos);
				thrust::exclusive_scan(d_len_ptr, d_len_ptr+num_segment, d_pos_ptr);
				unsigned int num_ele = thrust::reduce(d_len_ptr, d_len_ptr+num_segment);
				if(num_ele >= SEG_SORT_MAX)
				{
					printf("the length exceeds the maximum length for MGPU segmented sort, please use radix sort for m-type groups!\n");
					exit(0);
				}
				uint32 *d_keys, *d_vals;
				cudaMalloc((void**)&d_keys, num_ele*sizeof(uint32));
				cudaMalloc((void**)&d_vals, num_ele*sizeof(uint32));
				h_dimGrid.x = h_dimGrid.y = 1;
				numBlocks = num_segment;
				THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
				get_pair_for_seg_sort<<<h_dimGrid, h_dimBlock>>>(d_sa, d_isa_in+h_order/2, d_keys, d_vals,
					d_pos, d_block_start+bound[logthresh], d_block_len + bound[logthresh], numBlocks);
				SegSortPairsFromIndices((int*)d_keys, (int*)d_vals, num_ele, (const int*)(d_pos+1), num_segment-1, *context);
				cudaCheckError(__LINE__);
				set_pair_for_seg_sort<<<h_dimGrid, h_dimBlock>>>(d_sa, d_isa_out, d_keys, d_vals, d_pos,
					d_block_start+bound[logthresh], d_block_len + bound[logthresh], numBlocks);
				cudaFree(d_pos);
				cudaFree(d_keys);
				cudaFree(d_vals);
			}
			else
			{
				bound[logthresh] = bound[12];
			}
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&time, start, stop);
		mtype_sort += time;
		time = 0.0;
		cudaEventRecord(start, 0);
		//TODO: separate > WARP_SIZE and < WARP_SIZE segment
		// S-type (small) segments: bitonic sort, packing several segments of
		// length 2^i..2^(i+1) into each thread block.
		for(int i=log(r1_thresh)/log(2)-1; i>=1; i--)
		{
			//sort segment with length: 2^i-2^(i+1)
			if(bound[i] != -1 && bound[i+1]-bound[i] > 0)
			{
				if(r1_thresh == 256)
				{
					int segnum = 0x01<<(7-i);
					//int seglen = 0x01<<(i+1);
					h_dimGrid.x = h_dimGrid.y = 1;
					numBlocks = CEIL((bound[i+1]-bound[i]), segnum);
					THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
					bitonic_sort_kernel_gt2n_isa<<<h_dimGrid, h_dimBlock>>>(
						d_block_len+bound[i],
						d_block_start+bound[i],
						d_sa,
						d_isa_in,
						d_isa_out,
						bound[i+1]-bound[i],
						i+1,
						h_order>>1);
				}
				else
				{
					int logthresh = (int)(log(r1_thresh)/log(2));
					int segnum = 0x01<<(logthresh-1-i);
					//int seglen = 0x01<<(i+1);
					int round = r1_thresh/BLOCK_SIZE;
					h_dimGrid.x = h_dimGrid.y = 1;
					numBlocks = CEIL((bound[i+1]-bound[i]), segnum);
					THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
					bitonic_sort_kernel_gt2n_isa1<<<h_dimGrid, h_dimBlock, 2*r1_thresh*sizeof(uint32)>>>(
						d_block_len+bound[i],
						d_block_start+bound[i],
						d_sa,
						d_isa_in,
						d_isa_out,
						bound[i+1]-bound[i],
						i+1,
						h_order>>1,
						round,
						logthresh);
				}
			}
			else
				bound[i] = bound[i+1];
		}
		//1-2
		// Length-2 segments: dedicated pairwise kernel.
		if(bound[0] != -1 && bound[1]-bound[0] > 0)
		{
			h_dimGrid.x = h_dimGrid.y = 1;
			numBlocks = CEIL((bound[1]-bound[0]), h_dimBlock.x);
			THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
			//bitonic_sort_kernel2<<<h_dimGrid, h_dimBlock>>>(d_block_start+bound[0], d_sa, d_isa_out, bound[1]-bound[0]);
			bitonic_sort_kernel2_isa<<<h_dimGrid, h_dimBlock>>>(d_block_start+bound[0], d_sa, d_isa_in, d_isa_out, bound[1]-bound[0], h_order/2);
		}
		else
			bound[0] = bound[1];
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&time, start, stop);
		stype_sort += time;
	}
	else
	{
		// ALL_SEG_SORT: hand everything to MGPU segmented sort, processing
		// at most SEG_SORT_MAX elements per call and cutting each chunk at a
		// segment boundary (found by scanning h_mark backwards on the host).
		cudaEventRecord(start, 0);
		cudaMemcpy(h_mark, d_isa_tmp, index_size*sizeof(uint32), cudaMemcpyDeviceToHost);
		uint32 *d_keys, *d_vals;
		d_keys = d_isa_out;
		d_vals = d_sa;
		//the maximum array length MGPU_SEG_SORT can processes
		int max_length = SEG_SORT_MAX;
		int idx_start = 0;
		int idx_end = 0;
		int seg_start = 0;
		while(1)
		{
			idx_start = idx_end;
			idx_end += max_length;
			if(idx_end > index_size)
				idx_end = index_size;
			else
			{
				// NOTE(review): when idx_end lands exactly on index_size this
				// reads h_mark[index_size], one past the compacted range;
				// h_mark is allocated with ext_size entries in main so it
				// stays within the allocation there — confirm intentional.
				for(; idx_end>idx_start; idx_end--)
				{
					if(h_mark[idx_end] == 1)
						break;
				}
			}
			int length = idx_end-idx_start;
			dim3 h_dimBlock(BLOCK_SIZE,1,1);
			dim3 h_dimGrid(1,1,1);
			int numBlocks = CEIL(length, h_dimBlock.x);
			THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
			get_keys<<<h_dimGrid, h_dimBlock>>>(d_sa+idx_start, d_isa_in+h_order/2, d_keys+idx_start, length, string_size);
			thrust::device_ptr<uint32> dev_mark = thrust::device_pointer_cast(d_isa_tmp+idx_start);
			unsigned int numseg = thrust::reduce(dev_mark, dev_mark+length);
			uint32 *d_pos;
			cudaMalloc((void**)&d_pos, numseg*sizeof(uint32));
			thrust::device_ptr<uint32> d_len_ptr = thrust::device_pointer_cast(d_block_len+seg_start);
			thrust::device_ptr<uint32> d_pos_ptr = thrust::device_pointer_cast(d_pos);
			thrust::inclusive_scan(d_len_ptr, d_len_ptr+numseg, d_pos_ptr);
			SegSortPairsFromIndices(d_keys+idx_start, d_vals+idx_start, length, (const int*)d_pos, numseg-1, *context);
			seg_start += numseg;
			cudaFree(d_pos);
			if(idx_end == index_size)
			{
				break;
			}
		}
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	seg_sort += time;
	//sort segment position according to segment length (replace with bucket?)
	bool flag = update_isa_stage2(d_sa, d_isa_in, d_isa_out, d_isa_tmp, d_block_start,
			d_block_len, d_index, bound, string_size, num_seg, index_size);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return flag;
}
// Stage-two driver: keep doubling h_order and sorting only the remaining
// unsorted (length > 1) segments until every suffix has a unique rank.
// d_digits / d_tmp_store are scratch buffers used solely by the multi-block
// radix sort strategy; in ALL_SEG_SORT mode the suffix array is rebuilt
// from the final ISA at the end (it was compacted during the rounds).
void sufsort_stage2(uint64_t *d_key64,
		uint32 *d_sa,
		uint32 *d_isa_in,
		uint32 *d_isa_out,
		uint32 *d_index,
		uint32 *d_isa_tmp,
		uint8 *h_buffer,
		uint32 h_order,
		uint32 string_size,
		uint32 &num_seg,
		uint32 &index_size)
{
	int bound[16];
	//allocate memory for segmented sort
	uint32 digit_count = sizeof(uint32)*16*NUM_LIMIT*32;
	// Initialize to NULL: under strategies other than M_RADIX_SORT these are
	// never allocated, and the original code passed indeterminate pointer
	// values to stage_two_sort (undefined behavior, even when unused there).
	uint32 *d_digits = NULL;
	uint32 *d_tmp_store = NULL;
	if(strategy == M_RADIX_SORT)
	{
		d_digits = (uint32*)allocate_device_memory(digit_count);
		d_tmp_store = (uint32*)allocate_device_memory(sizeof(uint32) * NUM_BLOCK_SEG_SORT * MAX_SEG_NUM *2);
	}
	for (; h_order < string_size; h_order *= 2)
	{
		bool flag = stage_two_sort(d_key64, d_sa, d_isa_in, d_isa_out, d_isa_tmp, d_index, h_order, string_size, num_seg, index_size, digit_count, d_digits, d_tmp_store, bound);
		if(flag) break;
	}
	if(strategy == ALL_SEG_SORT)
	{
		// d_sa was compacted during the rounds; rebuild the full SA from ISA.
		dim3 h_dimBlock(BLOCK_SIZE,1,1);
		dim3 h_dimGrid(1,1,1);
		int numBlocks = CEIL(string_size, h_dimBlock.x);
		THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x);
		getSAfromISA<<<h_dimGrid, h_dimBlock>>>(d_isa_in, d_sa, string_size);
	}
	if(strategy == M_RADIX_SORT)
	{
		free_device_memory(d_digits);
		free_device_memory(d_tmp_store);
	}
}
// Entry point. Usage:
//   sufsort <filename> <init_h0> [strategy] [r1_thresh] [r2_thresh] [end_order]
// Reads the input file, runs the two-stage GPU suffix sort, and prints
// per-phase timings to stdout (tab-separated summary to stderr).
int main(int argc, char** argv)
{
	if(argc < 3)
	{
		printf("usage: sufsort <filename> <init_h0>\n");
		exit(-1);
	}
	FILE * pFile;
	long size;
	size_t result;
	// Print only the arguments that were actually supplied: the original
	// printed argv[3]..argv[5] unconditionally, reading past argv[argc]
	// (undefined behavior) whenever fewer arguments were given.
	for (int i = 1; i < argc; i++)
		printf("%s ", argv[i]);
	printf("\n");
	uint32 h_order = atoi(argv[2]);
	int end_order = h_order;
	if(argc >= 4)
		strategy = (strategy_t)atoi(argv[3]);
	else
		strategy = (strategy_t)1;
	if(argc >= 5)
		r1_thresh = atoi(argv[4]);
	else
		r1_thresh = 256;
	if(argc >= 6)
		r2_thresh = atoi(argv[5]);
	else
		r2_thresh = 65535;
	if(argc >= 7)
		end_order = atoi(argv[6]);
	if(h_order != 8 && h_order != 4 && h_order != 1)
	{
		perror ("init h_order not supported, use 1, 4 or 8\n");
		exit(1);
	}
	if(r1_thresh%256 != 0)
	{
		perror ("error, R1 threshold should be mutiple of 256\n");
		exit(1);
	}
	// Open in binary mode: "r" translates line endings on some platforms
	// and would corrupt non-text input.
	pFile = fopen (argv[1],"rb");
	if (pFile==NULL) { perror ("Error opening file\n"); exit(1); }
	fseek (pFile, 0, SEEK_END);
	size=ftell(pFile);
	rewind (pFile);
	// Guard the h_buffer[size-1] access below against an empty file.
	if (size <= 0) { fputs ("Empty or unreadable input file\n", stderr); exit(1); }
	uint8 *h_buffer = (uint8*)malloc((size+4)*sizeof(uint8));
	if (h_buffer == NULL) {fputs ("Memory error",stderr); exit (2);}
	// copy the file into the buffer:
	result = fread (h_buffer,1, size, pFile);
	// Cast avoids a signed/unsigned comparison between size_t and long.
	if (result != (size_t)size) {fputs ("Reading error",stderr); exit (3);}
	if(h_buffer[size-1] != 0)
	{
		// Ensure a terminating NUL sentinel (required by the suffix sort).
		h_buffer[size] = 0;
		size+=1;
	}
	fclose(pFile);
	uint32 ch_per_uint32 = 4;
	uint32 size_d_ref = CEIL(size, ch_per_uint32);
	uint32 ext_size = (size_d_ref+2)*ch_per_uint32;
	uint32 num_unique = 0;
	bool sorted = false;
	/*grow the buffer to the padded (word-aligned) size and zero the tail*/
	uint8 *h_grown = (uint8*)realloc(h_buffer, ext_size);
	if (h_grown == NULL) {fputs ("Memory error",stderr); exit (2);}
	h_buffer = h_grown;
	for (uint32 i = size; i < ext_size; i++)
		h_buffer[i] = 0;
	context = CreateCudaDevice(3);
	h_mark = (uint32*)malloc(sizeof(uint32)*ext_size);
	uint32* d_sa = (uint32*)allocate_device_memory(sizeof(uint32)*ext_size);
	uint32* d_isa_in = (uint32*)allocate_device_memory(sizeof(uint32) * ext_size);
	uint32* d_isa_out = (uint32*)allocate_device_memory(sizeof(uint32) * ext_size);
	uint64_t* d_key = (uint64_t*)allocate_device_memory(sizeof(uint64_t) * ext_size);
	uint32* d_index = (uint32*)allocate_device_memory(sizeof(uint32) * ext_size);
	uint32 *d_isa_tmp = (uint32*)allocate_device_memory(sizeof(uint32)*(size+20));
	//input is stored in d_isa_in
	mem_host2device(h_buffer, d_isa_in, ext_size);
	float time;
	cudaEvent_t start;
	cudaEvent_t stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	uint32 num_seg, index_size;
	// Stage one: prefix-doubling rounds up to end_order.
	sufsort_stage1(d_key, d_sa, d_isa_in, d_isa_out,
			d_isa_in, d_index, d_isa_tmp, h_order,
			h_order, end_order, size, sorted, num_unique,
			num_seg, index_size);
	cudaEventRecord(start, 0);
	if(!sorted)
	{
		// Stage two: keep doubling on the remaining unsorted segments only.
		sufsort_stage2(d_key, d_sa, d_isa_in, d_isa_out, d_index, d_isa_tmp, h_buffer, h_order, size, num_seg, index_size);
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	ugroup_sort += time;
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	printf("initial sorting time is %f (ms)\n", init_sort);
	printf("unsorted group sorting time is %f (ms)\n", ugroup_sort);
	if(strategy != ALL_SEG_SORT)
	{
		printf("s-type, m-type and l-type sorting time are %f, %f, %f\n", stype_sort, mtype_sort, ltype_sort);
		printf("get sorting key time is %f\n", get2ndkey);
	}
	else
	{
		printf("segmented sorting time is %f (ms)\n", seg_sort);
	}
	printf("group processing time is %f (ms)\n", group_process);
	printf("deriving ISA time is %f (ms)\n", isa_time);
	printf("total suffix sorting time is %f (ms)\n", init_sort+stype_sort+mtype_sort+ltype_sort+get2ndkey+isa_time+group_process);
	fprintf(stderr, "%f\t%f\t%f\t%f\t%f\t%f\n", init_sort, stype_sort, mtype_sort, ltype_sort, isa_time, group_process);
	cudaError_t err = cudaGetLastError();
	if(err != cudaSuccess)
		printf("last cudaerr is %d\n", err);
	printf("----------------------------------------------------------------\n");
	//free memory
	free(h_buffer);
	free(h_mark);
	free_device_memory(d_sa);
	free_device_memory(d_index);
	free_device_memory(d_isa_in);
	free_device_memory(d_isa_out);
	free_device_memory(d_key);
	free_device_memory(d_isa_tmp);
	return 0;
}
|
5354dd2a053612eee074cfc6eacab9b6e1840a7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
// Convert an RGBA image to greyscale using the NTSC luma weights:
//   I = .299f*R + .587f*G + .114f*B   (alpha ignored).
// One thread per pixel, launched on a 2D grid covering numCols x numRows;
// uchar4 maps .x -> R, .y -> G, .z -> B, .w -> A.
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  // Guard the grid tail: the image is rarely a multiple of the block size.
  if (col >= numCols || row >= numRows)
    return;
  const int pixel = row * numCols + col;
  const uchar4 rgba = rgbaImage[pixel];
  greyImage[pixel] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(8, 8, 1); //TODO
const dim3 gridSize((numCols+7)/8, (numRows+7)/8, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 5354dd2a053612eee074cfc6eacab9b6e1840a7b.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
greyImage[thread_1D_pos] = .299f * rgbaImage[thread_1D_pos].x +
.587f * rgbaImage[thread_1D_pos].y +
.114f * rgbaImage[thread_1D_pos].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(8, 8, 1); //TODO
const dim3 gridSize((numCols+7)/8, (numRows+7)/8, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
129027758cfc0ee901582d6a6b82d88a829b3da2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS_X 16
#define THREADS_Y 16
__global__
void convolve2(int *out_ptr, int *signal_ptr, int nBBS0, int *out_strides, int *out_dims, int *signal_strides, int *signal_dims,
int nBBS1, int o2, int o3, int s2, int s3, int expand, int fLen0, int fLen1)
{
const unsigned C_SIZE = 512;
__shared__ int shrdMem[512];
const int radius0 = fLen0-1;
const int radius1 = fLen1-1;
const int padding0 = 2*radius0;
const int padding1 = 2*radius1;
const int shrdLen0 = THREADS_X + padding0;
const int shrdLen1 = THREADS_Y + padding1;
unsigned b0 = blockIdx.x / nBBS0;
unsigned b1 = (blockIdx.y + blockIdx.z * gridDim.y) / nBBS1;
int *dst = (int *)out_ptr + (b0 * out_strides[2] + /* activated with batched input signal */
o2 * out_strides[2] + /* activated with batched input filter */
b1 * out_strides[3] + /* activated with batched input signal */
o3 * out_strides[3]); /* activated with batched input filter */
const int *src = (const int *)signal_ptr + (b0 * signal_strides[2] + /* activated with batched input signal */
s2 * signal_strides[2] + /* activated with batched input filter */
b1 * signal_strides[3] + /* activated with batched input signal */
s3 * signal_strides[3]); /* activated with batched input filter */
int lx = threadIdx.x;
int ly = threadIdx.y;
int gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx;
int gy = THREADS_Y * ((blockIdx.y + blockIdx.z * gridDim.y) -b1*nBBS1) + ly;
if(b1 >= out_dims[3])
return;
int s0 = signal_strides[0];
int s1 = signal_strides[1];
int d0 = signal_dims[0];
int d1 = signal_dims[1];
// below loops are traditional loops, they only run multiple
// times filter length is more than launch size
for (int b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) {
int j = gy2-radius1;
bool is_j = j>=0 && j<d1;
// move row_set THREADS_Y along coloumns
for (int a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) {
int i = gx2-radius0;
bool is_i = i>=0 && i<d0;
shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : 0);
}
}
__syncthreads();
if (gx<out_dims[0] && gy<out_dims[1]) {
int ci = lx + radius0 + (expand ? 0 : fLen0>>1);
int cj = ly + radius1 + (expand ? 0 : fLen1>>1);
int accum = 0;
for(int fj=0; fj<fLen1; ++fj) {
for(int fi=0; fi<fLen0; ++fi) {
int s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)];
accum = accum + s_val;
}
}
dst[gy*out_strides[1]+gx] = (int)accum;
}
} | 129027758cfc0ee901582d6a6b82d88a829b3da2.cu | #define THREADS_X 16
#define THREADS_Y 16
__global__
void convolve2(int *out_ptr, int *signal_ptr, int nBBS0, int *out_strides, int *out_dims, int *signal_strides, int *signal_dims,
int nBBS1, int o2, int o3, int s2, int s3, int expand, int fLen0, int fLen1)
{
const unsigned C_SIZE = 512;
__shared__ int shrdMem[512];
const int radius0 = fLen0-1;
const int radius1 = fLen1-1;
const int padding0 = 2*radius0;
const int padding1 = 2*radius1;
const int shrdLen0 = THREADS_X + padding0;
const int shrdLen1 = THREADS_Y + padding1;
unsigned b0 = blockIdx.x / nBBS0;
unsigned b1 = (blockIdx.y + blockIdx.z * gridDim.y) / nBBS1;
int *dst = (int *)out_ptr + (b0 * out_strides[2] + /* activated with batched input signal */
o2 * out_strides[2] + /* activated with batched input filter */
b1 * out_strides[3] + /* activated with batched input signal */
o3 * out_strides[3]); /* activated with batched input filter */
const int *src = (const int *)signal_ptr + (b0 * signal_strides[2] + /* activated with batched input signal */
s2 * signal_strides[2] + /* activated with batched input filter */
b1 * signal_strides[3] + /* activated with batched input signal */
s3 * signal_strides[3]); /* activated with batched input filter */
int lx = threadIdx.x;
int ly = threadIdx.y;
int gx = THREADS_X * (blockIdx.x-b0*nBBS0) + lx;
int gy = THREADS_Y * ((blockIdx.y + blockIdx.z * gridDim.y) -b1*nBBS1) + ly;
if(b1 >= out_dims[3])
return;
int s0 = signal_strides[0];
int s1 = signal_strides[1];
int d0 = signal_dims[0];
int d1 = signal_dims[1];
// below loops are traditional loops, they only run multiple
// times filter length is more than launch size
for (int b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) {
int j = gy2-radius1;
bool is_j = j>=0 && j<d1;
// move row_set THREADS_Y along coloumns
for (int a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) {
int i = gx2-radius0;
bool is_i = i>=0 && i<d0;
shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : 0);
}
}
__syncthreads();
if (gx<out_dims[0] && gy<out_dims[1]) {
int ci = lx + radius0 + (expand ? 0 : fLen0>>1);
int cj = ly + radius1 + (expand ? 0 : fLen1>>1);
int accum = 0;
for(int fj=0; fj<fLen1; ++fj) {
for(int fi=0; fi<fLen0; ++fi) {
int s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)];
accum = accum + s_val;
}
}
dst[gy*out_strides[1]+gx] = (int)accum;
}
} |
13d5205fb77b2687c249e9622817885d668b3abf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <include/labwork.h>
#include <hip/hip_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
hipMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
labwork.saveOutputImage("labwork5-cpu-out.jpg");
labwork.labwork5_GPU();
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
// do something here
}
int getSPcores(hipDeviceProp_t devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int nDevices = 0;
// get all devices
hipGetDeviceCount(&nDevices);
printf("Number total of GPU : %d\n\n", nDevices);
for (int i = 0; i < nDevices; i++){
// get informations from individual device
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
// something more here
}
}
void Labwork::labwork3_GPU() {
// Calculate number of pixels
// Allocate CUDA memory
// Copy CUDA Memory from CPU to GPU
// Processing
// Copy CUDA Memory from GPU to CPU
// Cleaning
}
void Labwork::labwork4_GPU() {
}
void Labwork::labwork5_GPU(bool shared) {
}
void Labwork::labwork6_GPU() {
}
void Labwork::labwork7_GPU() {
}
void Labwork::labwork8_GPU() {
}
void Labwork::labwork9_GPU() {
}
void Labwork::labwork10_GPU(){
}
| 13d5205fb77b2687c249e9622817885d668b3abf.cu | #include <stdio.h>
#include <include/labwork.h>
#include <cuda_runtime_api.h>
#include <omp.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2018, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
cudaMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
labwork.saveOutputImage("labwork5-cpu-out.jpg");
labwork.labwork5_GPU();
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
printf("[ALGO ONLY] labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
// do something here
}
int getSPcores(cudaDeviceProp devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int nDevices = 0;
// get all devices
cudaGetDeviceCount(&nDevices);
printf("Number total of GPU : %d\n\n", nDevices);
for (int i = 0; i < nDevices; i++){
// get informations from individual device
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
// something more here
}
}
void Labwork::labwork3_GPU() {
// Calculate number of pixels
// Allocate CUDA memory
// Copy CUDA Memory from CPU to GPU
// Processing
// Copy CUDA Memory from GPU to CPU
// Cleaning
}
void Labwork::labwork4_GPU() {
}
void Labwork::labwork5_GPU(bool shared) {
}
void Labwork::labwork6_GPU() {
}
void Labwork::labwork7_GPU() {
}
void Labwork::labwork8_GPU() {
}
void Labwork::labwork9_GPU() {
}
void Labwork::labwork10_GPU(){
}
|
3fd5edad014e9cee327017e7c942af7c97c79bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "colMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int M = 2;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
colMul), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,M,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
colMul), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,M,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
colMul), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,M,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3fd5edad014e9cee327017e7c942af7c97c79bfe.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "colMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int M = 2;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
colMul<<<gridBlock,threadBlock>>>(a,b,c,M,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
colMul<<<gridBlock,threadBlock>>>(a,b,c,M,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
colMul<<<gridBlock,threadBlock>>>(a,b,c,M,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
063b08d036315708547e2cb3239a54b29d72ec69.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cassert>
#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
cv::Mat imageRGBA;
cv::Mat imageGrey;
uchar4 *d_rgbaImage__;
unsigned char *d_greyImage__;
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
void preProcess(uchar4 **inputImage, unsigned char **greyImage,
uchar4 **d_rgbaImage, unsigned char **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
hipFree(0);
cv::Mat image;
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); // CV_BGR2GRAY
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = imageGrey.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
hipMalloc(d_rgbaImage, sizeof(uchar4) * numPixels);
hipMalloc(d_greyImage, sizeof(unsigned char) * numPixels);
hipMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char)); //make sure no memory is left laying around
//copy input array to the GPU
hipMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice);
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
void postProcess(const std::string& output_file, unsigned char* data_ptr) {
cv::Mat output(numRows(), numCols(), CV_8UC1, (void*)data_ptr);
//output the image
cv::imwrite(output_file.c_str(), output);
}
__global__
void rgbaToGreyscaleCudaKernel(const uchar4* const rgbaImage,
unsigned char* const greyImage,
const int numRows, const int numCols)
{
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if(pointIndex<numRows*numCols) { // this is necessary only if too many threads are started
uchar4 const imagePoint = rgbaImage[pointIndex];
greyImage[pointIndex] = .299f*imagePoint.x + .587f*imagePoint.y + .114f*imagePoint.z;
}
}
void rgbaToGreyscaleCuda(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, const size_t numRows, const size_t numCols)
{
const int blockThreadSize = 512;
const int numberOfBlocks = 1 + ((numRows*numCols - 1) / blockThreadSize); // a/b rounded up
const dim3 blockSize(blockThreadSize, 1, 1);
const dim3 gridSize(numberOfBlocks , 1, 1);
hipLaunchKernelGGL(( rgbaToGreyscaleCudaKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
}
void processUsingCuda(std::string input_file, std::string output_file) {
// pointers to images in CPU's memory (h_) and GPU's memory (d_)
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);
rgbaToGreyscaleCuda(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
hipDeviceSynchronize();
size_t numPixels = numRows()*numCols();
hipMemcpy(h_greyImage, d_greyImage, sizeof(unsigned char) * numPixels, hipMemcpyDeviceToHost);
//check results and output the grey image
postProcess(output_file, h_greyImage);
}
int main(int argc, char **argv) {
processUsingCuda("flip.jpg", "gris_flip.jpg");
return 0;
}
| 063b08d036315708547e2cb3239a54b29d72ec69.cu | #include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cassert>
#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
cv::Mat imageRGBA;
cv::Mat imageGrey;
uchar4 *d_rgbaImage__;
unsigned char *d_greyImage__;
size_t numRows() { return imageRGBA.rows; }
size_t numCols() { return imageRGBA.cols; }
void preProcess(uchar4 **inputImage, unsigned char **greyImage,
uchar4 **d_rgbaImage, unsigned char **d_greyImage,
const std::string &filename) {
//make sure the context initializes ok
cudaFree(0);
cv::Mat image;
image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "Couldn't open file: " << filename << std::endl;
exit(1);
}
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); // CV_BGR2GRAY
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
//This shouldn't ever happen given the way the images are created
//at least based upon my limited understanding of OpenCV, but better to check
if (!imageRGBA.isContinuous() || !imageGrey.isContinuous()) {
std::cerr << "Images aren't continuous!! Exiting." << std::endl;
exit(1);
}
*inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0);
*greyImage = imageGrey.ptr<unsigned char>(0);
const size_t numPixels = numRows() * numCols();
//allocate memory on the device for both input and output
cudaMalloc(d_rgbaImage, sizeof(uchar4) * numPixels);
cudaMalloc(d_greyImage, sizeof(unsigned char) * numPixels);
cudaMemset(*d_greyImage, 0, numPixels * sizeof(unsigned char)); //make sure no memory is left laying around
//copy input array to the GPU
cudaMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice);
d_rgbaImage__ = *d_rgbaImage;
d_greyImage__ = *d_greyImage;
}
void postProcess(const std::string& output_file, unsigned char* data_ptr) {
cv::Mat output(numRows(), numCols(), CV_8UC1, (void*)data_ptr);
//output the image
cv::imwrite(output_file.c_str(), output);
}
__global__
void rgbaToGreyscaleCudaKernel(const uchar4* const rgbaImage,
unsigned char* const greyImage,
const int numRows, const int numCols)
{
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
const long pointIndex = threadIdx.x + blockDim.x*blockIdx.x;
if(pointIndex<numRows*numCols) { // this is necessary only if too many threads are started
uchar4 const imagePoint = rgbaImage[pointIndex];
greyImage[pointIndex] = .299f*imagePoint.x + .587f*imagePoint.y + .114f*imagePoint.z;
}
}
void rgbaToGreyscaleCuda(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, const size_t numRows, const size_t numCols)
{
const int blockThreadSize = 512;
const int numberOfBlocks = 1 + ((numRows*numCols - 1) / blockThreadSize); // a/b rounded up
const dim3 blockSize(blockThreadSize, 1, 1);
const dim3 gridSize(numberOfBlocks , 1, 1);
rgbaToGreyscaleCudaKernel<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
}
void processUsingCuda(std::string input_file, std::string output_file) {
// pointers to images in CPU's memory (h_) and GPU's memory (d_)
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
//load the image and give us our input and output pointers
preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);
rgbaToGreyscaleCuda(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
cudaDeviceSynchronize();
size_t numPixels = numRows()*numCols();
cudaMemcpy(h_greyImage, d_greyImage, sizeof(unsigned char) * numPixels, cudaMemcpyDeviceToHost);
//check results and output the grey image
postProcess(output_file, h_greyImage);
}
int main(int argc, char **argv) {
processUsingCuda("flip.jpg", "gris_flip.jpg");
return 0;
}
|
7009aff618fcfca4921c07c9a5a7ee1f2c8c7d9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudacommon.h"
#include <stdio.h>
#include "ResultDatabase.h"
#include "OptionParser.h"
#include "ProgressBar.h"
#include "Utility.h"
// Forward Declarations for benchmark kernels
// Hand-unrolled single/double precision kernels (defined later in this file).
__global__ void MAddU(float *target, float val1, float val2);
__global__ void MulMAddU(float *target, float val1, float val2);
__global__ void MAddU_DP(double *target, double val1, double val2);
__global__ void MulMAddU_DP(double *target, double val1, double val2);
// Add kernels
// The numeric suffix (1/2/4/8) is the number of independent data streams
// each kernel variant processes; templated on float/double.
template <class T> __global__ void Add1(T *data, int nIters, T v);
template <class T> __global__ void Add2(T *data, int nIters, T v);
template <class T> __global__ void Add4(T *data, int nIters, T v);
template <class T> __global__ void Add8(T *data, int nIters, T v);
// Mul kernels
template <class T> __global__ void Mul1(T *data, int nIters, T v);
template <class T> __global__ void Mul2(T *data, int nIters, T v);
template <class T> __global__ void Mul4(T *data, int nIters, T v);
template <class T> __global__ void Mul8(T *data, int nIters, T v);
// MAdd kernels
template <class T> __global__ void MAdd1(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MAdd2(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MAdd4(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MAdd8(T *data, int nIters, T v1, T v2);
// MulMAdd kernels
template <class T> __global__ void MulMAdd1(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MulMAdd2(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MulMAdd4(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MulMAdd8(T *data, int nIters, T v1, T v2);
// Forward Declarations
// execute simple precision and double precision versions of the benchmarks
template <class T> void
RunTest(ResultDatabase &resultDB, int npasses, int verbose, int quiet,
float repeatF, ProgressBar &pb, const char* precision);
// Block size to use in measurements
// Separate sizes for single vs. double precision (DP uses more registers).
#define BLOCK_SIZE_SP 256
#define BLOCK_SIZE_DP 128
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: December 11, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op)
{
    // This benchmark has no benchmark-specific command-line options;
    // it relies solely on the common options (passes, verbose, quiet).
}
// ****************************************************************************
// Function: runBenchmark
//
// Purpose:
// This benchmark measures the max floating point capability of a gpu using
// a highly unrolled kernel with a large number of floating point operations.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: September 08, 2009
//
// Modifications:
// Jeremy Meredith, Fri May 14 11:23:10 EDT 2010
// Made double precision a copy of SP, with a few tweaks.
// Allow any capability at least 1.3 or 2.0 to use double.
//
// Gabriel Marin, Thu Jan 13, 2010
// Add the auto-generated kernels from the OpenCL implementation.
// DP / SP implemented as templates for the new kernels.
// Add text progress bar.
//
// ****************************************************************************
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
bool verbose = op.getOptionBool("verbose");
bool quiet = op.getOptionBool("quiet");
const unsigned int passes = op.getOptionInt("passes");
// Test to see if this device supports double precision
int device;
hipGetDevice(&device);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
bool doDouble = false;
// Double precision requires compute capability >= 1.3 (or any 2.x+ device).
if ((deviceProp.major == 1 && deviceProp.minor >= 3) ||
(deviceProp.major >= 2))
{
doDouble = true;
}
// determine the speed of the device first. This determines the number of
// iterations for all kernels.
const unsigned int halfBufSize = 1024*1024;
// halfNumFloats counts elements in half the buffer; the full buffer holds
// twice that so the first half can mirror the second half for checking.
unsigned int halfNumFloats = halfBufSize / sizeof(float), numFloats = 2*halfNumFloats;
float *gpu_mem, *hostMem;
hostMem = new float[numFloats];
hipMalloc((void**)&gpu_mem, halfBufSize*2);
CHECK_CUDA_ERROR();
// Initialize host data, with the first half the same as the second
// NOTE(review): int j vs. unsigned halfNumFloats is a signed/unsigned
// comparison; harmless at this size but worth confirming compiler warnings.
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (float)(drand48()*10.0);
}
// Variables used for timing
float t = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
CHECK_CUDA_ERROR();
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, halfBufSize*2, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Thread block configuration
dim3 threads(BLOCK_SIZE_SP,1,1);
dim3 blocks((numFloats)/BLOCK_SIZE_SP,1,1);
// Decrease block size for devices with lower compute
// capability. Avoids an out of resources error
if ((deviceProp.major == 1 && deviceProp.minor <= 2))
{
threads.x = 128;
blocks.x = (numFloats)/128;
}
// Benchmark the MulMAdd2 kernel to compute a scaling factor.
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MulMAdd2<float>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, 10, 3.75, 0.355);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
// hipEventElapsedTime reports milliseconds; scale to nanoseconds so the
// constant below yields a repeat factor near 1 on the reference device.
t *= 1.e6;
double repeatF = 1.1e07 / (double)t;
fprintf (stdout, "Adjust repeat factor = %lg\n", repeatF);
// The calibration buffers are no longer needed.
delete[] hostMem;
hipFree((void*)gpu_mem);
CHECK_CUDA_ERROR();
// Initialize progress bar. We have 16 generic kernels and 2 hand tuned kernels.
// Each kernel is executed 'passes' number of times for each single precision and
// double precision (if avaialble).
int totalRuns = 18*passes;
if (doDouble)
totalRuns <<= 1; // multiply by 2
ProgressBar pb(totalRuns);
if (!verbose && !quiet)
pb.Show(stdout);
// Run single precision kernels
RunTest<float> (resultDB, passes, verbose, quiet,
repeatF, pb, "-SP");
if (doDouble)
RunTest<double> (resultDB, passes, verbose, quiet,
repeatF, pb, "-DP");
else
{
// The result database expects an entry for every benchmark even when the
// device cannot run it; FLT_MAX marks DP results as unsupported.
const char atts[] = "DP_Not_Supported";
for (int pas=0 ; pas<passes ; ++pas)
{
resultDB.AddResult("Add1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Add2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Add4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Add8-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul8-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd8-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd8-DP", atts, "GFLOPS", FLT_MAX);
// we deal with these separately
//resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS", FLT_MAX);
//resultDB.AddResult("MAddU-DP", atts, "GFLOPS", FLT_MAX);
}
}
// Problem Size
int w = 2048, h = 2048;
float root2 = 1.4142;
// On slow devices (repeatF < 1) halve the problem area while doubling
// repeatF until the scaled repeat factor reaches ~1/sqrt(2).
if (repeatF<1)
while (repeatF*root2<1) {
repeatF*=2;
if (w>h) w >>= 1;
else h >>= 1;
}
/*
When auto-scaling up, we must make sure that we do not exceed
some device limit for block size. Disable for now.
*/
/*
else
while (repeatF>root2) {
repeatF *= 0.5;
if (w>h) h <<= 1;
else w <<= 1;
}
*/
const int nbytes_sp = w * h * sizeof(float);
// Allocate gpu memory
float *target_sp;
hipMalloc((void**)&target_sp, nbytes_sp);
CHECK_CUDA_ERROR();
// Get a couple non-zero random numbers
// NOTE(review): val1/val2 are floats but are also passed to the _DP kernels
// below, narrowing the random values before widening back to double.
float val1 = 0, val2 = 0;
while (val1==0 || val2==0)
{
val1 = drand48();
val2 = drand48();
}
blocks.x = (w*h)/threads.x;
// Time the hand-unrolled single-precision kernels, 'passes' times each.
for (int p = 0; p < passes; p++)
{
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MAddU), dim3(blocks), dim3(threads) , 0, 0, target_sp, val1, val2);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
// milliseconds -> seconds
t /= 1.e3;
// Add result
char atts[1024];
// Flop count per element is fixed by the kernel's unrolling structure.
long int nflopsPerPixel = ((2*32)*10*10*5) + 61;
sprintf(atts, "Size:%d", w*h);
resultDB.AddResult("MAddU-SP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MulMAddU), dim3(blocks), dim3(threads) , 0, 0, target_sp, val1, val2);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t /= 1.e3;
// Add result
nflopsPerPixel = ((3*8)*10*10*5) + 13;
sprintf(atts, "Size:%d",w*h);
resultDB.AddResult("MulMAddU-SP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
}
hipFree((void*)target_sp);
CHECK_CUDA_ERROR();
if (doDouble)
{
const int nbytes_dp = w * h * sizeof(double);
double *target_dp;
hipMalloc((void**)&target_dp, nbytes_dp);
CHECK_CUDA_ERROR();
// Thread block configuration
// These shadow the outer threads/blocks on purpose: DP uses a smaller block.
dim3 threads(BLOCK_SIZE_DP,1,1);
dim3 blocks((w*h)/BLOCK_SIZE_DP,1,1);
const unsigned int passes = op.getOptionInt("passes");
for (int p = 0; p < passes; p++)
{
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MAddU_DP), dim3(blocks), dim3(threads) , 0, 0, target_dp, val1, val2);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t /= 1.e3;
// Add result
char atts[1024];
long int nflopsPerPixel = ((2*32)*10*10*5) + 61;
sprintf(atts, "Size:%d", w*h);
resultDB.AddResult("MAddU-DP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MulMAddU_DP), dim3(blocks), dim3(threads) , 0, 0, target_dp, val1, val2);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t /= 1.e3;
// Add result
nflopsPerPixel = ((3*8)*10*10*5) + 13;
sprintf(atts, "Size:%d",w*h);
resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
}
hipFree((void*)target_dp);
CHECK_CUDA_ERROR();
}
else
{
// Add result
char atts[1024];
sprintf(atts, "Size:%d", w * h);
// resultDB requires neg entry for every possible result
const unsigned int passes = op.getOptionInt("passes");
for (int p = 0; p < passes; p++) {
resultDB.AddResult("MAddU-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS", FLT_MAX);
}
}
if (!verbose)
fprintf (stdout, "\n\n");
// Release timing events.
hipEventDestroy(start);
hipEventDestroy(stop);
}
// ****************************************************************************
// Function: RunTest
//
// Purpose:
// Template function used for specializing the generic kernels for
// single precision and double precision.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
//
// Returns: nothing
//
// Programmer: Gabriel Marin
// Creation: January 13, 2010
//
// ****************************************************************************
template <class T> void
RunTest(ResultDatabase &resultDB,
int npasses,
int verbose,
int quiet,
float repeatF,
ProgressBar &pb,
const char* precision)
{
T *gpu_mem;
char sizeStr[128];
T *hostMem, *hostMem2;
int realRepeats = (int)round(repeatF*20);
if (realRepeats < 2)
realRepeats = 2;
// Alloc host memory
int halfNumFloats = 1024*1024;
int numFloats = 2*halfNumFloats;
hostMem = new T[numFloats];
hostMem2 = new T[numFloats];
hipMalloc((void**)&gpu_mem, numFloats*sizeof(T));
CHECK_CUDA_ERROR();
// Variables used for timing
float t = 0.0f;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
CHECK_CUDA_ERROR();
// Thread block configuration
dim3 threads(128,1,1);
dim3 blocks((numFloats)/128,1,1);
for (int pass=0 ; pass<npasses ; ++pass)
{
// Benchmark each generic kernel. Generate new random numbers for each run.
////////// Add1 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Add1 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Add1<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
double flopCount = (double)numFloats * 1 * realRepeats * 240 * 1;
double gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Add1")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// Add2 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Add2 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Add2<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 1 * realRepeats * 120 * 2;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Add2")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// Add4 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Add4 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Add4<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 1 * realRepeats * 60 * 4;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Add4")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// Add8 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Add8 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Add8<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 1 * realRepeats * 30 * 8;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Add8")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// Mul1 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Mul1 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Mul1<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 1.01);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 200 * 1;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Mul1")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// Mul2 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Mul2 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Mul2<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 1.01);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 100 * 2;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Mul2")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// Mul4 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Mul4 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Mul4<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 1.01);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 50 * 4;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Mul4")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// Mul8 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the Mul8 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( Mul8<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 1.01);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 25 * 8;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("Mul8")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MAdd1 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MAdd1 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MAdd1<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0, 0.9899);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 240 * 1;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MAdd1")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MAdd2 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MAdd2 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MAdd2<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0, 0.9899);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 120 * 2;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MAdd2")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MAdd4 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MAdd4 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MAdd4<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0, 0.9899);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 60 * 4;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MAdd4")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MAdd8 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MAdd8 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MAdd8<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 10.0, 0.9899);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 2 * realRepeats * 30 * 8;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MAdd8")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MulMAdd1 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MulMAdd1 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MulMAdd1<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 3.75, 0.355);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 3 * realRepeats * 160 * 1;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MulMAdd1")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MulMAdd2 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MulMAdd2 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MulMAdd2<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 3.75, 0.355);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 3 * realRepeats * 80 * 2;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MulMAdd2")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MulMAdd4 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MulMAdd4 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MulMAdd4<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 3.75, 0.355);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 3 * realRepeats * 40 * 4;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MulMAdd4")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
////////// MulMAdd8 //////////
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
}
// copy host memory to GPU memory
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Execute the MulMAdd8 kernel
t = 0.0f;
hipEventRecord(start, 0);
hipLaunchKernelGGL(( MulMAdd8<T>), dim3(blocks), dim3(threads) , 0, 0, gpu_mem, realRepeats, 3.75, 0.355);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
hipEventElapsedTime(&t, start, stop);
t *= 1.e6;
// flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
flopCount = (double)numFloats * 3 * realRepeats * 20 * 8;
gflop = flopCount / (double)(t);
sprintf (sizeStr, "Size:%07d", numFloats);
resultDB.AddResult(string("MulMAdd8")+precision, sizeStr, "GFLOPS", gflop);
// Zero out the test host memory
for (int j=0 ; j<numFloats ; ++j)
hostMem2[j] = 0.0;
// Read the result device memory back to the host
hipEventRecord(start, 0); // do I even need this if I do not need the time?
hipMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Check the result -- At a minimum the first half of memory
// should match the second half exactly
for (int j=0 ; j<halfNumFloats ; ++j)
{
if (hostMem2[j] != hostMem2[numFloats-j-1])
{
cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
<< " is different from its twin element hostMem2["
<< (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
<<"; stopping check\n";
break;
}
}
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
}
delete[] hostMem;
delete[] hostMem2;
hipFree((void*)gpu_mem);
CHECK_CUDA_ERROR();
hipEventDestroy(start);
hipEventDestroy(stop);
}
// Macros used to construct MaxFlops kernels
// Each mad OP is 32*2 = 64 FLOPS
#define OP { \
s0 = s6*s5 + s28; \
s1 = s7*s6 + s29; \
s2 = s8*s7 + s30; \
s3 = s9*s8 + s31; \
s4 = s10*s9 + s0; \
s5 = s11*s10 + s1; \
s6 = s12*s11 + s2; \
s7 = s13*s12 + s3; \
s8 = s14*s13 + s4; \
s9 = s15*s14 + s5; \
s10 = s16*s15 + s6; \
s11 = s17*s16 + s7; \
s12 = s18*s17 + s8; \
s13 = s19*s18 + s9; \
s14 = s20*s19 + s10; \
s15 = s21*s20 + s11; \
s16 = s22*s21 + s12; \
s17 = s23*s22 + s13; \
s18 = s24*s23 + s14; \
s19 = s25*s24 + s15; \
s20 = s26*s25 + s16; \
s21 = s27*s26 + s17; \
s22 = s28*s27 + s18; \
s23 = s29*s28 + s19; \
s24 = s30*s29 + s20; \
s25 = s31*s30 + s21; \
s26 = s0*s31 + s22; \
s27 = s1*s0 + s23; \
s28 = s2*s1 + s24; \
s29 = s3*s2 + s25; \
s30 = s4*s3 + s26; \
s31 = s5*s4 + s27; \
}
// So each OP10 is 640 FLOPS (10 OPs x 64 FLOPS)
#define OP10 { OP OP OP OP OP OP OP OP OP OP }
// Each mad+mul MMOP is 8*3 = 24 FLOPS
#define MMOP { \
s0 = s4*s4 + s4; \
s6 = s0*s5; \
s1 = s5*s5 + s5; \
s7 = s1*s6; \
s2 = s6*s6 + s6; \
s0 = s2*s7; \
s3 = s7*s7 + s7; \
s1 = s3*s0; \
s4 = s0*s0 + s0; \
s2 = s4*s1; \
s5 = s1*s1 + s1; \
s3 = s5*s2; \
s6 = s2*s2 + s2; \
s4 = s6*s3; \
s7 = s3*s3 + s3; \
s5 = s7*s4; \
}
// So each MMOP10 is 240 FLOPS (10 MMOPs x 24 FLOPS)
#define MMOP10 { MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP }
// Benchmark Kernels
// Peak single-precision FLOPS microbenchmark kernel.
// Thirty-two independent accumulators s0..s31 are updated by the chained
// multiply-add OP10 macros defined above (each OP is 32 MADs = 64 FLOPS).
// Writes one result element per thread; there is no bounds guard, so the
// launch grid must exactly tile `target`.
// val1/val2 are runtime seeds so the compiler cannot constant-fold the chain.
__global__ void MAddU(float *target, float val1, float val2)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Create a bunch of local variables we can use up to 32 steps..
    // (all seeds derive from val1/val2 to defeat constant propagation)
    register float v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
    register float v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
    register float v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
    register float v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
    register float v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
    register float v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
    register float v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
    register float v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
    register float s0=v0, s1=v1, s2=v2, s3=v3;
    register float s4=v4, s5=v5, s6=v6, s7=v7;
    register float s8=v8, s9=v9, s10=v10, s11=v11;
    register float s12=v12, s13=v13, s14=v14, s15=v15;
    register float s16=v16, s17=v17, s18=v18, s19=v19;
    register float s20=v20, s21=v21, s22=v22, s23=v23;
    register float s24=v24, s25=v25, s26=v26, s27=v27;
    register float s28=v28, s29=v29, s30=v30, s31=v31;
    // 10 OP10s inside the loop = 6400 FLOPS in the .ptx code
    // and 5 loops of 10 OP10s = 32000 FLOPS per pixel total
    for (int i=0; i<5; i++)
    {
        OP10; OP10; OP10; OP10; OP10;
        OP10; OP10; OP10; OP10; OP10;
    }
    // Sum all accumulators so every chain is live and must be computed.
    float result = (s0+s1+s2+s3+s4+s5+s6+s7+
                    s8+s9+s10+s11+s12+s13+s14+s15 +
                    s16+s17+s18+s19+s20+s21+s22+s23+
                    s24+s25+s26+s27+s28+s29+s30+s31);
    target[index] = result;
}
// Double-precision variant of MAddU: same 32-accumulator multiply-add chain
// via the OP10 macros, operating on doubles.  The host code only launches
// this when the device reports double-precision support.
// No bounds guard: the launch grid must exactly tile `target`.
__global__ void MAddU_DP(double *target, double val1, double val2)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Seeds derive from val1/val2 to defeat constant propagation.
    register double v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
    register double v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
    register double v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
    register double v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
    register double v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
    register double v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
    register double v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
    register double v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
    register double s0=v0, s1=v1, s2=v2, s3=v3;
    register double s4=v4, s5=v5, s6=v6, s7=v7;
    register double s8=v8, s9=v9, s10=v10, s11=v11;
    register double s12=v12, s13=v13, s14=v14, s15=v15;
    register double s16=v16, s17=v17, s18=v18, s19=v19;
    register double s20=v20, s21=v21, s22=v22, s23=v23;
    register double s24=v24, s25=v25, s26=v26, s27=v27;
    register double s28=v28, s29=v29, s30=v30, s31=v31;
    // 10 OP10s inside the loop = 6400 FLOPS in the .ptx code
    // and 5 loops of 10 OP10s = 32000 FLOPS per pixel total
    for (int i=0; i<5; i++)
    {
        OP10; OP10; OP10; OP10; OP10;
        OP10; OP10; OP10; OP10; OP10;
    }
    // Sum all accumulators so every chain is live and must be computed.
    double result = (s0+s1+s2+s3+s4+s5+s6+s7+
                     s8+s9+s10+s11+s12+s13+s14+s15 +
                     s16+s17+s18+s19+s20+s21+s22+s23+
                     s24+s25+s26+s27+s28+s29+s30+s31);
    target[index] = result;
}
// Mixed multiply / multiply-add FLOPS microbenchmark kernel.
// Uses the MMOP10 macros defined above (each MMOP interleaves 8 MADs with
// 8 MULs = 24 FLOPS) over accumulators s0..s7; s8..s31 are carried through
// untouched and folded into the final sum.
// No bounds guard: the launch grid must exactly tile `target`.
__global__ void MulMAddU(float *target, float val1, float val2)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Seeds derive from val1/val2 to defeat constant propagation.
    register float v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
    register float v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
    register float v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
    register float v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
    register float v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
    register float v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
    register float v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
    register float v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
    register float s0=v0, s1=v1, s2=v2, s3=v3;
    register float s4=v4, s5=v5, s6=v6, s7=v7;
    register float s8=v8, s9=v9, s10=v10, s11=v11;
    register float s12=v12, s13=v13, s14=v14, s15=v15;
    register float s16=v16, s17=v17, s18=v18, s19=v19;
    register float s20=v20, s21=v21, s22=v22, s23=v23;
    register float s24=v24, s25=v25, s26=v26, s27=v27;
    register float s28=v28, s29=v29, s30=v30, s31=v31;
    // 10 MMOP10s inside the loop = 2400 FLOPS in the .ptx code
    // and 5 loops of 10 MMOP10s = 12000 FLOPS per pixel total
    for (int i=0; i<5; i++)
    {
        MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
        MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
    }
    // Sum all accumulators so every chain is live and must be computed.
    float result = (s0+s1+s2+s3+s4+s5+s6+s7+
                    s8+s9+s10+s11+s12+s13+s14+s15 +
                    s16+s17+s18+s19+s20+s21+s22+s23+
                    s24+s25+s26+s27+s28+s29+s30+s31);
    target[index] = result;
}
// Double-precision variant of MulMAddU: interleaved MAD/MUL chains via the
// MMOP10 macros over s0..s7, with s8..s31 carried into the final sum.
// The host code only launches this when the device supports doubles.
// No bounds guard: the launch grid must exactly tile `target`.
__global__ void MulMAddU_DP(double *target, double val1, double val2)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Seeds derive from val1/val2 to defeat constant propagation.
    register double v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
    register double v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
    register double v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
    register double v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
    register double v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
    register double v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
    register double v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
    register double v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
    register double s0=v0, s1=v1, s2=v2, s3=v3;
    register double s4=v4, s5=v5, s6=v6, s7=v7;
    register double s8=v8, s9=v9, s10=v10, s11=v11;
    register double s12=v12, s13=v13, s14=v14, s15=v15;
    register double s16=v16, s17=v17, s18=v18, s19=v19;
    register double s20=v20, s21=v21, s22=v22, s23=v23;
    register double s24=v24, s25=v25, s26=v26, s27=v27;
    register double s28=v28, s29=v29, s30=v30, s31=v31;
    // 10 MMOP10s inside the loop = 2400 FLOPS in the .ptx code
    // and 5 loops of 10 MMOP10s = 12000 FLOPS per pixel total
    for (int i=0; i<5; i++)
    {
        MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
        MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
    }
    // Sum all accumulators so every chain is live and must be computed.
    double result = (s0+s1+s2+s3+s4+s5+s6+s7+
                     s8+s9+s10+s11+s12+s13+s14+s15 +
                     s16+s17+s18+s19+s20+s21+s22+s23+
                     s24+s25+s26+s27+s28+s29+s30+s31);
    target[index] = result;
}
// v = 10.0
#define ADD1_OP s=v-s;
#define ADD2_OP ADD1_OP s2=v-s2;
#define ADD4_OP ADD2_OP s3=v-s3; s4=v-s4;
#define ADD8_OP ADD4_OP s5=v-s5; s6=v-s6; s7=v-s7; s8=v-s8;
// v = 1.01
#define MUL1_OP s=s*s*v;
#define MUL2_OP MUL1_OP s2=s2*s2*v;
#define MUL4_OP MUL2_OP s3=s3*s3*v; s4=s4*s4*v;
#define MUL8_OP MUL4_OP s5=s5*s5*v; s6=s6*s6*v; s7=s7*s7*v; s8=s8*s8*v;
// v1 = 10.0, v2 = 0.9899
#define MADD1_OP s=v1-s*v2;
#define MADD2_OP MADD1_OP s2=v1-s2*v2;
#define MADD4_OP MADD2_OP s3=v1-s3*v2; s4=v1-s4*v2;
#define MADD8_OP MADD4_OP s5=v1-s5*v2; s6=v1-s6*v2; s7=v1-s7*v2; s8=v1-s8*v2;
// v1 = 3.75, v2 = 0.355
#define MULMADD1_OP s=(v1-v2*s)*s;
#define MULMADD2_OP MULMADD1_OP s2=(v1-v2*s2)*s2;
#define MULMADD4_OP MULMADD2_OP s3=(v1-v2*s3)*s3; s4=(v1-v2*s4)*s4;
#define MULMADD8_OP MULMADD4_OP s5=(v1-v2*s5)*s5; s6=(v1-v2*s6)*s6; s7=(v1-v2*s7)*s7; s8=(v1-v2*s8)*s8;
#define ADD1_MOP20 \
ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP \
ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP
#define ADD2_MOP20 \
ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP \
ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP
#define ADD4_MOP10 \
ADD4_OP ADD4_OP ADD4_OP ADD4_OP ADD4_OP \
ADD4_OP ADD4_OP ADD4_OP ADD4_OP ADD4_OP
#define ADD8_MOP5 \
ADD8_OP ADD8_OP ADD8_OP ADD8_OP ADD8_OP
#define MUL1_MOP20 \
MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP \
MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP
#define MUL2_MOP20 \
MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP \
MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP
#define MUL4_MOP10 \
MUL4_OP MUL4_OP MUL4_OP MUL4_OP MUL4_OP \
MUL4_OP MUL4_OP MUL4_OP MUL4_OP MUL4_OP
#define MUL8_MOP5 \
MUL8_OP MUL8_OP MUL8_OP MUL8_OP MUL8_OP
#define MADD1_MOP20 \
MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP \
MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP
#define MADD2_MOP20 \
MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP \
MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP
#define MADD4_MOP10 \
MADD4_OP MADD4_OP MADD4_OP MADD4_OP MADD4_OP \
MADD4_OP MADD4_OP MADD4_OP MADD4_OP MADD4_OP
#define MADD8_MOP5 \
MADD8_OP MADD8_OP MADD8_OP MADD8_OP MADD8_OP
#define MULMADD1_MOP20 \
MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP \
MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP
#define MULMADD2_MOP20 \
MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP \
MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP
#define MULMADD4_MOP10 \
MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP \
MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP
#define MULMADD8_MOP5 \
MULMADD8_OP MULMADD8_OP MULMADD8_OP MULMADD8_OP MULMADD8_OP
// Dependent-add throughput kernel, 1 accumulator (no instruction-level
// parallelism): every ADD1_OP is s = v - s, so each op depends on the last.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Add1(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid];
    for (int j=0 ; j<nIters ; ++j) {
        /* Each ADD1_MOP20 macro is 20 dependent subtracts;
           12 macros = 240 operations per loop iteration.
         */
        ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20
        ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20
    }
    data[gid] = s;
}
// Dependent-add throughput kernel, 2 independent accumulator chains (s, s2)
// to expose 2-way instruction-level parallelism.  Writing s+s2 keeps both
// chains live.  No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Add2(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each ADD2_MOP20 macro is 20 ADD2_OPs (2 subtracts each);
           6 macros = 120 ADD2_OPs per loop iteration.
         */
        ADD2_MOP20 ADD2_MOP20 ADD2_MOP20
        ADD2_MOP20 ADD2_MOP20 ADD2_MOP20
    }
    data[gid] = s+s2;
}
// Dependent-add throughput kernel, 4 independent accumulator chains
// (s..s4) for 4-way instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Add4(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each ADD4_MOP10 macro is 10 ADD4_OPs (4 subtracts each);
           6 macros = 60 ADD4_OPs per loop iteration.
         */
        ADD4_MOP10 ADD4_MOP10 ADD4_MOP10
        ADD4_MOP10 ADD4_MOP10 ADD4_MOP10
    }
    data[gid] = (s+s2)+(s3+s4);
}
// Dependent-add throughput kernel, 8 independent accumulator chains
// (s..s8) for 8-way instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Add8(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each ADD8_MOP5 macro is 5 ADD8_OPs (8 subtracts each);
           6 macros = 30 ADD8_OPs per loop iteration.
         */
        ADD8_MOP5 ADD8_MOP5 ADD8_MOP5
        ADD8_MOP5 ADD8_MOP5 ADD8_MOP5
    }
    data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
// Dependent-multiply throughput kernel, 1 accumulator: each MUL1_OP is
// s = s*s*v (2 FLOPS).  The seed `data[gid]-data[gid]+0.999f` forces a real
// load of data[gid] while starting s near 1 (v is ~1.01 per the macro
// comment above, keeping values bounded).
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Mul1(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid]-data[gid]+0.999f;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MUL1_MOP20 macro is 20 dependent MUL1_OPs;
           10 macros = 200 operations per loop iteration.
         */
        MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20
        MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20
    }
    data[gid] = s;
}
// Dependent-multiply throughput kernel, 2 independent chains (s, s2).
// Seeds are near 1 so repeated s = s*s*v stays bounded; the
// data[gid]-data[gid] term forces a real global load.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Mul2(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MUL2_MOP20 macro is 20 MUL2_OPs (2 chains each);
           5 macros = 100 MUL2_OPs per loop iteration.
         */
        MUL2_MOP20 MUL2_MOP20 MUL2_MOP20
        MUL2_MOP20 MUL2_MOP20
    }
    data[gid] = s+s2;
}
// Dependent-multiply throughput kernel, 4 independent chains (s..s4).
// Seeds are near 1 so repeated s = s*s*v stays bounded; the
// data[gid]-data[gid] term forces a real global load.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Mul4(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f, s3=s-0.0002f, s4=s-0.0003f;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MUL4_MOP10 macro is 10 MUL4_OPs (4 chains each);
           5 macros = 50 MUL4_OPs per loop iteration.
         */
        MUL4_MOP10 MUL4_MOP10 MUL4_MOP10
        MUL4_MOP10 MUL4_MOP10
    }
    data[gid] = (s+s2)+(s3+s4);
}
// Dependent-multiply throughput kernel, 8 independent chains (s..s8).
// Seeds are near 1 so repeated s = s*s*v stays bounded; the
// data[gid]-data[gid] term forces a real global load.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void Mul8(T *data, int nIters, T v) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f, s3=s-0.0002f, s4=s-0.0003f, s5=s-0.0004f, s6=s-0.0005f, s7=s-0.0006f, s8=s-0.0007f;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MUL8_MOP5 macro is 5 MUL8_OPs (8 chains each);
           5 macros = 25 MUL8_OPs per loop iteration.
         */
        MUL8_MOP5 MUL8_MOP5 MUL8_MOP5
        MUL8_MOP5 MUL8_MOP5
    }
    data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
// Multiply-add throughput kernel, 1 accumulator: each MADD1_OP is
// s = v1 - s*v2 (2 FLOPS), fully dependent so no instruction-level
// parallelism.  No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MAdd1(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid];
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MADD1_MOP20 macro is 20 dependent MADs;
           12 macros = 240 operations per loop iteration.
         */
        MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20
        MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20
    }
    data[gid] = s;
}
// Multiply-add throughput kernel, 2 independent chains (s, s2) for 2-way
// instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MAdd2(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MADD2_MOP20 macro is 20 MADD2_OPs (2 MADs each);
           6 macros = 120 MADD2_OPs per loop iteration.
         */
        MADD2_MOP20 MADD2_MOP20 MADD2_MOP20
        MADD2_MOP20 MADD2_MOP20 MADD2_MOP20
    }
    data[gid] = s+s2;
}
// Multiply-add throughput kernel, 4 independent chains (s..s4) for 4-way
// instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MAdd4(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MADD4_MOP10 macro is 10 MADD4_OPs (4 MADs each);
           6 macros = 60 MADD4_OPs per loop iteration.
         */
        MADD4_MOP10 MADD4_MOP10 MADD4_MOP10
        MADD4_MOP10 MADD4_MOP10 MADD4_MOP10
    }
    data[gid] = (s+s2)+(s3+s4);
}
// Multiply-add throughput kernel, 8 independent chains (s..s8) for 8-way
// instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MAdd8(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MADD8_MOP5 macro is 5 MADD8_OPs (8 MADs each);
           6 macros = 30 MADD8_OPs per loop iteration.
         */
        MADD8_MOP5 MADD8_MOP5 MADD8_MOP5
        MADD8_MOP5 MADD8_MOP5 MADD8_MOP5
    }
    data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
// Mixed multiply + multiply-add throughput kernel, 1 accumulator:
// each MULMADD1_OP is s = (v1 - v2*s)*s (3 FLOPS), fully dependent.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MulMAdd1(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid];
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MULMADD1_MOP20 macro is 20 dependent ops;
           8 macros = 160 operations per loop iteration.
         */
        MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20
        MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20
    }
    data[gid] = s;
}
// Mixed multiply + multiply-add throughput kernel, 2 independent chains
// (s, s2) for 2-way instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MulMAdd2(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MULMADD2_MOP20 macro is 20 MULMADD2_OPs (2 chains each);
           4 macros = 80 MULMADD2_OPs per loop iteration.
         */
        MULMADD2_MOP20 MULMADD2_MOP20
        MULMADD2_MOP20 MULMADD2_MOP20
    }
    data[gid] = s+s2;
}
// Mixed multiply + multiply-add throughput kernel, 4 independent chains
// (s..s4) for 4-way instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MulMAdd4(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MULMADD4_MOP10 macro is 10 MULMADD4_OPs (4 chains each);
           4 macros = 40 MULMADD4_OPs per loop iteration.
         */
        MULMADD4_MOP10 MULMADD4_MOP10
        MULMADD4_MOP10 MULMADD4_MOP10
    }
    data[gid] = (s+s2)+(s3+s4);
}
// Mixed multiply + multiply-add throughput kernel, 8 independent chains
// (s..s8) for 8-way instruction-level parallelism.
// No bounds guard: the launch grid must exactly tile `data`.
template <class T>
__global__ void MulMAdd8(T *data, int nIters, T v1, T v2) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2;
    for (int j=0 ; j<nIters ; ++j) {
        /* Each MULMADD8_MOP5 macro is 5 MULMADD8_OPs (8 chains each);
           4 macros = 20 MULMADD8_OPs per loop iteration.
         */
        MULMADD8_MOP5 MULMADD8_MOP5
        MULMADD8_MOP5 MULMADD8_MOP5
    }
    data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
| 7009aff618fcfca4921c07c9a5a7ee1f2c8c7d9d.cu | #include "cudacommon.h"
#include <stdio.h>
#include "ResultDatabase.h"
#include "OptionParser.h"
#include "ProgressBar.h"
#include "Utility.h"
// Forward Declarations for benchmark kernels
__global__ void MAddU(float *target, float val1, float val2);
__global__ void MulMAddU(float *target, float val1, float val2);
__global__ void MAddU_DP(double *target, double val1, double val2);
__global__ void MulMAddU_DP(double *target, double val1, double val2);
// Add kernels
template <class T> __global__ void Add1(T *data, int nIters, T v);
template <class T> __global__ void Add2(T *data, int nIters, T v);
template <class T> __global__ void Add4(T *data, int nIters, T v);
template <class T> __global__ void Add8(T *data, int nIters, T v);
// Mul kernels
template <class T> __global__ void Mul1(T *data, int nIters, T v);
template <class T> __global__ void Mul2(T *data, int nIters, T v);
template <class T> __global__ void Mul4(T *data, int nIters, T v);
template <class T> __global__ void Mul8(T *data, int nIters, T v);
// MAdd kernels
template <class T> __global__ void MAdd1(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MAdd2(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MAdd4(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MAdd8(T *data, int nIters, T v1, T v2);
// MulMAdd kernels
template <class T> __global__ void MulMAdd1(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MulMAdd2(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MulMAdd4(T *data, int nIters, T v1, T v2);
template <class T> __global__ void MulMAdd8(T *data, int nIters, T v1, T v2);
// Forward Declarations
// execute simple precision and double precision versions of the benchmarks
template <class T> void
RunTest(ResultDatabase &resultDB, int npasses, int verbose, int quiet,
float repeatF, ProgressBar &pb, const char* precision);
// Block size to use in measurements
#define BLOCK_SIZE_SP 256
#define BLOCK_SIZE_DP 128
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: December 11, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op)
{
    // This benchmark defines no extra command-line options; the parser is
    // intentionally left unchanged.
}
// ****************************************************************************
// Function: runBenchmark
//
// Purpose:
// This benchmark measures the max floating point capability of a gpu using
// a highly unrolled kernel with a large number of floating point operations.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: September 08, 2009
//
// Modifications:
// Jeremy Meredith, Fri May 14 11:23:10 EDT 2010
// Made double precision a copy of SP, with a few tweaks.
// Allow any capability at least 1.3 or 2.0 to use double.
//
// Gabriel Marin, Thu Jan 13, 2010
// Add the auto-generated kernels from the OpenCL implementation.
// DP / SP implemented as templates for the new kernels.
// Add text progress bar.
//
// ****************************************************************************
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
bool verbose = op.getOptionBool("verbose");
bool quiet = op.getOptionBool("quiet");
const unsigned int passes = op.getOptionInt("passes");
// Test to see if this device supports double precision
int device;
cudaGetDevice(&device);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
bool doDouble = false;
if ((deviceProp.major == 1 && deviceProp.minor >= 3) ||
(deviceProp.major >= 2))
{
doDouble = true;
}
// determine the speed of the device first. This determines the number of
// iterations for all kernels.
const unsigned int halfBufSize = 1024*1024;
unsigned int halfNumFloats = halfBufSize / sizeof(float), numFloats = 2*halfNumFloats;
float *gpu_mem, *hostMem;
hostMem = new float[numFloats];
cudaMalloc((void**)&gpu_mem, halfBufSize*2);
CHECK_CUDA_ERROR();
// Initialize host data, with the first half the same as the second
for (int j=0; j<halfNumFloats; ++j)
{
hostMem[j] = hostMem[numFloats-j-1] = (float)(drand48()*10.0);
}
// Variables used for timing
float t = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
CHECK_CUDA_ERROR();
// copy host memory to GPU memory
cudaEventRecord(start, 0); // do I even need this if I do not need the time?
cudaMemcpy(gpu_mem, hostMem, halfBufSize*2, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Thread block configuration
dim3 threads(BLOCK_SIZE_SP,1,1);
dim3 blocks((numFloats)/BLOCK_SIZE_SP,1,1);
// Decrease block size for devices with lower compute
// capability. Avoids an out of resources error
if ((deviceProp.major == 1 && deviceProp.minor <= 2))
{
threads.x = 128;
blocks.x = (numFloats)/128;
}
// Benchmark the MulMAdd2 kernel to compute a scaling factor.
t = 0.0f;
cudaEventRecord(start, 0);
MulMAdd2<float><<< blocks, threads >>>(gpu_mem, 10, 3.75, 0.355);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CHECK_CUDA_ERROR();
cudaEventElapsedTime(&t, start, stop);
t *= 1.e6;
double repeatF = 1.1e07 / (double)t;
fprintf (stdout, "Adjust repeat factor = %lg\n", repeatF);
delete[] hostMem;
cudaFree((void*)gpu_mem);
CHECK_CUDA_ERROR();
// Initialize progress bar. We have 16 generic kernels and 2 hand tuned kernels.
// Each kernel is executed 'passes' number of times for each single precision and
// double precision (if avaialble).
int totalRuns = 18*passes;
if (doDouble)
totalRuns <<= 1; // multiply by 2
ProgressBar pb(totalRuns);
if (!verbose && !quiet)
pb.Show(stdout);
// Run single precision kernels
RunTest<float> (resultDB, passes, verbose, quiet,
repeatF, pb, "-SP");
if (doDouble)
RunTest<double> (resultDB, passes, verbose, quiet,
repeatF, pb, "-DP");
else
{
const char atts[] = "DP_Not_Supported";
for (int pas=0 ; pas<passes ; ++pas)
{
resultDB.AddResult("Add1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Add2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Add4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Add8-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("Mul8-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MAdd8-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd1-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd2-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd4-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAdd8-DP", atts, "GFLOPS", FLT_MAX);
// we deal with these separately
//resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS", FLT_MAX);
//resultDB.AddResult("MAddU-DP", atts, "GFLOPS", FLT_MAX);
}
}
// Problem Size
int w = 2048, h = 2048;
float root2 = 1.4142;
if (repeatF<1)
while (repeatF*root2<1) {
repeatF*=2;
if (w>h) w >>= 1;
else h >>= 1;
}
/*
When auto-scaling up, we must make sure that we do not exceed
some device limit for block size. Disable for now.
*/
/*
else
while (repeatF>root2) {
repeatF *= 0.5;
if (w>h) h <<= 1;
else w <<= 1;
}
*/
const int nbytes_sp = w * h * sizeof(float);
// Allocate gpu memory
float *target_sp;
cudaMalloc((void**)&target_sp, nbytes_sp);
CHECK_CUDA_ERROR();
// Get a couple non-zero random numbers
float val1 = 0, val2 = 0;
while (val1==0 || val2==0)
{
val1 = drand48();
val2 = drand48();
}
blocks.x = (w*h)/threads.x;
for (int p = 0; p < passes; p++)
{
t = 0.0f;
cudaEventRecord(start, 0);
MAddU<<< blocks, threads >>>(target_sp, val1, val2);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CHECK_CUDA_ERROR();
cudaEventElapsedTime(&t, start, stop);
t /= 1.e3;
// Add result
char atts[1024];
long int nflopsPerPixel = ((2*32)*10*10*5) + 61;
sprintf(atts, "Size:%d", w*h);
resultDB.AddResult("MAddU-SP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
cudaEventRecord(start, 0);
MulMAddU<<< blocks, threads >>>(target_sp, val1, val2);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CHECK_CUDA_ERROR();
cudaEventElapsedTime(&t, start, stop);
t /= 1.e3;
// Add result
nflopsPerPixel = ((3*8)*10*10*5) + 13;
sprintf(atts, "Size:%d",w*h);
resultDB.AddResult("MulMAddU-SP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
}
cudaFree((void*)target_sp);
CHECK_CUDA_ERROR();
if (doDouble)
{
const int nbytes_dp = w * h * sizeof(double);
double *target_dp;
cudaMalloc((void**)&target_dp, nbytes_dp);
CHECK_CUDA_ERROR();
// Thread block configuration
dim3 threads(BLOCK_SIZE_DP,1,1);
dim3 blocks((w*h)/BLOCK_SIZE_DP,1,1);
const unsigned int passes = op.getOptionInt("passes");
for (int p = 0; p < passes; p++)
{
cudaEventRecord(start, 0);
MAddU_DP<<< blocks, threads >>>(target_dp, val1, val2);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CHECK_CUDA_ERROR();
cudaEventElapsedTime(&t, start, stop);
t /= 1.e3;
// Add result
char atts[1024];
long int nflopsPerPixel = ((2*32)*10*10*5) + 61;
sprintf(atts, "Size:%d", w*h);
resultDB.AddResult("MAddU-DP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
cudaEventRecord(start, 0);
MulMAddU_DP<<< blocks, threads >>>(target_dp, val1, val2);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CHECK_CUDA_ERROR();
cudaEventElapsedTime(&t, start, stop);
t /= 1.e3;
// Add result
nflopsPerPixel = ((3*8)*10*10*5) + 13;
sprintf(atts, "Size:%d",w*h);
resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS",
(((double)nflopsPerPixel)*w*h) / (t*1.e9));
// update progress bar
pb.addItersDone();
if (!verbose && !quiet)
pb.Show(stdout);
}
cudaFree((void*)target_dp);
CHECK_CUDA_ERROR();
}
else
{
// Add result
char atts[1024];
sprintf(atts, "Size:%d", w * h);
// resultDB requires neg entry for every possible result
const unsigned int passes = op.getOptionInt("passes");
for (int p = 0; p < passes; p++) {
resultDB.AddResult("MAddU-DP", atts, "GFLOPS", FLT_MAX);
resultDB.AddResult("MulMAddU-DP", atts, "GFLOPS", FLT_MAX);
}
}
if (!verbose)
fprintf (stdout, "\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// ****************************************************************************
// Function: RunTest
//
// Purpose:
// Template function used for specializing the generic kernels for
// single precision and double precision.
//
// Arguments:
// resultDB: the benchmark stores its results in this ResultDatabase
//
// Returns: nothing
//
// Programmer: Gabriel Marin
// Creation: January 13, 2010
//
// ****************************************************************************
// Describes one generic FLOPS kernel: the result-database base name, a
// pointer to either a one-constant or a two-constant kernel (exactly one of
// the two pointers is non-NULL), the constant operand(s) passed to it, and
// the number of floating point operations performed per array element per
// 'realRepeats' iteration, written out as
//   flopCount/op * numLoopIters * unrollFactor * numStreams
// to preserve the original per-kernel accounting.
// NOTE(review): assumes the generic kernels are declared as
//   template<class T> __global__ void K(T*, int, T)        (Add*/Mul*)
//   template<class T> __global__ void K(T*, int, T, T)     (MAdd*/MulMAdd*)
// matching the original call sites in this file -- confirm against the
// kernel definitions.
template <class T>
struct GenericFlopsKernelDesc
{
    const char* name;                // e.g. "Add1"; precision suffix appended later
    void (*kern1)(T*, int, T);       // one-constant kernel, or NULL
    void (*kern2)(T*, int, T, T);    // two-constant kernel, or NULL
    T c1;                            // first constant operand
    T c2;                            // second constant operand (unused by kern1)
    double flopsPerElem;             // FLOPs per element per repeat
};

// Run and time one generic kernel once:
//   1. refill the host buffer with fresh random data, mirrored so the first
//      half equals the reversed second half,
//   2. copy it to the device, launch and time the kernel,
//   3. record GFLOPS in 'resultDB' under name+precision,
//   4. read the result back and verify the two mirrored halves still match
//      exactly (both halves went through identical arithmetic),
//   5. advance the progress bar.
// All buffers, events, and the launch configuration are owned by the caller.
template <class T>
static void
RunOneGenericFlopsKernel(const GenericFlopsKernelDesc<T>& k,
                         ResultDatabase& resultDB, const char* precision,
                         ProgressBar& pb, int verbose, int quiet,
                         T* gpu_mem, T* hostMem, T* hostMem2,
                         int numFloats, int halfNumFloats, int realRepeats,
                         dim3 blocks, dim3 threads,
                         cudaEvent_t start, cudaEvent_t stop)
{
    char sizeStr[128];
    float t = 0.0f;
    // Initialize host data, with the first half the same as the second
    for (int j=0; j<halfNumFloats; ++j)
        hostMem[j] = hostMem[numFloats-j-1] = (T)(drand48()*10.0);
    // copy host memory to GPU memory
    cudaEventRecord(start, 0);
    cudaMemcpy(gpu_mem, hostMem, numFloats*sizeof(T), cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Execute and time the kernel
    cudaEventRecord(start, 0);
    if (k.kern1)
        k.kern1<<< blocks, threads >>>(gpu_mem, realRepeats, k.c1);
    else
        k.kern2<<< blocks, threads >>>(gpu_mem, realRepeats, k.c1, k.c2);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CHECK_CUDA_ERROR();
    cudaEventElapsedTime(&t, start, stop);
    t *= 1.e6;   // ms -> ns, so flops / t yields GFLOPS directly
    // flopCount = numFloats(pixels) * flopCount/op * numLoopIters * unrollFactor * numStreams
    double flopCount = (double)numFloats * realRepeats * k.flopsPerElem;
    double gflop = flopCount / (double)(t);
    sprintf(sizeStr, "Size:%07d", numFloats);
    resultDB.AddResult(string(k.name)+precision, sizeStr, "GFLOPS", gflop);
    // Zero out the test host memory, then read the device result back
    for (int j=0 ; j<numFloats ; ++j)
        hostMem2[j] = 0.0;
    cudaEventRecord(start, 0);
    cudaMemcpy(hostMem2, gpu_mem, numFloats*sizeof(T), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Check the result -- At a minimum the first half of memory
    // should match the second half exactly
    for (int j=0 ; j<halfNumFloats ; ++j)
    {
        if (hostMem2[j] != hostMem2[numFloats-j-1])
        {
            cout << "Error; hostMem2[" << j << "]=" << hostMem2[j]
                 << " is different from its twin element hostMem2["
                 << (numFloats-j-1) << "]=" << hostMem2[numFloats-j-1]
                 <<"; stopping check\n";
            break;
        }
    }
    // update progress bar
    pb.addItersDone();
    if (!verbose && !quiet)
        pb.Show(stdout);
}

// Benchmark the 16 generic FLOPS kernels for one precision (T = float or
// double), running each 'npasses' times with fresh random inputs, and store
// the measured GFLOPS in 'resultDB' with the given precision suffix.
// 'repeatF' scales each kernel's internal iteration count; 'pb' is advanced
// once per kernel run.
template <class T> void
RunTest(ResultDatabase &resultDB,
        int npasses,
        int verbose,
        int quiet,
        float repeatF,
        ProgressBar &pb,
        const char* precision)
{
    T *gpu_mem;
    T *hostMem, *hostMem2;
    // Scale the in-kernel repeat count by the calibration factor; keep a
    // floor of 2 so timings are never degenerate.
    int realRepeats = (int)round(repeatF*20);
    if (realRepeats < 2)
        realRepeats = 2;
    // Alloc host memory; the buffer holds two mirrored halves so results
    // can be validated without a CPU reference computation.
    int halfNumFloats = 1024*1024;
    int numFloats = 2*halfNumFloats;
    hostMem = new T[numFloats];
    hostMem2 = new T[numFloats];
    cudaMalloc((void**)&gpu_mem, numFloats*sizeof(T));
    CHECK_CUDA_ERROR();
    // Events used for timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    CHECK_CUDA_ERROR();
    // Thread block configuration
    dim3 threads(128,1,1);
    dim3 blocks((numFloats)/128,1,1);
    // One descriptor per generic kernel, in the original execution order.
    // flopsPerElem is spelled out as
    //   flopCount/op * numLoopIters * unrollFactor * numStreams.
    const GenericFlopsKernelDesc<T> kernels[] = {
        { "Add1",     Add1<T>,     0,           (T)10.0, (T)0.0,    1.0*240*1 },
        { "Add2",     Add2<T>,     0,           (T)10.0, (T)0.0,    1.0*120*2 },
        { "Add4",     Add4<T>,     0,           (T)10.0, (T)0.0,    1.0*60*4  },
        { "Add8",     Add8<T>,     0,           (T)10.0, (T)0.0,    1.0*30*8  },
        { "Mul1",     Mul1<T>,     0,           (T)1.01, (T)0.0,    2.0*200*1 },
        { "Mul2",     Mul2<T>,     0,           (T)1.01, (T)0.0,    2.0*100*2 },
        { "Mul4",     Mul4<T>,     0,           (T)1.01, (T)0.0,    2.0*50*4  },
        { "Mul8",     Mul8<T>,     0,           (T)1.01, (T)0.0,    2.0*25*8  },
        { "MAdd1",    0,           MAdd1<T>,    (T)10.0, (T)0.9899, 2.0*240*1 },
        { "MAdd2",    0,           MAdd2<T>,    (T)10.0, (T)0.9899, 2.0*120*2 },
        { "MAdd4",    0,           MAdd4<T>,    (T)10.0, (T)0.9899, 2.0*60*4  },
        { "MAdd8",    0,           MAdd8<T>,    (T)10.0, (T)0.9899, 2.0*30*8  },
        { "MulMAdd1", 0,           MulMAdd1<T>, (T)3.75, (T)0.355,  3.0*160*1 },
        { "MulMAdd2", 0,           MulMAdd2<T>, (T)3.75, (T)0.355,  3.0*80*2  },
        { "MulMAdd4", 0,           MulMAdd4<T>, (T)3.75, (T)0.355,  3.0*40*4  },
        { "MulMAdd8", 0,           MulMAdd8<T>, (T)3.75, (T)0.355,  3.0*20*8  },
    };
    const int numKernels = (int)(sizeof(kernels)/sizeof(kernels[0]));
    for (int pass=0 ; pass<npasses ; ++pass)
    {
        // Benchmark each generic kernel; fresh random numbers are generated
        // for each run inside the helper.
        for (int k=0 ; k<numKernels ; ++k)
            RunOneGenericFlopsKernel(kernels[k], resultDB, precision, pb,
                                     verbose, quiet,
                                     gpu_mem, hostMem, hostMem2,
                                     numFloats, halfNumFloats, realRepeats,
                                     blocks, threads, start, stop);
    }
    delete[] hostMem;
    delete[] hostMem2;
    cudaFree((void*)gpu_mem);
    CHECK_CUDA_ERROR();
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Macros used to construct MaxFlops kernels
// Each mad OP is 32*2 = 64 FLOPS
#define OP { \
s0 = s6*s5 + s28; \
s1 = s7*s6 + s29; \
s2 = s8*s7 + s30; \
s3 = s9*s8 + s31; \
s4 = s10*s9 + s0; \
s5 = s11*s10 + s1; \
s6 = s12*s11 + s2; \
s7 = s13*s12 + s3; \
s8 = s14*s13 + s4; \
s9 = s15*s14 + s5; \
s10 = s16*s15 + s6; \
s11 = s17*s16 + s7; \
s12 = s18*s17 + s8; \
s13 = s19*s18 + s9; \
s14 = s20*s19 + s10; \
s15 = s21*s20 + s11; \
s16 = s22*s21 + s12; \
s17 = s23*s22 + s13; \
s18 = s24*s23 + s14; \
s19 = s25*s24 + s15; \
s20 = s26*s25 + s16; \
s21 = s27*s26 + s17; \
s22 = s28*s27 + s18; \
s23 = s29*s28 + s19; \
s24 = s30*s29 + s20; \
s25 = s31*s30 + s21; \
s26 = s0*s31 + s22; \
s27 = s1*s0 + s23; \
s28 = s2*s1 + s24; \
s29 = s3*s2 + s25; \
s30 = s4*s3 + s26; \
s31 = s5*s4 + s27; \
}
// so Each OP10 is 640 FLOPS
#define OP10 { OP OP OP OP OP OP OP OP OP OP }
// Each mad+mul MMOP is 8*3 = 24 FLOPS
#define MMOP { \
s0 = s4*s4 + s4; \
s6 = s0*s5; \
s1 = s5*s5 + s5; \
s7 = s1*s6; \
s2 = s6*s6 + s6; \
s0 = s2*s7; \
s3 = s7*s7 + s7; \
s1 = s3*s0; \
s4 = s0*s0 + s0; \
s2 = s4*s1; \
s5 = s1*s1 + s1; \
s3 = s5*s2; \
s6 = s2*s2 + s2; \
s4 = s6*s3; \
s7 = s3*s3 + s3; \
s5 = s7*s4; \
}
// So each MMOP10 is 240 FLOPS (10 MMOPs x 24 FLOPS)
#define MMOP10 { MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP MMOP }
// Benchmark Kernels
// Peak single-precision FLOPS benchmark kernel.
// Runs 32 interleaved chains of dependent multiply-adds (the OP macro) for a
// fixed 32000 FLOPS per element, then stores the sum so the work is not
// dead-code eliminated. The exact unrolled sequence IS the measured workload.
// Assumes a 1D launch that exactly covers 'target' -- there is no bounds check.
__global__ void MAddU(float *target, float val1, float val2)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Create a bunch of local variables we can use up to 32 steps..
// ('register' is only a hint and is ignored by modern compilers.)
register float v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
register float v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
register float v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
register float v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
register float v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
register float v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
register float v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
register float v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
register float s0=v0, s1=v1, s2=v2, s3=v3;
register float s4=v4, s5=v5, s6=v6, s7=v7;
register float s8=v8, s9=v9, s10=v10, s11=v11;
register float s12=v12, s13=v13, s14=v14, s15=v15;
register float s16=v16, s17=v17, s18=v18, s19=v19;
register float s20=v20, s21=v21, s22=v22, s23=v23;
register float s24=v24, s25=v25, s26=v26, s27=v27;
register float s28=v28, s29=v29, s30=v30, s31=v31;
// 10 OP10s inside the loop = 6400 FLOPS in the .ptx code
// and 5 loops of 10 OP10s = 32000 FLOPS per pixel total
for (int i=0; i<5; i++)
{
OP10; OP10; OP10; OP10; OP10;
OP10; OP10; OP10; OP10; OP10;
}
// Summing all 32 streams keeps every chain live for the optimizer.
float result = (s0+s1+s2+s3+s4+s5+s6+s7+
s8+s9+s10+s11+s12+s13+s14+s15 +
s16+s17+s18+s19+s20+s21+s22+s23+
s24+s25+s26+s27+s28+s29+s30+s31);
target[index] = result;
}
// Double-precision twin of MAddU: 32 interleaved dependent multiply-add
// chains (OP macro), 32000 FLOPS per element. The unrolled sequence is the
// measured workload; do not restructure. Assumes a 1D launch exactly
// covering 'target' (no bounds check).
__global__ void MAddU_DP(double *target, double val1, double val2)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
register double v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
register double v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
register double v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
register double v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
register double v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
register double v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
register double v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
register double v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
register double s0=v0, s1=v1, s2=v2, s3=v3;
register double s4=v4, s5=v5, s6=v6, s7=v7;
register double s8=v8, s9=v9, s10=v10, s11=v11;
register double s12=v12, s13=v13, s14=v14, s15=v15;
register double s16=v16, s17=v17, s18=v18, s19=v19;
register double s20=v20, s21=v21, s22=v22, s23=v23;
register double s24=v24, s25=v25, s26=v26, s27=v27;
register double s28=v28, s29=v29, s30=v30, s31=v31;
// 10 OP10s inside the loop = 6400 FLOPS in the .ptx code
// and 5 loops of 10 OP10s = 32000 FLOPS per pixel total
for (int i=0; i<5; i++)
{
OP10; OP10; OP10; OP10; OP10;
OP10; OP10; OP10; OP10; OP10;
}
// Sum of all 32 streams keeps every chain live.
double result = (s0+s1+s2+s3+s4+s5+s6+s7+
s8+s9+s10+s11+s12+s13+s14+s15 +
s16+s17+s18+s19+s20+s21+s22+s23+
s24+s25+s26+s27+s28+s29+s30+s31);
target[index] = result;
}
// Mixed mul+mad FLOPS benchmark kernel (MMOP macro: interleaved multiplies
// and multiply-adds over 8 streams, 24 FLOPS each). 12000 FLOPS per element.
// The unrolled sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'target' (no bounds check).
__global__ void MulMAddU(float *target, float val1, float val2)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
register float v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
register float v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
register float v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
register float v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
register float v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
register float v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
register float v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
register float v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
register float s0=v0, s1=v1, s2=v2, s3=v3;
register float s4=v4, s5=v5, s6=v6, s7=v7;
register float s8=v8, s9=v9, s10=v10, s11=v11;
register float s12=v12, s13=v13, s14=v14, s15=v15;
register float s16=v16, s17=v17, s18=v18, s19=v19;
register float s20=v20, s21=v21, s22=v22, s23=v23;
register float s24=v24, s25=v25, s26=v26, s27=v27;
register float s28=v28, s29=v29, s30=v30, s31=v31;
// 10 MMOP10s inside the loop = 2400 FLOPS in the .ptx code
// and 5 loops of 10 MMOP10s = 12000 FLOPS per pixel total
for (int i=0; i<5; i++)
{
MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
}
// MMOP only touches s0..s7, but summing all 32 keeps the init code live too.
float result = (s0+s1+s2+s3+s4+s5+s6+s7+
s8+s9+s10+s11+s12+s13+s14+s15 +
s16+s17+s18+s19+s20+s21+s22+s23+
s24+s25+s26+s27+s28+s29+s30+s31);
target[index] = result;
}
// Double-precision twin of MulMAddU (MMOP macro over 8 streams, 24 FLOPS
// each). 12000 FLOPS per element. The unrolled sequence is the measured
// workload; do not restructure. Assumes a 1D launch exactly covering
// 'target' (no bounds check).
__global__ void MulMAddU_DP(double *target, double val1, double val2)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
register double v0=val1, v1=val2, v2=v0+v1, v3=v0+v2;
register double v4=v0+v3, v5=v0+v4, v6=v0+v5, v7=v0+v6;
register double v8=v0+v7, v9=v0+v8, v10=v0+v9, v11=v0+v10;
register double v12=v0+v11, v13=v0+v12, v14=v0+v13, v15=v0+v14;
register double v16=v0+v15, v17=v16+v0, v18=v16+v1, v19=v16+v2;
register double v20=v16+v3, v21=v16+v4, v22=v16+v5, v23=v16+v6;
register double v24=v16+v7, v25=v16+v8, v26=v16+v9, v27=v16+v10;
register double v28=v16+v11, v29=v16+v12, v30=v16+v13, v31=v16+v14;
register double s0=v0, s1=v1, s2=v2, s3=v3;
register double s4=v4, s5=v5, s6=v6, s7=v7;
register double s8=v8, s9=v9, s10=v10, s11=v11;
register double s12=v12, s13=v13, s14=v14, s15=v15;
register double s16=v16, s17=v17, s18=v18, s19=v19;
register double s20=v20, s21=v21, s22=v22, s23=v23;
register double s24=v24, s25=v25, s26=v26, s27=v27;
register double s28=v28, s29=v29, s30=v30, s31=v31;
// 10 MMOP10s inside the loop = 2400 FLOPS in the .ptx code
// and 5 loops of 10 MMOP10s = 12000 FLOPS per pixel total
for (int i=0; i<5; i++)
{
MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
MMOP10; MMOP10; MMOP10; MMOP10; MMOP10;
}
double result = (s0+s1+s2+s3+s4+s5+s6+s7+
s8+s9+s10+s11+s12+s13+s14+s15 +
s16+s17+s18+s19+s20+s21+s22+s23+
s24+s25+s26+s27+s28+s29+s30+s31);
target[index] = result;
}
// v = 10.0
#define ADD1_OP s=v-s;
#define ADD2_OP ADD1_OP s2=v-s2;
#define ADD4_OP ADD2_OP s3=v-s3; s4=v-s4;
#define ADD8_OP ADD4_OP s5=v-s5; s6=v-s6; s7=v-s7; s8=v-s8;
// v = 1.01
#define MUL1_OP s=s*s*v;
#define MUL2_OP MUL1_OP s2=s2*s2*v;
#define MUL4_OP MUL2_OP s3=s3*s3*v; s4=s4*s4*v;
#define MUL8_OP MUL4_OP s5=s5*s5*v; s6=s6*s6*v; s7=s7*s7*v; s8=s8*s8*v;
// v1 = 10.0, v2 = 0.9899
#define MADD1_OP s=v1-s*v2;
#define MADD2_OP MADD1_OP s2=v1-s2*v2;
#define MADD4_OP MADD2_OP s3=v1-s3*v2; s4=v1-s4*v2;
#define MADD8_OP MADD4_OP s5=v1-s5*v2; s6=v1-s6*v2; s7=v1-s7*v2; s8=v1-s8*v2;
// v1 = 3.75, v2 = 0.355
#define MULMADD1_OP s=(v1-v2*s)*s;
#define MULMADD2_OP MULMADD1_OP s2=(v1-v2*s2)*s2;
#define MULMADD4_OP MULMADD2_OP s3=(v1-v2*s3)*s3; s4=(v1-v2*s4)*s4;
#define MULMADD8_OP MULMADD4_OP s5=(v1-v2*s5)*s5; s6=(v1-v2*s6)*s6; s7=(v1-v2*s7)*s7; s8=(v1-v2*s8)*s8;
#define ADD1_MOP20 \
ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP \
ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP ADD1_OP
#define ADD2_MOP20 \
ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP \
ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP ADD2_OP
#define ADD4_MOP10 \
ADD4_OP ADD4_OP ADD4_OP ADD4_OP ADD4_OP \
ADD4_OP ADD4_OP ADD4_OP ADD4_OP ADD4_OP
#define ADD8_MOP5 \
ADD8_OP ADD8_OP ADD8_OP ADD8_OP ADD8_OP
#define MUL1_MOP20 \
MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP \
MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP MUL1_OP
#define MUL2_MOP20 \
MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP \
MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP MUL2_OP
#define MUL4_MOP10 \
MUL4_OP MUL4_OP MUL4_OP MUL4_OP MUL4_OP \
MUL4_OP MUL4_OP MUL4_OP MUL4_OP MUL4_OP
#define MUL8_MOP5 \
MUL8_OP MUL8_OP MUL8_OP MUL8_OP MUL8_OP
#define MADD1_MOP20 \
MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP \
MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP MADD1_OP
#define MADD2_MOP20 \
MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP \
MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP MADD2_OP
#define MADD4_MOP10 \
MADD4_OP MADD4_OP MADD4_OP MADD4_OP MADD4_OP \
MADD4_OP MADD4_OP MADD4_OP MADD4_OP MADD4_OP
#define MADD8_MOP5 \
MADD8_OP MADD8_OP MADD8_OP MADD8_OP MADD8_OP
#define MULMADD1_MOP20 \
MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP \
MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP MULMADD1_OP
#define MULMADD2_MOP20 \
MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP \
MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP MULMADD2_OP
#define MULMADD4_MOP10 \
MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP \
MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP MULMADD4_OP
#define MULMADD8_MOP5 \
MULMADD8_OP MULMADD8_OP MULMADD8_OP MULMADD8_OP MULMADD8_OP
// Add-throughput microbenchmark, 1 dependent stream (no ILP): repeats
// s = v - s. 240 add ops per loop iteration. The unrolled macro sequence is
// the measured workload; do not restructure. Assumes a 1D launch exactly
// covering 'data' (no bounds check).
template <class T>
__global__ void Add1(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
// 'register' is only a hint; modern compilers ignore it.
register T s = data[gid];
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 12 times for 240 operations per iteration.
*/
ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20
ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20 ADD1_MOP20
}
// Store the result so the chain is not dead-code eliminated.
data[gid] = s;
}
// Add-throughput microbenchmark, 2 independent streams (2-way ILP):
// 6 x 20 ADD2_OPs = 240 add ops per loop iteration. The unrolled macro
// sequence is the measured workload; do not restructure. Assumes a 1D
// launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void Add2(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 6 times: 120 ADD2_OPs = 240 operations per iteration.
*/
ADD2_MOP20 ADD2_MOP20 ADD2_MOP20
ADD2_MOP20 ADD2_MOP20 ADD2_MOP20
}
data[gid] = s+s2;
}
// Add-throughput microbenchmark, 4 independent streams (4-way ILP):
// 6 x 10 ADD4_OPs = 240 add ops per loop iteration. The unrolled macro
// sequence is the measured workload; do not restructure. Assumes a 1D
// launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void Add4(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 10 ADD4_OPs (4 adds each).
Unrolled 6 times: 60 ADD4_OPs = 240 operations per iteration.
*/
ADD4_MOP10 ADD4_MOP10 ADD4_MOP10
ADD4_MOP10 ADD4_MOP10 ADD4_MOP10
}
data[gid] = (s+s2)+(s3+s4);
}
// Add-throughput microbenchmark, 8 independent streams (8-way ILP):
// 6 x 5 ADD8_OPs = 240 add ops per loop iteration. The unrolled macro
// sequence is the measured workload; do not restructure. Assumes a 1D
// launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void Add8(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 5 ADD8_OPs (8 adds each).
Unrolled 6 times: 30 ADD8_OPs = 240 operations per iteration.
*/
ADD8_MOP5 ADD8_MOP5 ADD8_MOP5
ADD8_MOP5 ADD8_MOP5 ADD8_MOP5
}
data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
// Multiply-throughput microbenchmark, 1 dependent stream: repeats
// s = s*s*v (2 FLOPs per op), 400 FLOPs per loop iteration. The
// data[gid]-data[gid]+0.999f seed keeps the global load live while making
// the start value data-independent (barring NaN/Inf inputs, which would
// propagate). The unrolled sequence is the measured workload.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void Mul1(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid]-data[gid]+0.999f;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 10 times for 200 MUL1_OPs per iteration.
*/
MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20
MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20 MUL1_MOP20
}
data[gid] = s;
}
// Multiply-throughput microbenchmark, 2 independent streams (2-way ILP):
// 5 x 20 MUL2_OPs per iteration, 2 FLOPs per stream op. Seed is forced to
// ~0.999f while keeping the global load live. The unrolled sequence is the
// measured workload. Assumes a 1D launch exactly covering 'data'.
template <class T>
__global__ void Mul2(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 5 times for 100 MUL2_OPs per iteration.
*/
MUL2_MOP20 MUL2_MOP20 MUL2_MOP20
MUL2_MOP20 MUL2_MOP20
}
data[gid] = s+s2;
}
// Multiply-throughput microbenchmark, 4 independent streams (4-way ILP):
// 5 x 10 MUL4_OPs per iteration. Seed is forced to ~0.999f while keeping
// the global load live. The unrolled sequence is the measured workload.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void Mul4(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f, s3=s-0.0002f, s4=s-0.0003f;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 10 MUL4_OPs (4 streams each).
Unrolled 5 times for 50 MUL4_OPs per iteration.
*/
MUL4_MOP10 MUL4_MOP10 MUL4_MOP10
MUL4_MOP10 MUL4_MOP10
}
data[gid] = (s+s2)+(s3+s4);
}
// Multiply-throughput microbenchmark, 8 independent streams (8-way ILP):
// 5 x 5 MUL8_OPs per iteration. Seed is forced to ~0.999f while keeping the
// global load live. The unrolled sequence is the measured workload.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void Mul8(T *data, int nIters, T v) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid]-data[gid]+0.999f, s2=s-0.0001f, s3=s-0.0002f, s4=s-0.0003f, s5=s-0.0004f, s6=s-0.0005f, s7=s-0.0006f, s8=s-0.0007f;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 5 MUL8_OPs (8 streams each).
Unrolled 5 times for 25 MUL8_OPs per iteration.
*/
MUL8_MOP5 MUL8_MOP5 MUL8_MOP5
MUL8_MOP5 MUL8_MOP5
}
data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
// Multiply-add-throughput microbenchmark, 1 dependent stream: repeats
// s = v1 - s*v2 (one mad, 2 FLOPs), 12 x 20 ops per loop iteration.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MAdd1(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid];
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 12 times for 240 MADD1_OPs per iteration.
*/
MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20
MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20 MADD1_MOP20
}
data[gid] = s;
}
// Multiply-add-throughput microbenchmark, 2 independent streams (2-way
// ILP): 6 x 20 MADD2_OPs per iteration, each op one mad per stream.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MAdd2(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 6 times for 120 MADD2_OPs per iteration.
*/
MADD2_MOP20 MADD2_MOP20 MADD2_MOP20
MADD2_MOP20 MADD2_MOP20 MADD2_MOP20
}
data[gid] = s+s2;
}
// Multiply-add-throughput microbenchmark, 4 independent streams (4-way
// ILP): 6 x 10 MADD4_OPs per iteration, one mad per stream per op.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MAdd4(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 10 MADD4_OPs (4 streams each).
Unrolled 6 times for 60 MADD4_OPs per iteration.
*/
MADD4_MOP10 MADD4_MOP10 MADD4_MOP10
MADD4_MOP10 MADD4_MOP10 MADD4_MOP10
}
data[gid] = (s+s2)+(s3+s4);
}
// Multiply-add-throughput microbenchmark, 8 independent streams (8-way
// ILP): 6 x 5 MADD8_OPs per iteration, one mad per stream per op.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MAdd8(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 5 MADD8_OPs (8 streams each).
Unrolled 6 times for 30 MADD8_OPs per iteration.
*/
MADD8_MOP5 MADD8_MOP5 MADD8_MOP5
MADD8_MOP5 MADD8_MOP5 MADD8_MOP5
}
data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
// Mul+mad-throughput microbenchmark, 1 dependent stream: repeats
// s = (v1 - v2*s)*s (3 FLOPs per op), 8 x 20 ops per loop iteration.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MulMAdd1(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid];
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 8 times for 160 MULMADD1_OPs per iteration.
*/
MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20
MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20 MULMADD1_MOP20
}
data[gid] = s;
}
// Mul+mad-throughput microbenchmark, 2 independent streams (2-way ILP):
// 4 x 20 MULMADD2_OPs per iteration, 3 FLOPs per stream op.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MulMAdd2(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 20 operations.
Unrolled 4 times for 80 MULMADD2_OPs per iteration.
*/
MULMADD2_MOP20 MULMADD2_MOP20
MULMADD2_MOP20 MULMADD2_MOP20
}
data[gid] = s+s2;
}
// Mul+mad-throughput microbenchmark, 4 independent streams (4-way ILP):
// 4 x 10 MULMADD4_OPs per iteration, 3 FLOPs per stream op.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MulMAdd4(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 10 MULMADD4_OPs (4 streams each).
Unrolled 4 times for 40 MULMADD4_OPs per iteration.
*/
MULMADD4_MOP10 MULMADD4_MOP10
MULMADD4_MOP10 MULMADD4_MOP10
}
data[gid] = (s+s2)+(s3+s4);
}
// Mul+mad-throughput microbenchmark, 8 independent streams (8-way ILP):
// 4 x 5 MULMADD8_OPs per iteration, 3 FLOPs per stream op.
// The unrolled macro sequence is the measured workload; do not restructure.
// Assumes a 1D launch exactly covering 'data' (no bounds check).
template <class T>
__global__ void MulMAdd8(T *data, int nIters, T v1, T v2) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register T s = data[gid], s2=10.0f-s, s3=9.0f-s, s4=9.0f-s2, s5=8.0f-s, s6=8.0f-s2, s7=7.0f-s, s8=7.0f-s2;
for (int j=0 ; j<nIters ; ++j) {
/* Each macro op has 5 MULMADD8_OPs (8 streams each).
Unrolled 4 times for 20 MULMADD8_OPs per iteration.
*/
MULMADD8_MOP5 MULMADD8_MOP5
MULMADD8_MOP5 MULMADD8_MOP5
}
data[gid] = ((s+s2)+(s3+s4))+((s5+s6)+(s7+s8));
}
|
5a2eb7400001cf30134ecd23429b718007701567.hip | // !!! This is a file automatically generated by hipify!!!
#define GUI
//#include "Solver.cuh"
#include "Window.h"
extern Solver solver;
/////////////////////////////////
//section experimental
// Enable peer-to-peer (P2P) memory access between every ordered pair of
// visible devices that supports it. Devices that cannot peer are silently
// skipped; a single-device call is a no-op.
// NOTE(review): assumes device IDs are the runtime's default 0..devices_count-1
// enumeration -- confirm against how callers pass devices_count.
void EnableP2Psharing(unsigned int devices_count = 1) {
    std::cout << "Enabling P2P sharing..." << std::endl;
    for (unsigned int i = 0; i < devices_count; i++) {
        // hipDeviceEnablePeerAccess applies to the current device, so select
        // device i once per outer iteration (was redundantly inside the inner loop).
        hipSetDevice(i);
        for (unsigned int j = 0; j < devices_count; j++) {
            // A device always accesses its own memory; enabling self-peering
            // is an error, and hipDeviceCanAccessPeer reports 0 for i == j anyway.
            if (i == j)
                continue;
            int is_able = 0; // was 'NULL' -- a pointer constant, not an int
            hipDeviceCanAccessPeer(&is_able, i, j);
            if (is_able) {
                checkCudaErrors(hipDeviceEnablePeerAccess(j, 0));
                // Report the whole pair (the old message only named device i).
                std::cout << "Enabled P2P sharing for: " << i << " -> " << j << std::endl;
            }
        }
    }
}
/////////////////////////////////
#include "wtypes.h"
#include <iostream>
using namespace std;
// Get the horizontal and vertical screen sizes in pixel (Windows only).
// Outputs go to the by-reference parameters; no return value.
// NOTE(review): GetWindowRect on the desktop window reports the primary
// monitor's rect; under per-monitor DPI virtualization the values may be
// scaled -- confirm against how main() combines this with GetDpiForWindow.
void GetDesktopResolution(int& horizontal, int& vertical)
{
RECT desktop;
// Get a handle to the desktop window
const HWND hDesktop = GetDesktopWindow();
// Get the size of screen to the variable desktop
GetWindowRect(hDesktop, &desktop);
// The top left corner will have coordinates (0,0)
// and the bottom right corner will have coordinates
// (horizontal, vertical)
horizontal = desktop.right;
vertical = desktop.bottom;
}
// Program entry point (HIP build): enumerate GPUs, select the one with the
// most global memory, optionally enable P2P sharing, then hand control to
// the Solver / Window loop. Behaviour is heavily compile-time configured
// via EXPERIMENTAL, OBJECTS_EXPERIMENTAL, WINDOWS7_BUILD and GUI.
// Optional argv: [1] = device count, [2] = explicit device index.
int main(int argc, char* argv[]) {
//srand(1);
// --- Device enumeration and reporting ---
int devicesCount;
hipGetDeviceCount(&devicesCount);
std::cout << "Found " << devicesCount << " devices:" << std::endl;
std::cout << "----------------------------------------" << std::endl;
for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
{
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, deviceIndex);
std::cout << deviceProperties.name << " -> " << deviceProperties.totalGlobalMem << std::endl;
}
// --- Pick the device with the largest global memory ---
int Best_Device_Index = 0;
long long Memory = 0;
for (int deviceIndex = 0; deviceIndex < devicesCount; deviceIndex++) {
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, deviceIndex);
if (deviceProperties.totalGlobalMem > Memory){
Memory = deviceProperties.totalGlobalMem;
Best_Device_Index = deviceIndex;
}
}
std::cout << "----------------------------------------" << std::endl;
hipSetDevice(Best_Device_Index);
std::cout << "Choosing device: " << Best_Device_Index << std::endl;
#ifndef WINDOWS7_BUILD
// Disabled (if (false)): would reserve the maximum persisting-L2 window.
// NOTE(review): this HIP build passes the CUDA enum name
// cudaLimitPersistingL2CacheSize to hipDeviceSetLimit -- verify it compiles
// under the hipify mapping in use.
if (false) {
hipDeviceProp_t deviceProperties;
hipGetDeviceProperties(&deviceProperties, Best_Device_Index);
hipDeviceSetLimit(cudaLimitPersistingL2CacheSize, deviceProperties.persistingL2CacheMaxSize); /* Set aside max possible size of L2 cache for persisting accesses */
std::cout << "Setting L2 max cache: " << deviceProperties.persistingL2CacheMaxSize << std::endl;
}
#endif // !WINDOWS7_BUILD
// devicesCount (int) converts implicitly to the function's unsigned parameter.
EnableP2Psharing(devicesCount);
hipSetDevice(Best_Device_Index);
#ifdef EXPERIMENTAL
// --- Solver initialization driven by command-line arguments ---
if (argc <= 1) {
if (false) {
std::cout << "Using All (" << devicesCount << ") devices" << std::endl;
solver.Initialize(devicesCount, Best_Device_Index);
}
else {
std::cout << "Using (" << 1 << ") device" << std::endl;
solver.Initialize(1, Best_Device_Index);
}
}
else if ( argc == 2){
std::cout << "Using " << std::stoi(argv[1]) << " devices" << std::endl;
solver.Initialize(std::stoi(argv[1]),Best_Device_Index);
}
else if (argc == 3) {
std::cout << "Using " << std::stoi(argv[1]) << " devices" << std::endl;
std::cout << "Using device: " << std::stoi(argv[2]);
hipSetDevice(std::stoi(argv[2]));
solver.Initialize(std::stoi(argv[1]), std::stoi(argv[2]));
}
#ifdef OBJECTS_EXPERIMENTAL
std::cout << "Generating example scene" << std::endl;
solver.ExampleScene(true);//true
#else
solver.ExportVDBScene();
#endif
//solver.ExampleScene(true);
#ifndef WINDOWS7_BUILD
// --- Window sizing from the desktop resolution and DPI (Win32) ---
HWND hd = GetDesktopWindow();
RECT rect;  // NOTE(review): unused local
int no_menu_bar_width = GetSystemMetrics(SM_CXFULLSCREEN);
int no_menu_bar_height = GetSystemMetrics(SM_CYFULLSCREEN);
/*
int no_menu_bar_width = 0;
int no_menu_bar_height = 0;
GetDesktopResolution(no_menu_bar_width, no_menu_bar_height);
*/
int zoom = GetDpiForWindow(hd);
// NOTE(review): GetDpiForWindow returns dots-per-inch (96 == 100% scale);
// dividing by 100 approximates the scale factor (96/100 = 0.96) -- the
// commented-out switch below did the exact DPI->scale mapping. Confirm intent.
double dpi = (float)zoom / 100;
/*
switch (zoom) {
case 96:
dpi = 1;
std::cout << "100%" << std::endl;
break;
case 120:
dpi = 1.25;
std::cout << "125%" << std::endl;
break;
case 144:
dpi = 1.5;
std::cout << "150%" << std::endl;
break;
case 192:
dpi = 2;
std::cout << "200%" << std::endl;
break;
default:
std::cout << "error" << std::endl;
break;
}
*/
#else
int no_menu_bar_width = 1400;
int no_menu_bar_height = 800;
double dpi = 1;
#endif
std::cout << "DPI: " << dpi << std::endl;
int width = no_menu_bar_width * dpi;
int height = no_menu_bar_height * dpi;
std::cout << width << "x" << height << std::endl;
// NOTE(review): int -> float narrowing in this aggregate initializer.
float Window_Resolution[2] = { width, height };
float Image_Resolution[2] = { 900, 1024 };
std::cout << "Setting image resolution" << std::endl;
solver.setImageResolution(Image_Resolution[0], Image_Resolution[1]);
solver.Initialize_Simulation();
Window(Window_Resolution, dpi);
solver.Clear_Simulation_Data();
//std::cout << "Rendering animation video..." << std::endl;
//std::system("make_video.sh");
#else
#ifdef GUI
std::cout << "Hello" << std::endl;
float Image_Resolution[2] = { 640, 640 };
const int3 img_d = make_int3(Image_Resolution[0], Image_Resolution[1], 0);
// NOTE(review): 'img' is allocated here but never used or freed in this
// branch -- leak / dead code, confirm before removing.
uint8_t* img = new uint8_t[3 * img_d.x * img_d.y];
Window(Image_Resolution);
#else
initialize();
#endif
#endif
return 0;
} | 5a2eb7400001cf30134ecd23429b718007701567.cu | #define GUI
//#include "Solver.cuh"
#include "Window.h"
extern Solver solver;
/////////////////////////////////
//section experimental
// Enable peer-to-peer (P2P) memory access between every ordered pair of
// visible devices that supports it. Devices that cannot peer are silently
// skipped; a single-device call is a no-op.
// NOTE(review): assumes device IDs are the runtime's default 0..devices_count-1
// enumeration -- confirm against how callers pass devices_count.
void EnableP2Psharing(unsigned int devices_count = 1) {
    std::cout << "Enabling P2P sharing..." << std::endl;
    for (unsigned int i = 0; i < devices_count; i++) {
        // cudaDeviceEnablePeerAccess applies to the current device, so select
        // device i once per outer iteration (was redundantly inside the inner loop).
        cudaSetDevice(i);
        for (unsigned int j = 0; j < devices_count; j++) {
            // A device always accesses its own memory; enabling self-peering
            // is an error, and cudaDeviceCanAccessPeer reports 0 for i == j anyway.
            if (i == j)
                continue;
            int is_able = 0; // was 'NULL' -- a pointer constant, not an int
            cudaDeviceCanAccessPeer(&is_able, i, j);
            if (is_able) {
                checkCudaErrors(cudaDeviceEnablePeerAccess(j, 0));
                // Report the whole pair (the old message only named device i).
                std::cout << "Enabled P2P sharing for: " << i << " -> " << j << std::endl;
            }
        }
    }
}
/////////////////////////////////
#include "wtypes.h"
#include <iostream>
using namespace std;
// Get the horizontal and vertical screen sizes in pixel (Windows only).
// Outputs go to the by-reference parameters; no return value.
// NOTE(review): GetWindowRect on the desktop window reports the primary
// monitor's rect; under per-monitor DPI virtualization the values may be
// scaled -- confirm against how main() combines this with GetDpiForWindow.
void GetDesktopResolution(int& horizontal, int& vertical)
{
RECT desktop;
// Get a handle to the desktop window
const HWND hDesktop = GetDesktopWindow();
// Get the size of screen to the variable desktop
GetWindowRect(hDesktop, &desktop);
// The top left corner will have coordinates (0,0)
// and the bottom right corner will have coordinates
// (horizontal, vertical)
horizontal = desktop.right;
vertical = desktop.bottom;
}
// Program entry point (CUDA build): enumerate GPUs, select the one with the
// most global memory, optionally enable P2P sharing, then hand control to
// the Solver / Window loop. Behaviour is heavily compile-time configured
// via EXPERIMENTAL, OBJECTS_EXPERIMENTAL, WINDOWS7_BUILD and GUI.
// Optional argv: [1] = device count, [2] = explicit device index.
int main(int argc, char* argv[]) {
//srand(1);
// --- Device enumeration and reporting ---
int devicesCount;
cudaGetDeviceCount(&devicesCount);
std::cout << "Found " << devicesCount << " devices:" << std::endl;
std::cout << "----------------------------------------" << std::endl;
for (int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
{
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, deviceIndex);
std::cout << deviceProperties.name << " -> " << deviceProperties.totalGlobalMem << std::endl;
}
// --- Pick the device with the largest global memory ---
int Best_Device_Index = 0;
long long Memory = 0;
for (int deviceIndex = 0; deviceIndex < devicesCount; deviceIndex++) {
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, deviceIndex);
if (deviceProperties.totalGlobalMem > Memory){
Memory = deviceProperties.totalGlobalMem;
Best_Device_Index = deviceIndex;
}
}
std::cout << "----------------------------------------" << std::endl;
cudaSetDevice(Best_Device_Index);
std::cout << "Choosing device: " << Best_Device_Index << std::endl;
#ifndef WINDOWS7_BUILD
// Disabled (if (false)): would reserve the maximum persisting-L2 window
// (cudaLimitPersistingL2CacheSize requires CUDA 11+ / CC 8.0+ hardware).
if (false) {
cudaDeviceProp deviceProperties;
cudaGetDeviceProperties(&deviceProperties, Best_Device_Index);
cudaDeviceSetLimit(cudaLimitPersistingL2CacheSize, deviceProperties.persistingL2CacheMaxSize); /* Set aside max possible size of L2 cache for persisting accesses */
std::cout << "Setting L2 max cache: " << deviceProperties.persistingL2CacheMaxSize << std::endl;
}
#endif // !WINDOWS7_BUILD
// devicesCount (int) converts implicitly to the function's unsigned parameter.
EnableP2Psharing(devicesCount);
cudaSetDevice(Best_Device_Index);
#ifdef EXPERIMENTAL
// --- Solver initialization driven by command-line arguments ---
if (argc <= 1) {
if (false) {
std::cout << "Using All (" << devicesCount << ") devices" << std::endl;
solver.Initialize(devicesCount, Best_Device_Index);
}
else {
std::cout << "Using (" << 1 << ") device" << std::endl;
solver.Initialize(1, Best_Device_Index);
}
}
else if ( argc == 2){
std::cout << "Using " << std::stoi(argv[1]) << " devices" << std::endl;
solver.Initialize(std::stoi(argv[1]),Best_Device_Index);
}
else if (argc == 3) {
std::cout << "Using " << std::stoi(argv[1]) << " devices" << std::endl;
std::cout << "Using device: " << std::stoi(argv[2]);
cudaSetDevice(std::stoi(argv[2]));
solver.Initialize(std::stoi(argv[1]), std::stoi(argv[2]));
}
#ifdef OBJECTS_EXPERIMENTAL
std::cout << "Generating example scene" << std::endl;
solver.ExampleScene(true);//true
#else
solver.ExportVDBScene();
#endif
//solver.ExampleScene(true);
#ifndef WINDOWS7_BUILD
// --- Window sizing from the desktop resolution and DPI (Win32) ---
HWND hd = GetDesktopWindow();
RECT rect;  // NOTE(review): unused local
int no_menu_bar_width = GetSystemMetrics(SM_CXFULLSCREEN);
int no_menu_bar_height = GetSystemMetrics(SM_CYFULLSCREEN);
/*
int no_menu_bar_width = 0;
int no_menu_bar_height = 0;
GetDesktopResolution(no_menu_bar_width, no_menu_bar_height);
*/
int zoom = GetDpiForWindow(hd);
// NOTE(review): GetDpiForWindow returns dots-per-inch (96 == 100% scale);
// dividing by 100 approximates the scale factor (96/100 = 0.96) -- the
// commented-out switch below did the exact DPI->scale mapping. Confirm intent.
double dpi = (float)zoom / 100;
/*
switch (zoom) {
case 96:
dpi = 1;
std::cout << "100%" << std::endl;
break;
case 120:
dpi = 1.25;
std::cout << "125%" << std::endl;
break;
case 144:
dpi = 1.5;
std::cout << "150%" << std::endl;
break;
case 192:
dpi = 2;
std::cout << "200%" << std::endl;
break;
default:
std::cout << "error" << std::endl;
break;
}
*/
#else
int no_menu_bar_width = 1400;
int no_menu_bar_height = 800;
double dpi = 1;
#endif
std::cout << "DPI: " << dpi << std::endl;
int width = no_menu_bar_width * dpi;
int height = no_menu_bar_height * dpi;
std::cout << width << "x" << height << std::endl;
// NOTE(review): int -> float narrowing in this aggregate initializer.
float Window_Resolution[2] = { width, height };
float Image_Resolution[2] = { 900, 1024 };
std::cout << "Setting image resolution" << std::endl;
solver.setImageResolution(Image_Resolution[0], Image_Resolution[1]);
solver.Initialize_Simulation();
Window(Window_Resolution, dpi);
solver.Clear_Simulation_Data();
//std::cout << "Rendering animation video..." << std::endl;
//std::system("make_video.sh");
#else
#ifdef GUI
std::cout << "Hello" << std::endl;
float Image_Resolution[2] = { 640, 640 };
const int3 img_d = make_int3(Image_Resolution[0], Image_Resolution[1], 0);
// NOTE(review): 'img' is allocated here but never used or freed in this
// branch -- leak / dead code, confirm before removing.
uint8_t* img = new uint8_t[3 * img_d.x * img_d.y];
Window(Image_Resolution);
#else
initialize();
#endif
#endif
return 0;
}
8860b5b8346f67a81ec9c117c88e67b764c92cb3.hip | // !!! This is a file automatically generated by hipify!!!
/**
*
* @file cublasDparfb.c
*
* PLASMA core_blas kernel
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Dulceneia Becker
* @date 2011-06-14
* @generated d Tue Jan 7 11:44:48 2014
*
**/
#include <cblas.h>
#include <lapacke.h>
#include "common.h"
#include "runtime.h"
#include "core_blas-gpu.h"
/***************************************************************************//**
*
* @ingroup CORE_double
*
* CORE_dparfb applies a complex upper triangular block reflector H
* or its transpose H' to a complex rectangular matrix formed by
* coupling two tiles A1 and A2. Matrix V is:
*
* COLUMNWISE ROWWISE
*
* | K | | N2-L | L |
* __ _____________ __ __ _________________ __
* | | | | | \
* | | | | | \ L
* M2-L | | | K |_______________|_____\ __
* | | | M2 | |
* __ |____| | | | K-L
* \ | | __ |______________________| __
* L \ | |
* __ \|______| __ | N2 |
*
* | L | K-L |
*
*******************************************************************************
*
* @param[in] side
* @arg PlasmaLeft : apply Q or Q**T from the Left;
* @arg PlasmaRight : apply Q or Q**T from the Right.
*
* @param[in] trans
* @arg PlasmaNoTrans : No transpose, apply Q;
* @arg PlasmaTrans : ConjTranspose, apply Q**T.
*
* @param[in] direct
* Indicates how H is formed from a product of elementary
* reflectors
* @arg PlasmaForward : H = H(1) H(2) . . . H(k) (Forward)
* @arg PlasmaBackward : H = H(k) . . . H(2) H(1) (Backward)
*
* @param[in] storev
* Indicates how the vectors which define the elementary
* reflectors are stored:
* @arg PlasmaColumnwise
* @arg PlasmaRowwise
*
* @param[in] M1
* The number of columns of the tile A1. M1 >= 0.
*
* @param[in] N1
* The number of rows of the tile A1. N1 >= 0.
*
* @param[in] M2
* The number of columns of the tile A2. M2 >= 0.
*
* @param[in] N2
* The number of rows of the tile A2. N2 >= 0.
*
* @param[in] K
* The order of the matrix T (= the number of elementary
* reflectors whose product defines the block reflector).
*
* @param[in] L
* The size of the triangular part of V
*
* @param[in,out] A1
* On entry, the M1-by-N1 tile A1.
* On exit, A1 is overwritten by the application of Q.
*
* @param[in] LDA1
* The leading dimension of the array A1. LDA1 >= max(1,N1).
*
* @param[in,out] A2
* On entry, the M2-by-N2 tile A2.
* On exit, A2 is overwritten by the application of Q.
*
* @param[in] LDA2
* The leading dimension of the tile A2. LDA2 >= max(1,N2).
*
* @param[in] V
* (LDV,K) if STOREV = 'C'
* (LDV,M2) if STOREV = 'R' and SIDE = 'L'
* (LDV,N2) if STOREV = 'R' and SIDE = 'R'
* Matrix V.
*
* @param[in] LDV
* The leading dimension of the array V.
* If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M2);
* if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N2);
* if STOREV = 'R', LDV >= K.
*
* @param[out] T
* The triangular K-by-K matrix T in the representation of the
* block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] LDT
* The leading dimension of the array T. LDT >= K.
*
* @param[in,out] WORK
*
* @param[in] LDWORK
* The dimension of the array WORK.
*
*******************************************************************************
*
* @return
* \retval PLASMA_SUCCESS successful exit
* \retval <0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
/* This kernel is never traced so return type on previous line for convert2eztrace.pl script */
/* GPU (hipBLAS) port of PLASMA's CORE_dparfb: applies the block reflector
 * H (or H^T) defined by (V, T) to the coupled tiles [A1; A2] (side = left)
 * or [A1 A2] (side = right), via the cublasDpamm helper plus hipBLAS trmm.
 * Only direct == PlasmaForward is implemented; PlasmaBackward returns
 * PLASMA_ERR_NOT_SUPPORTED.
 * NOTE(review): return codes of the hipBLAS calls below are ignored. */
int
cublasDparfb(hipblasHandle_t handle, hipblasSideMode_t side, hipblasOperation_t trans, PLASMA_enum direct, PLASMA_enum storev,
             int M1, int N1, int M2, int N2, int K, int L,
             double *A1, int LDA1,
             double *A2, int LDA2,
             const double *V, int LDV,
             const double *T, int LDT,
             double *WORK, int LDWORK)
{
    static double zone = 1.0;
    static double mzone = -1.0;
    int j;
    /* Check input arguments */
    if ((side != HIPBLAS_SIDE_LEFT) && (side != HIPBLAS_SIDE_RIGHT)) {
        coreblas_error(1, "Illegal value of side");
        return -1;
    }
    if ((trans != HIPBLAS_OP_N) && (trans != HIPBLAS_OP_T)) {
        coreblas_error(2, "Illegal value of trans");
        return -2;
    }
    if ((direct != PlasmaForward) && (direct != PlasmaBackward)) {
        coreblas_error(3, "Illegal value of direct");
        return -3;
    }
    if ((storev != PlasmaColumnwise) && (storev != PlasmaRowwise)) {
        coreblas_error(4, "Illegal value of storev");
        return -4;
    }
    if (M1 < 0) {
        coreblas_error(5, "Illegal value of M1");
        return -5;
    }
    if (N1 < 0) {
        coreblas_error(6, "Illegal value of N1");
        return -6;
    }
    if ((M2 < 0) ||
        ( (side == HIPBLAS_SIDE_RIGHT) && (M1 != M2) ) ) {
        coreblas_error(7, "Illegal value of M2");
        return -7;
    }
    if ((N2 < 0) ||
        ( (side == HIPBLAS_SIDE_LEFT) && (N1 != N2) ) ) {
        coreblas_error(8, "Illegal value of N2");
        return -8;
    }
    if (K < 0) {
        coreblas_error(9, "Illegal value of K");
        return -9;
    }
    /* Quick return */
    if ((M1 == 0) || (N1 == 0) || (M2 == 0) || (N2 == 0) || (K == 0))
        return PLASMA_SUCCESS;
    if (direct == PlasmaForward) {
        if (side == HIPBLAS_SIDE_LEFT) {
            /*
             * Column or Rowwise / Forward / Left
             * ----------------------------------
             *
             * Form  H * A  or H' * A  where  A = ( A1 )
             *                                    ( A2 )
             */
            /* W = A1 + op(V) * A2 */
            cublasDpamm(
                handle, PlasmaW, HIPBLAS_SIDE_LEFT, storev,
                K, N1, M2, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
            /* W = op(T) * W */
            // should use KBLAS
            /* hipblasDtrmm takes separate B and C pointers; passing WORK
             * for both performs the triangular update in place. */
            hipblasDtrmm(
                handle, side, HIPBLAS_FILL_MODE_UPPER,
                trans, HIPBLAS_DIAG_NON_UNIT, K, N2,
                &zone, T, LDT, WORK, LDWORK, WORK, LDWORK);
            //cblas_dtrmm(
            //CblasColMajor, CblasLeft, CblasUpper,
            //(CBLAS_TRANSPOSE)trans, CblasNonUnit, K, N2,
            //(zone), T, LDT, WORK, LDWORK);
            /* A1 = A1 - W, one column per axpy */
            for(j = 0; j < N1; j++) {
                hipblasDaxpy(
                    handle, K, &mzone,
                    &WORK[LDWORK*j], 1, &A1[LDA1*j], 1);
                //cblas_daxpy(
                //K, (mzone),
                //&WORK[LDWORK*j], 1,
                //&A1[LDA1*j], 1);
            }
            /* A2 = A2 - op(V) * W */
            /* W also changes: W = V * W, A2 = A2 - W */
            cublasDpamm(
                handle, PlasmaA2, HIPBLAS_SIDE_LEFT, storev,
                M2, N2, K, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
        }
        else {
            /*
             * Column or Rowwise / Forward / Right
             * -----------------------------------
             *
             * Form  H * A  or H' * A  where A  = ( A1 A2 )
             *
             */
            /* W = A1 + A2 * op(V) */
            cublasDpamm(
                handle, PlasmaW, HIPBLAS_SIDE_RIGHT, storev,
                M1, K, N2, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
            /* W = W * op(T) */
            // should use KBLAS
            /* In-place trmm: WORK passed as both B and C. */
            hipblasDtrmm(
                handle, side, HIPBLAS_FILL_MODE_UPPER,
                trans, HIPBLAS_DIAG_NON_UNIT, M2, K,
                &zone, T, LDT, WORK, LDWORK, WORK, LDWORK);
            //cblas_dtrmm(
            //CblasColMajor, CblasRight, CblasUpper,
            //(CBLAS_TRANSPOSE)trans, CblasNonUnit, M2, K,
            //(zone), T, LDT, WORK, LDWORK);
            /* A1 = A1 - W, one column per axpy */
            for(j = 0; j < K; j++) {
                hipblasDaxpy(
                    handle, M1, &mzone,
                    &WORK[LDWORK*j], 1, &A1[LDA1*j], 1);
                //cblas_daxpy(
                //M1, (mzone),
                //&WORK[LDWORK*j], 1,
                //&A1[LDA1*j], 1);
            }
            /* A2 = A2 - W * op(V) */
            /* W also changes: W = W * V', A2 = A2 - W */
            cublasDpamm(
                handle, PlasmaA2, HIPBLAS_SIDE_RIGHT, storev,
                M2, N2, K, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
        }
    }
    else {
        coreblas_error(3, "Not implemented (Backward / Left or Right)");
        return PLASMA_ERR_NOT_SUPPORTED;
    }
    return PLASMA_SUCCESS;
}
| 8860b5b8346f67a81ec9c117c88e67b764c92cb3.cu | /**
*
* @file cublasDparfb.c
*
* PLASMA core_blas kernel
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Dulceneia Becker
* @date 2011-06-14
* @generated d Tue Jan 7 11:44:48 2014
*
**/
#include <cblas.h>
#include <lapacke.h>
#include "common.h"
#include "runtime.h"
#include "core_blas-gpu.h"
/***************************************************************************//**
*
* @ingroup CORE_double
*
* CORE_dparfb applies a complex upper triangular block reflector H
* or its transpose H' to a complex rectangular matrix formed by
* coupling two tiles A1 and A2. Matrix V is:
*
* COLUMNWISE ROWWISE
*
* | K | | N2-L | L |
* __ _____________ __ __ _________________ __
* | | | | | \
* | | | | | \ L
* M2-L | | | K |_______________|_____\ __
* | | | M2 | |
* __ |____| | | | K-L
* \ | | __ |______________________| __
* L \ | |
* __ \|______| __ | N2 |
*
* | L | K-L |
*
*******************************************************************************
*
* @param[in] side
* @arg PlasmaLeft : apply Q or Q**T from the Left;
* @arg PlasmaRight : apply Q or Q**T from the Right.
*
* @param[in] trans
* @arg PlasmaNoTrans : No transpose, apply Q;
* @arg PlasmaTrans : ConjTranspose, apply Q**T.
*
* @param[in] direct
* Indicates how H is formed from a product of elementary
* reflectors
* @arg PlasmaForward : H = H(1) H(2) . . . H(k) (Forward)
* @arg PlasmaBackward : H = H(k) . . . H(2) H(1) (Backward)
*
* @param[in] storev
* Indicates how the vectors which define the elementary
* reflectors are stored:
* @arg PlasmaColumnwise
* @arg PlasmaRowwise
*
* @param[in] M1
* The number of columns of the tile A1. M1 >= 0.
*
* @param[in] N1
* The number of rows of the tile A1. N1 >= 0.
*
* @param[in] M2
* The number of columns of the tile A2. M2 >= 0.
*
* @param[in] N2
* The number of rows of the tile A2. N2 >= 0.
*
* @param[in] K
* The order of the matrix T (= the number of elementary
* reflectors whose product defines the block reflector).
*
* @param[in] L
* The size of the triangular part of V
*
* @param[in,out] A1
* On entry, the M1-by-N1 tile A1.
* On exit, A1 is overwritten by the application of Q.
*
* @param[in] LDA1
* The leading dimension of the array A1. LDA1 >= max(1,N1).
*
* @param[in,out] A2
* On entry, the M2-by-N2 tile A2.
* On exit, A2 is overwritten by the application of Q.
*
* @param[in] LDA2
* The leading dimension of the tile A2. LDA2 >= max(1,N2).
*
* @param[in] V
* (LDV,K) if STOREV = 'C'
* (LDV,M2) if STOREV = 'R' and SIDE = 'L'
* (LDV,N2) if STOREV = 'R' and SIDE = 'R'
* Matrix V.
*
* @param[in] LDV
* The leading dimension of the array V.
* If STOREV = 'C' and SIDE = 'L', LDV >= max(1,M2);
* if STOREV = 'C' and SIDE = 'R', LDV >= max(1,N2);
* if STOREV = 'R', LDV >= K.
*
* @param[out] T
* The triangular K-by-K matrix T in the representation of the
* block reflector.
* T is upper triangular by block (economic storage);
* The rest of the array is not referenced.
*
* @param[in] LDT
* The leading dimension of the array T. LDT >= K.
*
* @param[in,out] WORK
*
* @param[in] LDWORK
* The dimension of the array WORK.
*
*******************************************************************************
*
* @return
* \retval PLASMA_SUCCESS successful exit
* \retval <0 if -i, the i-th argument had an illegal value
*
******************************************************************************/
/* This kernel is never traced so return type on previous line for convert2eztrace.pl script */
/* GPU (cuBLAS) port of PLASMA's CORE_dparfb: applies the block reflector
 * H (or H^T) defined by (V, T) to the coupled tiles [A1; A2] (side = left)
 * or [A1 A2] (side = right), via the cublasDpamm helper plus cuBLAS trmm.
 * Only direct == PlasmaForward is implemented; PlasmaBackward returns
 * PLASMA_ERR_NOT_SUPPORTED.
 * NOTE(review): return codes of the cuBLAS calls below are ignored. */
int
cublasDparfb(cublasHandle_t handle, cublasSideMode_t side, cublasOperation_t trans, PLASMA_enum direct, PLASMA_enum storev,
             int M1, int N1, int M2, int N2, int K, int L,
             double *A1, int LDA1,
             double *A2, int LDA2,
             const double *V, int LDV,
             const double *T, int LDT,
             double *WORK, int LDWORK)
{
    static double zone = 1.0;
    static double mzone = -1.0;
    int j;
    /* Check input arguments */
    if ((side != CUBLAS_SIDE_LEFT) && (side != CUBLAS_SIDE_RIGHT)) {
        coreblas_error(1, "Illegal value of side");
        return -1;
    }
    if ((trans != CUBLAS_OP_N) && (trans != CUBLAS_OP_T)) {
        coreblas_error(2, "Illegal value of trans");
        return -2;
    }
    if ((direct != PlasmaForward) && (direct != PlasmaBackward)) {
        coreblas_error(3, "Illegal value of direct");
        return -3;
    }
    if ((storev != PlasmaColumnwise) && (storev != PlasmaRowwise)) {
        coreblas_error(4, "Illegal value of storev");
        return -4;
    }
    if (M1 < 0) {
        coreblas_error(5, "Illegal value of M1");
        return -5;
    }
    if (N1 < 0) {
        coreblas_error(6, "Illegal value of N1");
        return -6;
    }
    if ((M2 < 0) ||
        ( (side == CUBLAS_SIDE_RIGHT) && (M1 != M2) ) ) {
        coreblas_error(7, "Illegal value of M2");
        return -7;
    }
    if ((N2 < 0) ||
        ( (side == CUBLAS_SIDE_LEFT) && (N1 != N2) ) ) {
        coreblas_error(8, "Illegal value of N2");
        return -8;
    }
    if (K < 0) {
        coreblas_error(9, "Illegal value of K");
        return -9;
    }
    /* Quick return */
    if ((M1 == 0) || (N1 == 0) || (M2 == 0) || (N2 == 0) || (K == 0))
        return PLASMA_SUCCESS;
    if (direct == PlasmaForward) {
        if (side == CUBLAS_SIDE_LEFT) {
            /*
             * Column or Rowwise / Forward / Left
             * ----------------------------------
             *
             * Form  H * A  or H' * A  where  A = ( A1 )
             *                                    ( A2 )
             */
            /* W = A1 + op(V) * A2 */
            cublasDpamm(
                handle, PlasmaW, CUBLAS_SIDE_LEFT, storev,
                K, N1, M2, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
            /* W = op(T) * W */
            // should use KBLAS
            /* cublasDtrmm takes separate B and C pointers; passing WORK
             * for both performs the triangular update in place. */
            cublasDtrmm(
                handle, side, CUBLAS_FILL_MODE_UPPER,
                trans, CUBLAS_DIAG_NON_UNIT, K, N2,
                &zone, T, LDT, WORK, LDWORK, WORK, LDWORK);
            //cblas_dtrmm(
            //CblasColMajor, CblasLeft, CblasUpper,
            //(CBLAS_TRANSPOSE)trans, CblasNonUnit, K, N2,
            //(zone), T, LDT, WORK, LDWORK);
            /* A1 = A1 - W, one column per axpy */
            for(j = 0; j < N1; j++) {
                cublasDaxpy(
                    handle, K, &mzone,
                    &WORK[LDWORK*j], 1, &A1[LDA1*j], 1);
                //cblas_daxpy(
                //K, (mzone),
                //&WORK[LDWORK*j], 1,
                //&A1[LDA1*j], 1);
            }
            /* A2 = A2 - op(V) * W */
            /* W also changes: W = V * W, A2 = A2 - W */
            cublasDpamm(
                handle, PlasmaA2, CUBLAS_SIDE_LEFT, storev,
                M2, N2, K, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
        }
        else {
            /*
             * Column or Rowwise / Forward / Right
             * -----------------------------------
             *
             * Form  H * A  or H' * A  where A  = ( A1 A2 )
             *
             */
            /* W = A1 + A2 * op(V) */
            cublasDpamm(
                handle, PlasmaW, CUBLAS_SIDE_RIGHT, storev,
                M1, K, N2, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
            /* W = W * op(T) */
            // should use KBLAS
            /* In-place trmm: WORK passed as both B and C. */
            cublasDtrmm(
                handle, side, CUBLAS_FILL_MODE_UPPER,
                trans, CUBLAS_DIAG_NON_UNIT, M2, K,
                &zone, T, LDT, WORK, LDWORK, WORK, LDWORK);
            //cblas_dtrmm(
            //CblasColMajor, CblasRight, CblasUpper,
            //(CBLAS_TRANSPOSE)trans, CblasNonUnit, M2, K,
            //(zone), T, LDT, WORK, LDWORK);
            /* A1 = A1 - W, one column per axpy */
            for(j = 0; j < K; j++) {
                cublasDaxpy(
                    handle, M1, &mzone,
                    &WORK[LDWORK*j], 1, &A1[LDA1*j], 1);
                //cblas_daxpy(
                //M1, (mzone),
                //&WORK[LDWORK*j], 1,
                //&A1[LDA1*j], 1);
            }
            /* A2 = A2 - W * op(V) */
            /* W also changes: W = W * V', A2 = A2 - W */
            cublasDpamm(
                handle, PlasmaA2, CUBLAS_SIDE_RIGHT, storev,
                M2, N2, K, L,
                A1, LDA1,
                A2, LDA2,
                V, LDV,
                WORK, LDWORK);
        }
    }
    else {
        coreblas_error(3, "Not implemented (Backward / Left or Right)");
        return PLASMA_ERR_NOT_SUPPORTED;
    }
    return PLASMA_SUCCESS;
}
|
c1a8d3ddeb44eb09de6543b772383494a8b071f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
// Abort with file/line context when a HIP runtime call fails.
// HIP errors are sticky, so an unchecked failure makes every later call
// fail mysteriously -- wrap every HIP API call with this macro.
#define CHECK(call)                                                            \
  {                                                                            \
    const hipError_t error = call;                                             \
    if (error != hipSuccess) {                                                 \
      fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);                   \
      fprintf(stderr, "code: %d, reason: %s\n", error,                         \
              hipGetErrorString(error));                                       \
      exit(1);                                                                 \
    }                                                                          \
  }
// Millisecond-resolution GPU timer built on a pair of HIP events.
// Events are created/destroyed with the object's lifetime (RAII).
struct GpuTimer {
  hipEvent_t start;
  hipEvent_t stop;
  GpuTimer() {
    hipEventCreate(&start);
    hipEventCreate(&stop);
  }
  ~GpuTimer() {
    hipEventDestroy(start);
    hipEventDestroy(stop);
  }
  // Record the start event on the default stream and wait for it, so the
  // timestamp is taken before any subsequently launched work.
  void Start() {
    hipEventRecord(start, 0);
    hipEventSynchronize(start);
  }
  // Record the stop event on the default stream (asynchronous).
  void Stop() { hipEventRecord(stop, 0); }
  // Wait for the stop event, then return start->stop time in milliseconds.
  float Elapsed() {
    float elapsed;
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed, start, stop);
    return elapsed;
  }
};
// Print the first n elements of a, space-separated, followed by a newline.
void printArray(const uint32_t *a, int n) {
  int idx = 0;
  while (idx < n) {
    printf("%2i ", a[idx]);
    ++idx;
  }
  printf("\n");
}
// Reference sort: copy `in` to the device, sort ascending with Thrust,
// and copy the result back into the host array `out`.
// `nBits` is unused here; it only matters for the hand-written radix sort.
void sortByThrust(const uint32_t *in, int n, uint32_t *out, int nBits) {
  thrust::device_vector<uint32_t> dv_out(in, in + n);
  thrust::sort(dv_out.begin(), dv_out.end());
  thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Per-block histogram of the current radix digit.
// Each block builds its own nBins-bin histogram in dynamic shared memory
// (atomics on SMEM), then writes it to global memory TRANSPOSED:
// hist[bin * gridDim.x + block].  The bin-major layout makes a single
// exclusive scan over `hist` yield, for every (bin, block) pair, the
// global output offset consumed later by the scatter kernel.
// Assumes nBins is a power of two (digit extracted with & (nBins - 1)).
// Requires nBins * sizeof(int) bytes of dynamic shared memory.
__global__ void computeHistKernel(uint32_t *in, int n, uint32_t *hist,
                                  int nBins, int bit) {
  extern __shared__ int s_hist[];
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  // Zero the shared histogram cooperatively (nBins may exceed blockDim.x).
  for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) {
    s_hist[s_i] = 0;
  }
  __syncthreads();
  // Each block computes its local hist using atomic on SMEM
  if (i < n) {
    int bin = (in[i] >> bit) & (nBins - 1);
    atomicAdd(&s_hist[bin], 1);
  }
  __syncthreads();
  // Write back transposed (bin-major across blocks).
  for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) {
    hist[gridDim.x * s_i + blockIdx.x] = s_hist[s_i];
  }
}
// Per-block inclusive scan (Hillis-Steele) of in[0..n) into out[0..n),
// one element per lane; each block's total is published to blkSums (when
// non-NULL) for a later cross-block fix-up pass.
// Requires blockDim.x * sizeof(uint32_t) bytes of dynamic shared memory.
//
// Fix: the original returned early for lanes with i >= n, so a partial
// tail block reached the __syncthreads() barriers below with only some of
// its threads -- undefined behavior that can hang or corrupt the scan.
// All lanes now participate in every barrier; out-of-range lanes scan a
// neutral 0 and skip the final store, so outputs for valid indices are
// unchanged (and the tail block's blkSums entry is now deterministic
// instead of left unwritten).
__global__ void scanBlkKernel(uint32_t *in, int n, uint32_t *out,
                              uint32_t *blkSums) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  extern __shared__ uint32_t s_in[];
  // Out-of-range lanes contribute 0 so valid prefixes are unaffected.
  s_in[threadIdx.x] = (i < n) ? in[i] : 0;
  __syncthreads();
  for (int stride = 1; stride < blockDim.x; stride *= 2) {
    uint32_t strideVal = 0;
    if (threadIdx.x >= stride) {
      strideVal = s_in[threadIdx.x - stride];
    }
    __syncthreads();  // everyone reads before anyone writes
    if (threadIdx.x >= stride) {
      s_in[threadIdx.x] += strideVal;
    }
    __syncthreads();
  }
  // Last lane publishes the block total for the cross-block pass.
  if (blkSums && threadIdx.x == blockDim.x - 1) {
    blkSums[blockIdx.x] = s_in[threadIdx.x];
  }
  if (i < n) {
    out[i] = s_in[threadIdx.x];
  }
}
// TODO: You can define necessary functions here
// Second level of the two-level scan: add the (already host-scanned) sum
// of the preceding scan block to each element.  The caller passes `in`
// shifted past the first scan block so that launch block b here lines up
// with blkSums[b] -- assumes this launch uses the same block size as the
// scan kernel (TODO confirm at call site).
__global__ void addBlkSums(uint32_t *in, int n, uint32_t *blkSums) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) {
    return;
  }
  in[i] += blkSums[blockIdx.x];
}
// Scatter pass of the radix sort for the digit at bits [bit, bit+nBits).
// Each block first sorts its own chunk in shared memory with a stable
// nBits-pass binary split (one bit per pass), then uses the transposed,
// exclusively scanned histogram (histScan[bin * gridDim.x + block]) to
// write every element straight to its final position in `out`.
//
// Dynamic shared memory layout: five arrays of blockDim.x words
// (s_in, s_inBin, s_inBinScan, s_out, s_outBin) followed by nBins words
// for s_startIdx -- the host passes (5 * blockDim.x + nBins) * 4 bytes.
//
// Out-of-range lanes (partial tail block) get the sentinel bin nBins-1 so
// the stable local sort pushes them behind every valid element; their
// (uninitialized) s_in payload is never written to `out`.
//
// Fix: the original returned early for lanes with i >= n BEFORE the final
// __syncthreads(), so a partial tail block hit the barrier with only a
// subset of its threads (undefined behavior), and valid lanes could read
// s_startIdx before the writes above it were visible.  The guard is now
// placed AFTER the barrier; out-of-range lanes still do no further work,
// so results for valid lanes are unchanged.
__global__ void scatter(const uint32_t *in, int n, const uint32_t *histScan,
                        uint32_t *out, int nBins, int bit, int nBits) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  uint32_t s_n = blockDim.x;
  //// init smem
  extern __shared__ uint32_t s_data[];
  uint32_t *s_in = (uint32_t *)s_data;
  uint32_t *s_inBin = (uint32_t *)(s_in + s_n);
  uint32_t *s_inBinScan = (uint32_t *)(s_inBin + s_n);
  uint32_t *s_out = (uint32_t *)(s_inBinScan + s_n);
  uint32_t *s_outBin = (uint32_t *)(s_out + s_n);
  uint32_t *s_startIdx = (uint32_t *)(s_outBin + s_n);
  if (i >= n) {
    s_inBin[threadIdx.x] = nBins - 1;  // sentinel: sorts behind valid data
  } else {
    s_in[threadIdx.x] = in[i];
    s_inBin[threadIdx.x] = (s_in[threadIdx.x] >> bit) & (nBins - 1);
  }
  __syncthreads();
  //// stable local sort of the chunk, one bit of the digit per pass
  for (int b = 0; b < nBits; ++b) {
    // exclusive scan of bit b of the PREVIOUS lane's bin
    if (threadIdx.x == 0) {
      s_inBinScan[threadIdx.x] = 0;
    } else {
      s_inBinScan[threadIdx.x] = (s_inBin[threadIdx.x - 1] >> b) & 1;
    }
    __syncthreads();
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
      uint32_t strideVal = 0;
      if (threadIdx.x >= stride) {
        strideVal = s_inBinScan[threadIdx.x - stride];
      }
      __syncthreads();
      if (threadIdx.x >= stride) {
        s_inBinScan[threadIdx.x] += strideVal;
      }
      __syncthreads();
    }
    // stable split: zeros keep relative order in front, ones go after
    // the nZeros zero-bit elements
    uint32_t rank;
    if ((s_inBin[threadIdx.x] >> b) & 1) {
      const uint32_t nZeros =
          s_n - s_inBinScan[s_n - 1] - ((s_inBin[s_n - 1] >> b) & 1);
      rank = nZeros + s_inBinScan[threadIdx.x];
    } else {
      rank = threadIdx.x - s_inBinScan[threadIdx.x];
    }
    s_outBin[rank] = s_inBin[threadIdx.x];
    s_out[rank] = s_in[threadIdx.x];
    __syncthreads();
    s_inBin[threadIdx.x] = s_outBin[threadIdx.x];
    s_in[threadIdx.x] = s_out[threadIdx.x];
    __syncthreads();
  }
  //// record the first lane of each bin's run in the sorted chunk
  if (threadIdx.x == 0 || s_inBin[threadIdx.x] != s_inBin[threadIdx.x - 1]) {
    s_startIdx[s_inBin[threadIdx.x]] = threadIdx.x;
  }
  __syncthreads();  // all lanes reach the barrier before any drop out
  if (i >= n) {
    return;
  }
  //// number of same-bin elements at lower lanes
  uint32_t preCount = threadIdx.x - s_startIdx[s_inBin[threadIdx.x]];
  //// final position = global base offset of (bin, block) + local rank
  uint32_t rank =
      histScan[gridDim.x * s_inBin[threadIdx.x] + blockIdx.x] + preCount;
  out[rank] = s_in[threadIdx.x];
}
// Debug helper: copy n elements from device memory into a temporary host
// buffer and print them.  hipMemcpy is blocking, so no extra sync is
// needed; its return code is deliberately unchecked (debug-only path).
void printDeviceArray(const uint32_t *d_arr, int n) {
  const int BYTES = n * sizeof(*d_arr);
  uint32_t *arr = (uint32_t *)malloc(BYTES);
  hipMemcpy(arr, d_arr, BYTES, hipMemcpyDeviceToHost);
  printArray(arr, n);
  free(arr);
}
// Compare two arrays element-wise; print the index of the first mismatch
// (and stop) or "CORRECT :)" when all n elements agree.
void checkCorrectness(uint32_t *out, uint32_t *correctOut, int n) {
  int idx = 0;
  while (idx < n && out[idx] == correctOut[idx]) {
    ++idx;
  }
  if (idx < n) {
    printf("INCORRECT :( %d/%d\n", idx, n);
    return;
  }
  printf("CORRECT :)\n");
}
// (Partially) Parallel radix sort: implement parallel histogram and parallel
// scan in counting sort Assume: nBits (k in slides) in {1, 2, 4, 8, 16} Why
// "int * blockSizes"? Because we may want different block sizes for diffrent
// kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
// Radix sort on the GPU, processing nBits bits per pass (32/nBits passes).
// Per pass: (1) computeHistKernel builds per-block digit histograms in a
// transposed, bin-major layout; (2) a two-level exclusive scan (per-block
// GPU scan shifted by one slot, host scan of the block sums, addBlkSums)
// turns those counts into global output offsets; (3) scatter locally
// sorts each chunk and writes every element to its final position.
// d_in/d_out are ping-ponged each pass; 32/nBits is even for nBits in
// {1,2,4,8,16}, so the final result lands back in d_in, which is copied
// to `out`.
void sortByDevice(const uint32_t *in, int n, uint32_t *out, int nBits,
                  int *blockSizes) {
  const int nBins = 1 << nBits;
  const dim3 histBlockSize = dim3(blockSizes[0]);
  const int histBlockCount = (n - 1) / histBlockSize.x + 1;
  const dim3 histGridSize = dim3(histBlockCount);
  const dim3 scanBlockSize = dim3(blockSizes[1]);
  const int scanBlockCount = (nBins * histBlockCount - 1) / scanBlockSize.x + 1;
  const dim3 scanGridSize = dim3(scanBlockCount);
  const size_t ARRAY_BYTES = n * sizeof(uint32_t);
  const size_t HIST_SMEM_BYTES = nBins * sizeof(uint32_t);
  const size_t HIST_BYTES = histBlockCount * HIST_SMEM_BYTES;
  const size_t BLKSUMS_BYTES = scanBlockCount * sizeof(uint32_t);
  const size_t SCAN_SMEM_BYTES = scanBlockSize.x * sizeof(uint32_t);
  // scatter needs 5 per-thread arrays plus one nBins-sized start-index table
  const size_t SCATTER_SMEM_BYTES =
      5 * histBlockSize.x * sizeof(uint32_t) + HIST_SMEM_BYTES;
  uint32_t *d_in;
  uint32_t *d_out;
  uint32_t *d_hist; // contains all the transposed local histogram of all blocks
  uint32_t *d_histScan;
  uint32_t *d_blkSums;
  uint32_t *blkSums = (uint32_t *)malloc(BLKSUMS_BYTES);
  CHECK(hipMalloc(&d_in, ARRAY_BYTES));
  CHECK(hipMalloc(&d_out, ARRAY_BYTES));
  CHECK(hipMalloc(&d_hist, HIST_BYTES));
  CHECK(hipMalloc(&d_histScan, HIST_BYTES));
  CHECK(hipMalloc(&d_blkSums, BLKSUMS_BYTES));
  CHECK(hipMemcpy(d_in, in, ARRAY_BYTES, hipMemcpyHostToDevice));
  GpuTimer timer;
  for (int bit = 0; bit < 8 * sizeof(uint32_t); bit += nBits) {
    printf("#%d (iteration):\n", bit/nBits + 1);
    //Step 1: Calculate local histogram of each block, transpose and copy to d_hist
    printf(" + Step 1. Local histogram. ");
    timer.Start();
    hipLaunchKernelGGL(( computeHistKernel), dim3(histGridSize), dim3(histBlockSize), HIST_SMEM_BYTES, 0,
        d_in, n, d_hist, nBins, bit);
    CHECK(hipGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // Step 2: Scan d_hist
    printf(" + Step 2. Exclusive scan. ");
    timer.Start();
    // Exclusive scan = a leading 0 followed by the inclusive scan of the
    // first total-1 elements, hence the d_histScan + 1 output offset.
    CHECK(hipMemset(d_histScan, 0, sizeof(uint32_t)));
    hipLaunchKernelGGL(( scanBlkKernel), dim3(scanGridSize), dim3(scanBlockSize), SCAN_SMEM_BYTES, 0,
        d_hist, histBlockCount * nBins - 1, d_histScan + 1, d_blkSums);
    CHECK(hipGetLastError());
    // scan blksums on the host (scanBlockCount is small)
    CHECK(
        hipMemcpy(blkSums, d_blkSums, BLKSUMS_BYTES, hipMemcpyDeviceToHost));
    for (int i = 1; i < scanBlockCount; ++i) {
      blkSums[i] += blkSums[i - 1];
    }
    CHECK(
        hipMemcpy(d_blkSums, blkSums, BLKSUMS_BYTES, hipMemcpyHostToDevice));
    // add scanned blkSums to every scan block after the first
    hipLaunchKernelGGL(( addBlkSums), dim3(scanGridSize), dim3(scanBlockSize), 0, 0,
        d_histScan + scanBlockSize.x + 1,
        histBlockCount * nBins - scanBlockSize.x - 1, d_blkSums);
    CHECK(hipGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // Step 3: scatter
    printf(" + Step 3. Scatter. ");
    timer.Start();
    hipLaunchKernelGGL(( scatter), dim3(histGridSize), dim3(histBlockSize), SCATTER_SMEM_BYTES, 0,
        d_in, n, d_histScan, d_out, nBins, bit, nBits);
    CHECK(hipGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // ping-pong the buffers for the next pass
    uint32_t *tmp = d_in;
    d_in = d_out;
    d_out = tmp;
  }
  hipMemcpy(out, d_in, ARRAY_BYTES, hipMemcpyDeviceToHost); // NOTE(review): unchecked, unlike the other HIP calls
  free(blkSums);
  CHECK(hipFree(d_in));
  CHECK(hipFree(d_out));
  CHECK(hipFree(d_hist));
  CHECK(hipFree(d_histScan));
  CHECK(hipFree(d_blkSums));
}
// Radix sort
// Dispatch to one of the two sort implementations and time the whole run.
// NOTE(review): the flag meaning is inverted relative to its name --
// useThrust == false runs the Thrust reference sort, useThrust == true
// runs the hand-written device sort.  Both call sites in main() rely on
// this, so it is documented here rather than changed.
void sort(const uint32_t *in, int n, uint32_t *out, int nBits,
          bool useThrust = false, int *blockSizes = NULL) {
  GpuTimer timer;
  timer.Start();
  if (useThrust == false) {
    printf("\nRadix sort by thrust\n");
    sortByThrust(in, n, out, nBits);
  } else // use device
  {
    printf("\nRadix sort by device\n");
    sortByDevice(in, n, out, nBits, blockSizes);
  }
  timer.Stop();
  printf("Time: %.3f ms\n", timer.Elapsed());
}
// Print a summary of device 0's capabilities (name, SM count, thread
// limits, memory sizes).  Aborts via CHECK if no HIP device is present.
void printDeviceInfo() {
  hipDeviceProp_t devProv;
  CHECK(hipGetDeviceProperties(&devProv, 0));
  printf("**********GPU info**********\n");
  printf("Name: %s\n", devProv.name);
  printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
  printf("Num SMs: %d\n", devProv.multiProcessorCount);
  printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
  printf("Max num warps per SM: %d\n",
         devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
  printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
  printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
  printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
  printf("****************************\n");
}
// Driver: generate random input, sort it with Thrust (reference) and with
// the hand-written device radix sort, then compare the two results.
// Usage: prog [nBits [histBlockSize scanBlockSize]]
int main(int argc, char **argv) {
  // PRINT OUT DEVICE INFO
  printDeviceInfo();
  // SET UP INPUT SIZE
  int n = (1 << 24) + 1; // deliberately NOT a multiple of the block size
  /* n = 17; */
  printf("\nInput size: %d\n", n);
  // ALLOCATE MEMORIES
  size_t bytes = n * sizeof(uint32_t);
  uint32_t *in = (uint32_t *)malloc(bytes);
  uint32_t *out = (uint32_t *)malloc(bytes);        // Device result
  uint32_t *correctOut = (uint32_t *)malloc(bytes); // Thrust result
  // SET UP INPUT DATA (rand() is not seeded: runs are reproducible)
  for (int i = 0; i < n; i++)
    in[i] = rand();
  /* in[i] = rand() % 16; */
  // SET UP NBITS
  int nBits = 4; // Default
  if (argc > 1)
    nBits = atoi(argv[1]);
  printf("\nNum bits per digit: %d\n", nBits);
  // DETERMINE BLOCK SIZES
  int blockSizes[2] = {512, 512}; // One for histogram, one for scan
  if (argc == 4) {
    blockSizes[0] = atoi(argv[2]);
    blockSizes[1] = atoi(argv[3]);
  }
  printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0],
         blockSizes[1]);
  // SORT BY THRUST (default flag selects the Thrust path -- see sort())
  sort(in, n, correctOut, nBits);
  // SORT BY DEVICE
  sort(in, n, out, nBits, true, blockSizes);
  checkCorrectness(out, correctOut, n);
  // FREE MEMORIES
  free(in);
  free(out);
  free(correctOut);
  return EXIT_SUCCESS;
}
| c1a8d3ddeb44eb09de6543b772383494a8b071f6.cu | #include <stdint.h>
#include <stdio.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
// Abort with file/line context when a CUDA runtime call fails.
// CUDA errors are sticky, so an unchecked failure makes every later call
// fail mysteriously -- wrap every CUDA API call with this macro.
#define CHECK(call)                                                            \
  {                                                                            \
    const cudaError_t error = call;                                            \
    if (error != cudaSuccess) {                                                \
      fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);                   \
      fprintf(stderr, "code: %d, reason: %s\n", error,                         \
              cudaGetErrorString(error));                                      \
      exit(1);                                                                 \
    }                                                                          \
  }
// Millisecond-resolution GPU timer built on a pair of CUDA events.
// Events are created/destroyed with the object's lifetime (RAII).
struct GpuTimer {
  cudaEvent_t start;
  cudaEvent_t stop;
  GpuTimer() {
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
  }
  ~GpuTimer() {
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
  }
  // Record the start event on the default stream and wait for it, so the
  // timestamp is taken before any subsequently launched work.
  void Start() {
    cudaEventRecord(start, 0);
    cudaEventSynchronize(start);
  }
  // Record the stop event on the default stream (asynchronous).
  void Stop() { cudaEventRecord(stop, 0); }
  // Wait for the stop event, then return start->stop time in milliseconds.
  float Elapsed() {
    float elapsed;
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    return elapsed;
  }
};
// Print the first n elements of a, space-separated, followed by a newline.
void printArray(const uint32_t *a, int n) {
  int idx = 0;
  while (idx < n) {
    printf("%2i ", a[idx]);
    ++idx;
  }
  printf("\n");
}
// Reference sort: copy `in` to the device, sort ascending with Thrust,
// and copy the result back into the host array `out`.
// `nBits` is unused here; it only matters for the hand-written radix sort.
void sortByThrust(const uint32_t *in, int n, uint32_t *out, int nBits) {
  thrust::device_vector<uint32_t> dv_out(in, in + n);
  thrust::sort(dv_out.begin(), dv_out.end());
  thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Per-block histogram of the current radix digit.
// Each block builds its own nBins-bin histogram in dynamic shared memory
// (atomics on SMEM), then writes it to global memory TRANSPOSED:
// hist[bin * gridDim.x + block].  The bin-major layout makes a single
// exclusive scan over `hist` yield, for every (bin, block) pair, the
// global output offset consumed later by the scatter kernel.
// Assumes nBins is a power of two (digit extracted with & (nBins - 1)).
// Requires nBins * sizeof(int) bytes of dynamic shared memory.
__global__ void computeHistKernel(uint32_t *in, int n, uint32_t *hist,
                                  int nBins, int bit) {
  extern __shared__ int s_hist[];
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  // Zero the shared histogram cooperatively (nBins may exceed blockDim.x).
  for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) {
    s_hist[s_i] = 0;
  }
  __syncthreads();
  // Each block computes its local hist using atomic on SMEM
  if (i < n) {
    int bin = (in[i] >> bit) & (nBins - 1);
    atomicAdd(&s_hist[bin], 1);
  }
  __syncthreads();
  // Write back transposed (bin-major across blocks).
  for (int s_i = threadIdx.x; s_i < nBins; s_i += blockDim.x) {
    hist[gridDim.x * s_i + blockIdx.x] = s_hist[s_i];
  }
}
// Per-block inclusive scan (Hillis-Steele) of in[0..n) into out[0..n),
// one element per lane; each block's total is published to blkSums (when
// non-NULL) for a later cross-block fix-up pass.
// Requires blockDim.x * sizeof(uint32_t) bytes of dynamic shared memory.
//
// Fix: the original returned early for lanes with i >= n, so a partial
// tail block reached the __syncthreads() barriers below with only some of
// its threads -- undefined behavior that can hang or corrupt the scan.
// All lanes now participate in every barrier; out-of-range lanes scan a
// neutral 0 and skip the final store, so outputs for valid indices are
// unchanged (and the tail block's blkSums entry is now deterministic
// instead of left unwritten).
__global__ void scanBlkKernel(uint32_t *in, int n, uint32_t *out,
                              uint32_t *blkSums) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  extern __shared__ uint32_t s_in[];
  // Out-of-range lanes contribute 0 so valid prefixes are unaffected.
  s_in[threadIdx.x] = (i < n) ? in[i] : 0;
  __syncthreads();
  for (int stride = 1; stride < blockDim.x; stride *= 2) {
    uint32_t strideVal = 0;
    if (threadIdx.x >= stride) {
      strideVal = s_in[threadIdx.x - stride];
    }
    __syncthreads();  // everyone reads before anyone writes
    if (threadIdx.x >= stride) {
      s_in[threadIdx.x] += strideVal;
    }
    __syncthreads();
  }
  // Last lane publishes the block total for the cross-block pass.
  if (blkSums && threadIdx.x == blockDim.x - 1) {
    blkSums[blockIdx.x] = s_in[threadIdx.x];
  }
  if (i < n) {
    out[i] = s_in[threadIdx.x];
  }
}
// TODO: You can define necessary functions here
// Second level of the two-level scan: add the (already host-scanned) sum
// of the preceding scan block to each element.  The caller passes `in`
// shifted past the first scan block so that launch block b here lines up
// with blkSums[b] -- assumes this launch uses the same block size as the
// scan kernel (TODO confirm at call site).
__global__ void addBlkSums(uint32_t *in, int n, uint32_t *blkSums) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) {
    return;
  }
  in[i] += blkSums[blockIdx.x];
}
// Scatter pass of the radix sort for the digit at bits [bit, bit+nBits).
// Each block first sorts its own chunk in shared memory with a stable
// nBits-pass binary split (one bit per pass), then uses the transposed,
// exclusively scanned histogram (histScan[bin * gridDim.x + block]) to
// write every element straight to its final position in `out`.
//
// Dynamic shared memory layout: five arrays of blockDim.x words
// (s_in, s_inBin, s_inBinScan, s_out, s_outBin) followed by nBins words
// for s_startIdx -- the host passes (5 * blockDim.x + nBins) * 4 bytes.
//
// Out-of-range lanes (partial tail block) get the sentinel bin nBins-1 so
// the stable local sort pushes them behind every valid element; their
// (uninitialized) s_in payload is never written to `out`.
//
// Fix: the original returned early for lanes with i >= n BEFORE the final
// __syncthreads(), so a partial tail block hit the barrier with only a
// subset of its threads (undefined behavior), and valid lanes could read
// s_startIdx before the writes above it were visible.  The guard is now
// placed AFTER the barrier; out-of-range lanes still do no further work,
// so results for valid lanes are unchanged.
__global__ void scatter(const uint32_t *in, int n, const uint32_t *histScan,
                        uint32_t *out, int nBins, int bit, int nBits) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  uint32_t s_n = blockDim.x;
  //// init smem
  extern __shared__ uint32_t s_data[];
  uint32_t *s_in = (uint32_t *)s_data;
  uint32_t *s_inBin = (uint32_t *)(s_in + s_n);
  uint32_t *s_inBinScan = (uint32_t *)(s_inBin + s_n);
  uint32_t *s_out = (uint32_t *)(s_inBinScan + s_n);
  uint32_t *s_outBin = (uint32_t *)(s_out + s_n);
  uint32_t *s_startIdx = (uint32_t *)(s_outBin + s_n);
  if (i >= n) {
    s_inBin[threadIdx.x] = nBins - 1;  // sentinel: sorts behind valid data
  } else {
    s_in[threadIdx.x] = in[i];
    s_inBin[threadIdx.x] = (s_in[threadIdx.x] >> bit) & (nBins - 1);
  }
  __syncthreads();
  //// stable local sort of the chunk, one bit of the digit per pass
  for (int b = 0; b < nBits; ++b) {
    // exclusive scan of bit b of the PREVIOUS lane's bin
    if (threadIdx.x == 0) {
      s_inBinScan[threadIdx.x] = 0;
    } else {
      s_inBinScan[threadIdx.x] = (s_inBin[threadIdx.x - 1] >> b) & 1;
    }
    __syncthreads();
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
      uint32_t strideVal = 0;
      if (threadIdx.x >= stride) {
        strideVal = s_inBinScan[threadIdx.x - stride];
      }
      __syncthreads();
      if (threadIdx.x >= stride) {
        s_inBinScan[threadIdx.x] += strideVal;
      }
      __syncthreads();
    }
    // stable split: zeros keep relative order in front, ones go after
    // the nZeros zero-bit elements
    uint32_t rank;
    if ((s_inBin[threadIdx.x] >> b) & 1) {
      const uint32_t nZeros =
          s_n - s_inBinScan[s_n - 1] - ((s_inBin[s_n - 1] >> b) & 1);
      rank = nZeros + s_inBinScan[threadIdx.x];
    } else {
      rank = threadIdx.x - s_inBinScan[threadIdx.x];
    }
    s_outBin[rank] = s_inBin[threadIdx.x];
    s_out[rank] = s_in[threadIdx.x];
    __syncthreads();
    s_inBin[threadIdx.x] = s_outBin[threadIdx.x];
    s_in[threadIdx.x] = s_out[threadIdx.x];
    __syncthreads();
  }
  //// record the first lane of each bin's run in the sorted chunk
  if (threadIdx.x == 0 || s_inBin[threadIdx.x] != s_inBin[threadIdx.x - 1]) {
    s_startIdx[s_inBin[threadIdx.x]] = threadIdx.x;
  }
  __syncthreads();  // all lanes reach the barrier before any drop out
  if (i >= n) {
    return;
  }
  //// number of same-bin elements at lower lanes
  uint32_t preCount = threadIdx.x - s_startIdx[s_inBin[threadIdx.x]];
  //// final position = global base offset of (bin, block) + local rank
  uint32_t rank =
      histScan[gridDim.x * s_inBin[threadIdx.x] + blockIdx.x] + preCount;
  out[rank] = s_in[threadIdx.x];
}
// Debug helper: copy n elements from device memory into a temporary host
// buffer and print them.  cudaMemcpy is blocking, so no extra sync is
// needed; its return code is deliberately unchecked (debug-only path).
void printDeviceArray(const uint32_t *d_arr, int n) {
  const int BYTES = n * sizeof(*d_arr);
  uint32_t *arr = (uint32_t *)malloc(BYTES);
  cudaMemcpy(arr, d_arr, BYTES, cudaMemcpyDeviceToHost);
  printArray(arr, n);
  free(arr);
}
// Compare two arrays element-wise; print the index of the first mismatch
// (and stop) or "CORRECT :)" when all n elements agree.
void checkCorrectness(uint32_t *out, uint32_t *correctOut, int n) {
  int idx = 0;
  while (idx < n && out[idx] == correctOut[idx]) {
    ++idx;
  }
  if (idx < n) {
    printf("INCORRECT :( %d/%d\n", idx, n);
    return;
  }
  printf("CORRECT :)\n");
}
// (Partially) Parallel radix sort: implement parallel histogram and parallel
// scan in counting sort Assume: nBits (k in slides) in {1, 2, 4, 8, 16} Why
// "int * blockSizes"? Because we may want different block sizes for diffrent
// kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
// Radix sort on the GPU, processing nBits bits per pass (32/nBits passes).
// Per pass: (1) computeHistKernel builds per-block digit histograms in a
// transposed, bin-major layout; (2) a two-level exclusive scan (per-block
// GPU scan shifted by one slot, host scan of the block sums, addBlkSums)
// turns those counts into global output offsets; (3) scatter locally
// sorts each chunk and writes every element to its final position.
// d_in/d_out are ping-ponged each pass; 32/nBits is even for nBits in
// {1,2,4,8,16}, so the final result lands back in d_in, which is copied
// to `out`.
void sortByDevice(const uint32_t *in, int n, uint32_t *out, int nBits,
                  int *blockSizes) {
  const int nBins = 1 << nBits;
  const dim3 histBlockSize = dim3(blockSizes[0]);
  const int histBlockCount = (n - 1) / histBlockSize.x + 1;
  const dim3 histGridSize = dim3(histBlockCount);
  const dim3 scanBlockSize = dim3(blockSizes[1]);
  const int scanBlockCount = (nBins * histBlockCount - 1) / scanBlockSize.x + 1;
  const dim3 scanGridSize = dim3(scanBlockCount);
  const size_t ARRAY_BYTES = n * sizeof(uint32_t);
  const size_t HIST_SMEM_BYTES = nBins * sizeof(uint32_t);
  const size_t HIST_BYTES = histBlockCount * HIST_SMEM_BYTES;
  const size_t BLKSUMS_BYTES = scanBlockCount * sizeof(uint32_t);
  const size_t SCAN_SMEM_BYTES = scanBlockSize.x * sizeof(uint32_t);
  // scatter needs 5 per-thread arrays plus one nBins-sized start-index table
  const size_t SCATTER_SMEM_BYTES =
      5 * histBlockSize.x * sizeof(uint32_t) + HIST_SMEM_BYTES;
  uint32_t *d_in;
  uint32_t *d_out;
  uint32_t *d_hist; // contains all the transposed local histogram of all blocks
  uint32_t *d_histScan;
  uint32_t *d_blkSums;
  uint32_t *blkSums = (uint32_t *)malloc(BLKSUMS_BYTES);
  CHECK(cudaMalloc(&d_in, ARRAY_BYTES));
  CHECK(cudaMalloc(&d_out, ARRAY_BYTES));
  CHECK(cudaMalloc(&d_hist, HIST_BYTES));
  CHECK(cudaMalloc(&d_histScan, HIST_BYTES));
  CHECK(cudaMalloc(&d_blkSums, BLKSUMS_BYTES));
  CHECK(cudaMemcpy(d_in, in, ARRAY_BYTES, cudaMemcpyHostToDevice));
  GpuTimer timer;
  for (int bit = 0; bit < 8 * sizeof(uint32_t); bit += nBits) {
    printf("#%d (iteration):\n", bit/nBits + 1);
    //Step 1: Calculate local histogram of each block, transpose and copy to d_hist
    printf(" + Step 1. Local histogram. ");
    timer.Start();
    computeHistKernel<<<histGridSize, histBlockSize, HIST_SMEM_BYTES>>>(
        d_in, n, d_hist, nBins, bit);
    CHECK(cudaGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // Step 2: Scan d_hist
    printf(" + Step 2. Exclusive scan. ");
    timer.Start();
    // Exclusive scan = a leading 0 followed by the inclusive scan of the
    // first total-1 elements, hence the d_histScan + 1 output offset.
    CHECK(cudaMemset(d_histScan, 0, sizeof(uint32_t)));
    scanBlkKernel<<<scanGridSize, scanBlockSize, SCAN_SMEM_BYTES>>>(
        d_hist, histBlockCount * nBins - 1, d_histScan + 1, d_blkSums);
    CHECK(cudaGetLastError());
    // scan blksums on the host (scanBlockCount is small)
    CHECK(
        cudaMemcpy(blkSums, d_blkSums, BLKSUMS_BYTES, cudaMemcpyDeviceToHost));
    for (int i = 1; i < scanBlockCount; ++i) {
      blkSums[i] += blkSums[i - 1];
    }
    CHECK(
        cudaMemcpy(d_blkSums, blkSums, BLKSUMS_BYTES, cudaMemcpyHostToDevice));
    // add scanned blkSums to every scan block after the first
    addBlkSums<<<scanGridSize, scanBlockSize>>>(
        d_histScan + scanBlockSize.x + 1,
        histBlockCount * nBins - scanBlockSize.x - 1, d_blkSums);
    CHECK(cudaGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // Step 3: scatter
    printf(" + Step 3. Scatter. ");
    timer.Start();
    scatter<<<histGridSize, histBlockSize, SCATTER_SMEM_BYTES>>>(
        d_in, n, d_histScan, d_out, nBins, bit, nBits);
    CHECK(cudaGetLastError());
    timer.Stop();
    printf("Time: %.3f ms\n", timer.Elapsed());
    // ping-pong the buffers for the next pass
    uint32_t *tmp = d_in;
    d_in = d_out;
    d_out = tmp;
  }
  cudaMemcpy(out, d_in, ARRAY_BYTES, cudaMemcpyDeviceToHost); // NOTE(review): unchecked, unlike the other CUDA calls
  free(blkSums);
  CHECK(cudaFree(d_in));
  CHECK(cudaFree(d_out));
  CHECK(cudaFree(d_hist));
  CHECK(cudaFree(d_histScan));
  CHECK(cudaFree(d_blkSums));
}
// Radix sort
// Dispatch to one of the two sort implementations and time the whole run.
// NOTE(review): the flag meaning is inverted relative to its name --
// useThrust == false runs the Thrust reference sort, useThrust == true
// runs the hand-written device sort.  Both call sites in main() rely on
// this, so it is documented here rather than changed.
void sort(const uint32_t *in, int n, uint32_t *out, int nBits,
          bool useThrust = false, int *blockSizes = NULL) {
  GpuTimer timer;
  timer.Start();
  if (useThrust == false) {
    printf("\nRadix sort by thrust\n");
    sortByThrust(in, n, out, nBits);
  } else // use device
  {
    printf("\nRadix sort by device\n");
    sortByDevice(in, n, out, nBits, blockSizes);
  }
  timer.Stop();
  printf("Time: %.3f ms\n", timer.Elapsed());
}
// Print a summary of device 0's capabilities (name, SM count, thread
// limits, memory sizes).  Aborts via CHECK if no CUDA device is present.
void printDeviceInfo() {
  cudaDeviceProp devProv;
  CHECK(cudaGetDeviceProperties(&devProv, 0));
  printf("**********GPU info**********\n");
  printf("Name: %s\n", devProv.name);
  printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
  printf("Num SMs: %d\n", devProv.multiProcessorCount);
  printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
  printf("Max num warps per SM: %d\n",
         devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
  printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
  printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
  printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
  printf("****************************\n");
}
// Driver: generate random input, sort it with Thrust (reference) and with
// the hand-written device radix sort, then compare the two results.
// Usage: prog [nBits [histBlockSize scanBlockSize]]
int main(int argc, char **argv) {
  // PRINT OUT DEVICE INFO
  printDeviceInfo();
  // SET UP INPUT SIZE
  int n = (1 << 24) + 1; // deliberately NOT a multiple of the block size
  /* n = 17; */
  printf("\nInput size: %d\n", n);
  // ALLOCATE MEMORIES
  size_t bytes = n * sizeof(uint32_t);
  uint32_t *in = (uint32_t *)malloc(bytes);
  uint32_t *out = (uint32_t *)malloc(bytes);        // Device result
  uint32_t *correctOut = (uint32_t *)malloc(bytes); // Thrust result
  // SET UP INPUT DATA (rand() is not seeded: runs are reproducible)
  for (int i = 0; i < n; i++)
    in[i] = rand();
  /* in[i] = rand() % 16; */
  // SET UP NBITS
  int nBits = 4; // Default
  if (argc > 1)
    nBits = atoi(argv[1]);
  printf("\nNum bits per digit: %d\n", nBits);
  // DETERMINE BLOCK SIZES
  int blockSizes[2] = {512, 512}; // One for histogram, one for scan
  if (argc == 4) {
    blockSizes[0] = atoi(argv[2]);
    blockSizes[1] = atoi(argv[3]);
  }
  printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0],
         blockSizes[1]);
  // SORT BY THRUST (default flag selects the Thrust path -- see sort())
  sort(in, n, correctOut, nBits);
  // SORT BY DEVICE
  sort(in, n, out, nBits, true, blockSizes);
  checkCorrectness(out, correctOut, n);
  // FREE MEMORIES
  free(in);
  free(out);
  free(correctOut);
  return EXIT_SUCCESS;
}
|
f2baf422309583bfd7e046724cdee02ec7df1cc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// --------------------------------------------------------
// --------------------------------------------------------
#include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h>
#include <thrust/system/detail/generic/reduce_by_key.h>
#include <thrust/remove.h>
#include <thrust/iterator/transform_iterator.h>
#include <error.h>
#include <cutil.h>
#include <types.h>
#include <cusp/detail/format_utils.h>
namespace amgx
{
namespace aggregation
{
typedef thrust::tuple<int, int> tuple_t;
// --------------------
// Kernels
// --------------------
// Kernel: for every nonzero of a CSR matrix, record the aggregate id of its
// ROW.  For each fine row `tid`, writes aggregates[tid] into I[j] for every
// nonzero position j of that row (row_offsets is the CSR row-pointer array).
// Uses a grid-stride loop over rows, so any launch configuration covers all
// num_rows rows.
template <typename IndexType>
__global__
void iToIKernel(const IndexType *row_offsets, const IndexType *aggregates, IndexType *I, const int num_rows)
{
    for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x)
    {
        int agg = aggregates[tid];  // coarse aggregate owning fine row tid
        for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
        {
            I[j] = agg;
        }
    }
}
// Kernel: for every nonzero of a CSR matrix, record the aggregate id of its
// COLUMN.  J[tid] = aggregates[column_indices[tid]] for each nonzero tid.
// Grid-stride loop over the num_entries nonzeros.
template <typename IndexType>
__global__
void jToJKernel(const IndexType *column_indices, const IndexType *aggregates, IndexType *J, const int num_entries)
{
    for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_entries; tid += gridDim.x * blockDim.x)
    {
        int j = column_indices[tid];  // fine column index of this nonzero
        J[tid] = aggregates[j];
    }
}
// Constructor -- trivial: the generator keeps no state of its own.
template<class T_Config>
ThrustCoarseAGeneratorBase<T_Config>::ThrustCoarseAGeneratorBase()
{
}
//-----------------------------------------------------
// Method to compute the Galerkin product: A_c=R*A*P
//-----------------------------------------------------
// Compute the Galerkin product A_c = R * A * P on the DEVICE for 1x1
// (scalar) blocks, CSR format.
//
// Strategy: expand every nonzero of A into a COO triplet (I, J, V) where I/J
// are the *aggregate* ids of the entry's row/column, sort the triplets by
// (I, J), then sum duplicates with reduce_by_key to form the coarse matrix.
// The restriction operator is represented implicitly by `aggregates`;
// R_row_offsets / R_column_indices are not used by this thrust variant.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ThrustCoarseAGenerator<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1(const Matrix_d &A, Matrix_d &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
    // Matrices storing the diagonal separately are not supported on this path.
    if (A.hasProps(DIAG))
    {
        FatalError("ThrustCoarseAGenerator: unsupported diagonal", AMGX_ERR_NOT_IMPLEMENTED);
    }
    cudaCheckError();
    // One COO triplet per nonzero of A; -1 is just a placeholder fill value.
    IVector I(A.get_num_nz(), -1);
    IVector J(A.get_num_nz(), -1);
    VVector V(A.get_num_nz(), -1);
    const int block_size_I = 128;
    const int block_size_J = 256;
    // Grid sizes are clamped to AMGX_GRID_MAX_SIZE; the kernels use
    // grid-stride loops, so a clamped grid still covers all rows/nonzeros.
    const int num_blocks_I = min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_rows() - 1) / block_size_I + 1) );
    const int num_blocks_J = min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_nz() - 1) / block_size_J + 1) );
    const IndexType *row_offsets_ptr = A.row_offsets.raw();
    const IndexType *column_indices_ptr = A.col_indices.raw();
    const IndexType *aggregates_ptr = aggregates.raw();
    IndexType *I_ptr = I.raw();
    IndexType *J_ptr = J.raw();
    // Kernel to fill array I with the aggregate id of each entry's row i
    hipLaunchKernelGGL(( iToIKernel) , dim3(num_blocks_I), dim3(block_size_I), 0, 0, row_offsets_ptr, aggregates_ptr, I_ptr, (int)A.get_num_rows());
    cudaCheckError();
    // Kernel to fill array J with the aggregate id of each entry's column j
    hipLaunchKernelGGL(( jToJKernel) , dim3(num_blocks_J), dim3(block_size_J), 0, 0, column_indices_ptr, aggregates_ptr, J_ptr, (int)A.get_num_nz());
    cudaCheckError();
    // Copy A.values to V (block size is 1 on this path, so exactly num_nz values)
    thrust::copy(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), V.begin());
    cudaCheckError();
    // Sort (I,J,V) lexicographically by (row aggregate, column aggregate)
    cusp::detail::sort_by_row_and_column(I, J, V);
    cudaCheckError();
    // Count unique (I,J) pairs in the sorted sequence: the inner product over
    // adjacent pairs counts key transitions; +1 converts transitions to keys.
    IndexType NNZ = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                                          thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
                                          thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
                                          IndexType(0),
                                          thrust::plus<IndexType>(),
                                          thrust::not_equal_to< thrust::tuple<IndexType, IndexType> >()) + 1;
    cudaCheckError();
    // Allocate the coarse matrix Ac (DIAG branch is unreachable here because
    // of the guard above, but is kept for symmetry with other generators).
    Ac.addProps(CSR);
    if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
    if (A.is_matrix_singleGPU())
    {
        Ac.resize(num_aggregates, num_aggregates, NNZ, 1);
    }
    else
    {
        Ac.resize_spare(num_aggregates, num_aggregates, NNZ, A.get_block_dimy(), A.get_block_dimx(), 1.0);
        if (A.hasProps(DIAG)) { Ac.computeDiagonal(); }
    }
    // Sum duplicate (I,J) entries; keys land in (new_row_indices, col_indices),
    // the accumulated values in Ac.values.
    IVector new_row_indices(NNZ, 0);
    thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                          thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
                          V.begin(),
                          thrust::make_zip_iterator(thrust::make_tuple(new_row_indices.begin(), Ac.col_indices.begin())),
                          Ac.values.begin(),
                          thrust::equal_to< thrust::tuple<IndexType, IndexType> >(),
                          thrust::plus<ValueType>());
    cudaCheckError();
    // Convert COO row indices to CSR row offsets
    cusp::detail::indices_to_offsets(new_row_indices, Ac.row_offsets);
    cudaCheckError();
    // Release the temporary triplet storage eagerly.
    I.clear();
    I.shrink_to_fit();
    J.clear();
    J.shrink_to_fit();
    V.clear();
    V.shrink_to_fit();
}
// Compute the Galerkin product A_c = R * A * P on the HOST for 1x1 (scalar)
// blocks, CSR format.  Serial mirror of the device variant: the two GPU
// kernels are replaced by plain loops; the thrust/cusp calls below operate
// on host-side vectors.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ThrustCoarseAGenerator<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1(const Matrix_h &A, Matrix_h &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
    // Matrices storing the diagonal separately are not supported on this path.
    if (A.hasProps(DIAG))
    {
        FatalError("ThrustCoarseAGenerator: unsupported diagonal", AMGX_ERR_NOT_IMPLEMENTED);
    }
    // One COO triplet per nonzero of A; -1 is just a placeholder fill value.
    IVector I(A.get_num_nz(), -1);
    IVector J(A.get_num_nz(), -1);
    VVector V(A.get_num_nz(), -1);
    const IndexType *row_offsets_ptr = A.row_offsets.raw();
    const IndexType *column_indices_ptr = A.col_indices.raw();
    const IndexType *aggregates_ptr = aggregates.raw();
    IndexType *I_ptr = I.raw();
    IndexType *J_ptr = J.raw();
    // Fill I with the aggregate id of each entry's row i (serial version of iToIKernel)
    for ( int tid = 0; tid < (int)A.get_num_rows(); tid++ )
    {
        int agg = aggregates_ptr[tid];
        for (int j = row_offsets_ptr[tid]; j < row_offsets_ptr[tid + 1]; j++)
        {
            I_ptr[j] = agg;
        }
    }
    // Fill J with the aggregate id of each entry's column j (serial version of jToJKernel)
    for ( int tid = 0; tid < (int)A.get_num_nz(); tid++ )
    {
        int j = column_indices_ptr[tid];
        J_ptr[tid] = aggregates_ptr[j];
    }
    // Copy A.values to V (block size is 1 on this path, so exactly num_nz values)
    thrust::copy(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), V.begin());
    cudaCheckError();
    // Sort (I,J,V) lexicographically by (row aggregate, column aggregate)
    cusp::detail::sort_by_row_and_column(I, J, V);
    cudaCheckError();
    // Count unique (I,J) pairs: transitions between adjacent keys, plus one.
    IndexType NNZ = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                                          thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
                                          thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
                                          IndexType(0),
                                          thrust::plus<IndexType>(),
                                          thrust::not_equal_to< thrust::tuple<IndexType, IndexType> >()) + 1;
    cudaCheckError();
    // Allocate the coarse matrix Ac (DIAG branch is unreachable here because
    // of the guard above, but is kept for symmetry with other generators).
    Ac.addProps(CSR);
    if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
    if (A.is_matrix_singleGPU())
    {
        Ac.resize(num_aggregates, num_aggregates, NNZ, 1);
    }
    else
    {
        Ac.resize_spare(num_aggregates, num_aggregates, NNZ, A.get_block_dimy(), A.get_block_dimx(), 1.0);
        if (A.hasProps(DIAG)) { Ac.computeDiagonal(); }
    }
    // Sum duplicate (I,J) entries into Ac.col_indices / Ac.values.
    typename Matrix_h::IVector new_row_indices(NNZ, 0);
    thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                          thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
                          V.begin(),
                          thrust::make_zip_iterator(thrust::make_tuple(new_row_indices.begin(), Ac.col_indices.begin())),
                          Ac.values.begin(),
                          thrust::equal_to< thrust::tuple<IndexType, IndexType> >(),
                          thrust::plus<ValueType>());
    cudaCheckError();
    // Convert COO row indices to CSR row offsets
    cusp::detail::indices_to_offsets(new_row_indices, Ac.row_offsets);
    cudaCheckError();
    // Release the temporary triplet storage eagerly.
    I.clear();
    I.shrink_to_fit();
    J.clear();
    J.shrink_to_fit();
    V.clear();
    V.shrink_to_fit();
}
// ------------------------------------------------
// Public entry point: dispatches the Galerkin product to the block-size
// specific implementation.  Only 1x1 (scalar) blocks are implemented.
template <class T_Config>
void ThrustCoarseAGeneratorBase<T_Config>::computeAOperator(const Matrix<T_Config> &A, Matrix<T_Config> &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
    Ac.set_initialized(0);  // mark Ac as under construction while it is rebuilt
    if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1)
    {
        computeAOperator_1x1( A, Ac, aggregates, R_row_offsets, R_column_indices, num_aggregates );
    }
    else
    {
        FatalError("Unsupported block size for ThrustCoarseAGenerator", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    if (Ac.is_matrix_singleGPU()) { Ac.computeDiagonal(); }
    Ac.set_initialized(1);
}
// ---------------------------
// Explict instantiations
// ---------------------------
#define AMGX_CASE_LINE(CASE) template class ThrustCoarseAGeneratorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class ThrustCoarseAGenerator<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
| f2baf422309583bfd7e046724cdee02ec7df1cc7.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// --------------------------------------------------------
// --------------------------------------------------------
#include <aggregation/coarseAgenerators/thrust_coarse_A_generator.h>
#include <thrust/system/detail/generic/reduce_by_key.h>
#include <thrust/remove.h>
#include <thrust/iterator/transform_iterator.h>
#include <error.h>
#include <cutil.h>
#include <types.h>
#include <cusp/detail/format_utils.h>
namespace amgx
{
namespace aggregation
{
typedef thrust::tuple<int, int> tuple_t;
// --------------------
// Kernels
// --------------------
// Kernel: for every nonzero of a CSR matrix, record the aggregate id of its
// ROW.  For each fine row `tid`, writes aggregates[tid] into I[j] for every
// nonzero position j of that row (row_offsets is the CSR row-pointer array).
// Uses a grid-stride loop over rows, so any launch configuration covers all
// num_rows rows.
template <typename IndexType>
__global__
void iToIKernel(const IndexType *row_offsets, const IndexType *aggregates, IndexType *I, const int num_rows)
{
    for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x)
    {
        int agg = aggregates[tid];  // coarse aggregate owning fine row tid
        for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
        {
            I[j] = agg;
        }
    }
}
// Kernel: for every nonzero of a CSR matrix, record the aggregate id of its
// COLUMN.  J[tid] = aggregates[column_indices[tid]] for each nonzero tid.
// Grid-stride loop over the num_entries nonzeros.
template <typename IndexType>
__global__
void jToJKernel(const IndexType *column_indices, const IndexType *aggregates, IndexType *J, const int num_entries)
{
    for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_entries; tid += gridDim.x * blockDim.x)
    {
        int j = column_indices[tid];  // fine column index of this nonzero
        J[tid] = aggregates[j];
    }
}
// Constructor -- trivial: the generator keeps no state of its own.
template<class T_Config>
ThrustCoarseAGeneratorBase<T_Config>::ThrustCoarseAGeneratorBase()
{
}
//-----------------------------------------------------
// Method to compute the Galerkin product: A_c=R*A*P
//-----------------------------------------------------
// Compute the Galerkin product A_c = R * A * P on the DEVICE for 1x1
// (scalar) blocks, CSR format.
//
// Strategy: expand every nonzero of A into a COO triplet (I, J, V) where I/J
// are the *aggregate* ids of the entry's row/column, sort the triplets by
// (I, J), then sum duplicates with reduce_by_key to form the coarse matrix.
// The restriction operator is represented implicitly by `aggregates`;
// R_row_offsets / R_column_indices are not used by this thrust variant.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ThrustCoarseAGenerator<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1(const Matrix_d &A, Matrix_d &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
    // Matrices storing the diagonal separately are not supported on this path.
    if (A.hasProps(DIAG))
    {
        FatalError("ThrustCoarseAGenerator: unsupported diagonal", AMGX_ERR_NOT_IMPLEMENTED);
    }
    cudaCheckError();
    // One COO triplet per nonzero of A; -1 is just a placeholder fill value.
    IVector I(A.get_num_nz(), -1);
    IVector J(A.get_num_nz(), -1);
    VVector V(A.get_num_nz(), -1);
    const int block_size_I = 128;
    const int block_size_J = 256;
    // Grid sizes are clamped to AMGX_GRID_MAX_SIZE; the kernels use
    // grid-stride loops, so a clamped grid still covers all rows/nonzeros.
    const int num_blocks_I = min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_rows() - 1) / block_size_I + 1) );
    const int num_blocks_J = min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_nz() - 1) / block_size_J + 1) );
    const IndexType *row_offsets_ptr = A.row_offsets.raw();
    const IndexType *column_indices_ptr = A.col_indices.raw();
    const IndexType *aggregates_ptr = aggregates.raw();
    IndexType *I_ptr = I.raw();
    IndexType *J_ptr = J.raw();
    // Kernel to fill array I with the aggregate id of each entry's row i
    iToIKernel <<< num_blocks_I, block_size_I>>>(row_offsets_ptr, aggregates_ptr, I_ptr, (int)A.get_num_rows());
    cudaCheckError();
    // Kernel to fill array J with the aggregate id of each entry's column j
    jToJKernel <<< num_blocks_J, block_size_J>>>(column_indices_ptr, aggregates_ptr, J_ptr, (int)A.get_num_nz());
    cudaCheckError();
    // Copy A.values to V (block size is 1 on this path, so exactly num_nz values)
    thrust::copy(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), V.begin());
    cudaCheckError();
    // Sort (I,J,V) lexicographically by (row aggregate, column aggregate)
    cusp::detail::sort_by_row_and_column(I, J, V);
    cudaCheckError();
    // Count unique (I,J) pairs in the sorted sequence: the inner product over
    // adjacent pairs counts key transitions; +1 converts transitions to keys.
    IndexType NNZ = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                                          thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
                                          thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
                                          IndexType(0),
                                          thrust::plus<IndexType>(),
                                          thrust::not_equal_to< thrust::tuple<IndexType, IndexType> >()) + 1;
    cudaCheckError();
    // Allocate the coarse matrix Ac (DIAG branch is unreachable here because
    // of the guard above, but is kept for symmetry with other generators).
    Ac.addProps(CSR);
    if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
    if (A.is_matrix_singleGPU())
    {
        Ac.resize(num_aggregates, num_aggregates, NNZ, 1);
    }
    else
    {
        Ac.resize_spare(num_aggregates, num_aggregates, NNZ, A.get_block_dimy(), A.get_block_dimx(), 1.0);
        if (A.hasProps(DIAG)) { Ac.computeDiagonal(); }
    }
    // Sum duplicate (I,J) entries; keys land in (new_row_indices, col_indices),
    // the accumulated values in Ac.values.
    IVector new_row_indices(NNZ, 0);
    thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                          thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
                          V.begin(),
                          thrust::make_zip_iterator(thrust::make_tuple(new_row_indices.begin(), Ac.col_indices.begin())),
                          Ac.values.begin(),
                          thrust::equal_to< thrust::tuple<IndexType, IndexType> >(),
                          thrust::plus<ValueType>());
    cudaCheckError();
    // Convert COO row indices to CSR row offsets
    cusp::detail::indices_to_offsets(new_row_indices, Ac.row_offsets);
    cudaCheckError();
    // Release the temporary triplet storage eagerly.
    I.clear();
    I.shrink_to_fit();
    J.clear();
    J.shrink_to_fit();
    V.clear();
    V.shrink_to_fit();
}
// Compute the Galerkin product A_c = R * A * P on the HOST for 1x1 (scalar)
// blocks, CSR format.  Serial mirror of the device variant: the two GPU
// kernels are replaced by plain loops; the thrust/cusp calls below operate
// on host-side vectors.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void ThrustCoarseAGenerator<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_1x1(const Matrix_h &A, Matrix_h &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
    // Matrices storing the diagonal separately are not supported on this path.
    if (A.hasProps(DIAG))
    {
        FatalError("ThrustCoarseAGenerator: unsupported diagonal", AMGX_ERR_NOT_IMPLEMENTED);
    }
    // One COO triplet per nonzero of A; -1 is just a placeholder fill value.
    IVector I(A.get_num_nz(), -1);
    IVector J(A.get_num_nz(), -1);
    VVector V(A.get_num_nz(), -1);
    const IndexType *row_offsets_ptr = A.row_offsets.raw();
    const IndexType *column_indices_ptr = A.col_indices.raw();
    const IndexType *aggregates_ptr = aggregates.raw();
    IndexType *I_ptr = I.raw();
    IndexType *J_ptr = J.raw();
    // Fill I with the aggregate id of each entry's row i (serial version of iToIKernel)
    for ( int tid = 0; tid < (int)A.get_num_rows(); tid++ )
    {
        int agg = aggregates_ptr[tid];
        for (int j = row_offsets_ptr[tid]; j < row_offsets_ptr[tid + 1]; j++)
        {
            I_ptr[j] = agg;
        }
    }
    // Fill J with the aggregate id of each entry's column j (serial version of jToJKernel)
    for ( int tid = 0; tid < (int)A.get_num_nz(); tid++ )
    {
        int j = column_indices_ptr[tid];
        J_ptr[tid] = aggregates_ptr[j];
    }
    // Copy A.values to V (block size is 1 on this path, so exactly num_nz values)
    thrust::copy(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), V.begin());
    cudaCheckError();
    // Sort (I,J,V) lexicographically by (row aggregate, column aggregate)
    cusp::detail::sort_by_row_and_column(I, J, V);
    cudaCheckError();
    // Count unique (I,J) pairs: transitions between adjacent keys, plus one.
    IndexType NNZ = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                                          thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1,
                                          thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1,
                                          IndexType(0),
                                          thrust::plus<IndexType>(),
                                          thrust::not_equal_to< thrust::tuple<IndexType, IndexType> >()) + 1;
    cudaCheckError();
    // Allocate the coarse matrix Ac (DIAG branch is unreachable here because
    // of the guard above, but is kept for symmetry with other generators).
    Ac.addProps(CSR);
    if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
    if (A.is_matrix_singleGPU())
    {
        Ac.resize(num_aggregates, num_aggregates, NNZ, 1);
    }
    else
    {
        Ac.resize_spare(num_aggregates, num_aggregates, NNZ, A.get_block_dimy(), A.get_block_dimx(), 1.0);
        if (A.hasProps(DIAG)) { Ac.computeDiagonal(); }
    }
    // Sum duplicate (I,J) entries into Ac.col_indices / Ac.values.
    typename Matrix_h::IVector new_row_indices(NNZ, 0);
    thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
                          thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())),
                          V.begin(),
                          thrust::make_zip_iterator(thrust::make_tuple(new_row_indices.begin(), Ac.col_indices.begin())),
                          Ac.values.begin(),
                          thrust::equal_to< thrust::tuple<IndexType, IndexType> >(),
                          thrust::plus<ValueType>());
    cudaCheckError();
    // Convert COO row indices to CSR row offsets
    cusp::detail::indices_to_offsets(new_row_indices, Ac.row_offsets);
    cudaCheckError();
    // Release the temporary triplet storage eagerly.
    I.clear();
    I.shrink_to_fit();
    J.clear();
    J.shrink_to_fit();
    V.clear();
    V.shrink_to_fit();
}
// ------------------------------------------------
// Public entry point: dispatches the Galerkin product to the block-size
// specific implementation.  Only 1x1 (scalar) blocks are implemented.
template <class T_Config>
void ThrustCoarseAGeneratorBase<T_Config>::computeAOperator(const Matrix<T_Config> &A, Matrix<T_Config> &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
    Ac.set_initialized(0);  // mark Ac as under construction while it is rebuilt
    if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1)
    {
        computeAOperator_1x1( A, Ac, aggregates, R_row_offsets, R_column_indices, num_aggregates );
    }
    else
    {
        FatalError("Unsupported block size for ThrustCoarseAGenerator", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    if (Ac.is_matrix_singleGPU()) { Ac.computeDiagonal(); }
    Ac.set_initialized(1);
}
// ---------------------------
// Explict instantiations
// ---------------------------
#define AMGX_CASE_LINE(CASE) template class ThrustCoarseAGeneratorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class ThrustCoarseAGenerator<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
|
f013be8613bd2ef01259fa53f44896bb16838578.hip | // !!! This is a file automatically generated by hipify!!!
/***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <edge_cuda.h>
#define MAX_THREADS_PER_BLOCK 512
#define NO_OF_NODES 65536
#define NO_OF_EDGES 393930
#define CU_CHECK_ERR(err) \
if ( err != hipSuccess ) { \
printf("CUDA Error: %s\n", hipGetErrorString(hipGetLastError())); \
abort(); \
}
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
int no_of_nodes = NO_OF_NODES;
int edge_list_size = NO_OF_EDGES;
FILE *fp;
// Adjacency-list vertex record (CSR-style): each vertex stores the offset of
// its first edge in the flat edge array plus its edge count.
struct Node
{
    int starting;     // index of this vertex's first edge in h_graph_edges
    int no_of_edges;  // number of outgoing edges
};
// All host-side BFS state, statically sized for the fixed 64K-node /
// ~394K-edge input (NO_OF_NODES / NO_OF_EDGES).
struct host_graph
{
    Node h_graph_nodes[NO_OF_NODES];          // CSR vertex table
    bool h_graph_mask[NO_OF_NODES];           // current BFS frontier
    bool h_updating_graph_mask[NO_OF_NODES];  // frontier being built this iteration
    bool h_graph_visited[NO_OF_NODES];        // vertices already discovered
    int h_graph_edges[NO_OF_EDGES];           // flat destination-vertex list
};
#include <kernel.cu>
#include <kernel2.cu>
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
// Benchmark entry point: resets the global size counters and runs BFS.
// Returns 0 on completion (BFSGraph aborts internally on fatal errors).
//
// FIX: the function is declared `int` but previously fell off the end
// without returning a value, which is undefined behavior if the caller
// reads the result.
int bfs_main( int argc, char** argv)
{
    // Reset the file-scope sizes; the graph loader fills in the real values.
    no_of_nodes = 0;
    edge_list_size = 0;
    BFSGraph( argc, argv);
    return 0;
}
// Prints command-line usage to stderr.  (argc is accepted but unused.)
void Usage(int argc, char**argv){
    fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
// Loads a graph in the Rodinia BFS text format from `fname` into the
// preallocated host_graph structure and marks vertex 0 as the BFS source.
//
// File layout: node count; then (start, edge_count) per node; then a source
// vertex id (read but overwritten with 0 below); then the edge count; then
// (dest, cost) pairs per edge -- cost is read and discarded.
//
// NOTE(review): fscanf return values are unchecked, and the local
// no_of_nodes / edge_list_size shadow the file-scope globals of the same
// name, so the globals are never updated here -- confirm that is intended.
void init_graph_from_file(char* fname,host_graph* h_graph)
{
    fp = fopen(fname,"r");  // `fp` is the file-scope FILE* global
    if(!fp)
    {
        printf("Error Reading graph file\n");
        return;
    }
    int no_of_nodes;
    int edge_list_size;
    int source = 0;
    fscanf(fp,"%d",&no_of_nodes);
    // allocate host memory
    int start, edgeno;
    // Per-vertex records; all masks start cleared.
    // (signed/unsigned comparison below: `i` is unsigned, no_of_nodes is int)
    for( unsigned int i = 0; i < no_of_nodes; i++)
    {
        fscanf(fp,"%d %d",&start,&edgeno);
        h_graph->h_graph_nodes[i].starting = start;
        h_graph->h_graph_nodes[i].no_of_edges = edgeno;
        h_graph->h_graph_mask[i]=false;
        h_graph->h_updating_graph_mask[i]=false;
        h_graph->h_graph_visited[i]=false;
        if(i%100==0)
            printf("%d nodes read \n",i);
    }
    // read the source node from the file (then force it to vertex 0)
    fscanf(fp,"%d",&source);
    source=0;
    // seed the BFS: the source is the whole initial frontier
    h_graph->h_graph_mask[source]=true;
    h_graph->h_graph_visited[source]=true;
    fscanf(fp,"%d",&edge_list_size);
    int id,cost;
    for(int i=0; i < edge_list_size ; i++)
    {
        fscanf(fp,"%d %d",&id,&cost);  // cost is intentionally discarded
        h_graph->h_graph_edges[i] = id;
        if(i%100==0)
            printf("%d edges read \n",i);
    }
    if(fp)
        fclose(fp);
    printf("Read File\n");
}
////////////////////////////////////////////////////////////////////////////////
// Apply BFS on a Graph using CUDA (HIP runtime)
//
// Initialises the graph host-side (via the edgeExtra hook), mirrors it into
// device memory, then alternates Kernel (expand the current frontier and set
// tentative costs) and Kernel2 (commit the new frontier and raise a flag if
// any vertex joined it) until the frontier is empty.  The per-vertex cost is
// copied back and the first (up to) 1000 entries are printed.
//
// FIXES vs. previous version:
//  * h_graph (malloc) and d_over (hipMalloc) were leaked -- now released.
//  * fclose() was still applied to fpo even though fpo now aliases stdout
//    (leftover from the result.txt variant) -- closing stdout breaks all
//    later output, so the close is now guarded.
////////////////////////////////////////////////////////////////////////////////
void BFSGraph( int argc, char** argv)
{
    // Input path is hard-coded; argv is accepted but not consulted.
    char *input_f;
    input_f = "../../data/graph64k.txt";
    int source=0;  // BFS root is vertex 0 (the loader forces this too)
    // All host-side graph state lives in one statically-sized struct.
    host_graph* h_graph=(host_graph*) malloc(sizeof(host_graph)) ;
    int no_of_nodes=NO_OF_NODES;
    int edge_list_size=NO_OF_EDGES;
    // One thread per vertex; spill into multiple blocks when the vertex
    // count exceeds the per-block limit.
    int num_of_blocks = 1;
    int num_of_threads_per_block = no_of_nodes;
    if(no_of_nodes>MAX_THREADS_PER_BLOCK)
    {
        num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
        num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
    }
    // Graph initialisation is delegated to the edgeExtra hook (CPU side).
    printf("Mem init trick - do bfs init in CPU\n");
    CU_CHECK_ERR( edgeExtra(3, (void*)h_graph, 0, 0, input_f) );
    printf("Reading File\n");
    // Copy the Node list to device memory
    Node* d_graph_nodes;
    hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
    hipMemcpy( d_graph_nodes, h_graph->h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
    // Copy the Edge List to device Memory
    int* d_graph_edges;
    hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
    hipMemcpy( d_graph_edges, h_graph->h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
    // Copy the frontier masks to device memory
    bool* d_graph_mask;
    hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
    hipMemcpy( d_graph_mask, h_graph->h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
    bool* d_updating_graph_mask;
    hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
    hipMemcpy( d_updating_graph_mask, h_graph->h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
    // Copy the Visited nodes array to device memory
    bool* d_graph_visited;
    hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
    hipMemcpy( d_graph_visited, h_graph->h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ;
    // Host-side result: BFS depth per vertex; -1 means "not reached".
    int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
    for(int i=0;i<no_of_nodes;i++)
        h_cost[i]=-1;
    h_cost[source]=0;
    // allocate device memory for result
    int* d_cost;
    hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
    // Device flag raised by Kernel2 when any vertex joined the new frontier.
    bool *d_over;
    hipMalloc( (void**) &d_over, sizeof(bool));
#ifdef GEM5_FUSION
    //m5_work_begin(0, 0);
#endif
    hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ;
    printf("Copied Everything to GPU memory\n");
    // setup execution parameters
    dim3 grid( num_of_blocks, 1, 1);
    dim3 threads( num_of_threads_per_block, 1, 1);
    int k=0;
    printf("Start traversing the tree\n");
    bool stop;
    // Call the kernels until no thread extends the frontier.
    do
    {
        // if no thread changes this value then the loop stops
        stop=false;
        hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ;
        // Expand the current frontier, writing tentative costs.
        hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
        // Commit the new frontier and raise d_over if it is non-empty.
        hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
        // This blocking copy also synchronises with both kernel launches.
        hipMemcpy( &stop, d_over, sizeof(bool), hipMemcpyDeviceToHost) ;
        k++;
    }
    while(stop);
    printf("Kernel Executed %d times\n",k);
    // copy result from device to host
    hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ;
#ifdef GEM5_FUSION
    //m5_work_end(0, 0);
#endif
    // Emit (at most) the first 1000 costs.
    // FILE *fpo = fopen("result.txt","w");
    FILE *fpo = stdout;
    for(int i=0;i<no_of_nodes&&i<1000;i++)
        fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
    if (fpo != stdout)  // do not close the process's stdout
        fclose(fpo);
    // cleanup memory
    free( h_cost);
    free( h_graph);  // FIX: previously leaked
    hipFree(d_graph_nodes);
    hipFree(d_graph_edges);
    hipFree(d_graph_mask);
    hipFree(d_updating_graph_mask);
    hipFree(d_graph_visited);
    hipFree(d_cost);
    hipFree(d_over);  // FIX: previously leaked
}
| f013be8613bd2ef01259fa53f44896bb16838578.cu | /***********************************************************************************
Implementing Breadth first search on CUDA using algorithm given in HiPC'07
paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
Copyright (c) 2008 International Institute of Information Technology - Hyderabad.
All rights reserved.
Permission to use, copy, modify and distribute this software and its documentation for
educational purpose is hereby granted without fee, provided that the above copyright
notice and this permission notice appear in all copies of this software and that you do
not sell the software.
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR
OTHERWISE.
Created by Pawan Harish.
************************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <edge_cuda.h>
#define MAX_THREADS_PER_BLOCK 512
#define NO_OF_NODES 65536
#define NO_OF_EDGES 393930
#define CU_CHECK_ERR(err) \
if ( err != cudaSuccess ) { \
printf("CUDA Error: %s\n", cudaGetErrorString(cudaGetLastError())); \
abort(); \
}
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif
int no_of_nodes = NO_OF_NODES;
int edge_list_size = NO_OF_EDGES;
FILE *fp;
// Adjacency-list vertex record (CSR-style): each vertex stores the offset of
// its first edge in the flat edge array plus its edge count.
struct Node
{
    int starting;     // index of this vertex's first edge in h_graph_edges
    int no_of_edges;  // number of outgoing edges
};
// All host-side BFS state, statically sized for the fixed 64K-node /
// ~394K-edge input (NO_OF_NODES / NO_OF_EDGES).
struct host_graph
{
    Node h_graph_nodes[NO_OF_NODES];          // CSR vertex table
    bool h_graph_mask[NO_OF_NODES];           // current BFS frontier
    bool h_updating_graph_mask[NO_OF_NODES];  // frontier being built this iteration
    bool h_graph_visited[NO_OF_NODES];        // vertices already discovered
    int h_graph_edges[NO_OF_EDGES];           // flat destination-vertex list
};
#include <kernel.cu>
#include <kernel2.cu>
void BFSGraph(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
// Benchmark entry point: resets the global size counters and runs BFS.
// Returns 0 on completion (BFSGraph aborts internally on fatal errors).
//
// FIX: the function is declared `int` but previously fell off the end
// without returning a value, which is undefined behavior if the caller
// reads the result.
int bfs_main( int argc, char** argv)
{
    // Reset the file-scope sizes; the graph loader fills in the real values.
    no_of_nodes = 0;
    edge_list_size = 0;
    BFSGraph( argc, argv);
    return 0;
}
// Prints command-line usage to stderr.  (argc is accepted but unused.)
void Usage(int argc, char**argv){
    fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
// Load a Rodinia-format BFS graph text file into the fixed-size arrays of
// `h_graph` and mark node 0 as the BFS source.
//
// File layout: <node count>, then <start, edge count> per node, then the
// source node id (read but ignored: the benchmark always starts from node 0),
// then <edge count> and <dest, cost> per edge (`cost` is unused by BFS).
//
// Fixes over the original:
//  - fscanf return values are checked, so a truncated/malformed file no
//    longer feeds stale stack values into the graph arrays;
//  - node and edge counts are clamped to the static capacity of host_graph,
//    so an oversized input cannot overflow the arrays;
//  - the global `fp` is nulled after closing instead of left dangling.
// On open/parse failure a message is printed and the function returns,
// possibly leaving h_graph partially initialized (same contract as before).
void init_graph_from_file(char* fname,host_graph* h_graph)
{
fp = fopen(fname,"r");
if(!fp)
{
printf("Error Reading graph file\n");
return;
}
int no_of_nodes = 0;
int edge_list_size = 0;
int source = 0;
if(fscanf(fp,"%d",&no_of_nodes) != 1)
{
printf("Error Reading graph file\n");
fclose(fp);
fp = NULL;
return;
}
// Clamp to the static array capacity (see struct host_graph).
if(no_of_nodes > NO_OF_NODES)
no_of_nodes = NO_OF_NODES;
int start, edgeno;
// initialize the per-node records and clear all BFS flags
for(int i = 0; i < no_of_nodes; i++)
{
if(fscanf(fp,"%d %d",&start,&edgeno) != 2)
break;
h_graph->h_graph_nodes[i].starting = start;
h_graph->h_graph_nodes[i].no_of_edges = edgeno;
h_graph->h_graph_mask[i]=false;
h_graph->h_updating_graph_mask[i]=false;
h_graph->h_graph_visited[i]=false;
if(i%100==0)
printf("%d nodes read \n",i);
}
//read the source node from the file
if(fscanf(fp,"%d",&source) != 1)
source = 0;
source=0; // the benchmark always starts the BFS from node 0
//set the source node as true in the mask
h_graph->h_graph_mask[source]=true;
h_graph->h_graph_visited[source]=true;
if(fscanf(fp,"%d",&edge_list_size) != 1)
edge_list_size = 0;
if(edge_list_size > NO_OF_EDGES)
edge_list_size = NO_OF_EDGES;
int id,cost;
for(int i=0; i < edge_list_size ; i++)
{
if(fscanf(fp,"%d %d",&id,&cost) != 2)
break;
h_graph->h_graph_edges[i] = id; // `cost` is parsed but unused by BFS
if(i%100==0)
printf("%d edges read \n",i);
}
if(fp)
fclose(fp);
fp = NULL;
printf("Read File\n");
}
////////////////////////////////////////////////////////////////////////////////
//Apply BFS on a Graph using CUDA
////////////////////////////////////////////////////////////////////////////////
// Run the two-kernel level-synchronous BFS from HiPC'07 on the GPU.
// Per iteration: Kernel expands the current frontier (h_graph_mask) and
// records newly reached nodes in the updating mask; Kernel2 promotes the
// updating mask into the next frontier and sets *d_over when any node was
// added. The loop stops when a full pass adds no nodes.
// NOTE(review): most cudaMalloc/cudaMemcpy return values below are unchecked
// even though CU_CHECK_ERR exists — a failed allocation would surface only as
// wrong results.
void BFSGraph( int argc, char** argv)
{
// Hard-coded input path; the argv-based path selection is commented out.
char *input_f;
input_f = "../../data/graph64k.txt";
//input_f = "/home/devalshah/Perforce/deval_shah/home/deval/gem5-gpu/benchmarks/data/4096nodes.in.rodinia";//argv[1];
//if(argc!=2){
//Usage(argc, argv);
//exit(0);
//}
int source=0;
host_graph* h_graph=(host_graph*) malloc(sizeof(host_graph)) ;
int no_of_nodes=NO_OF_NODES;
int edge_list_size=NO_OF_EDGES;
/*
Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes);
bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes);
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
*/
// One thread per node, split across blocks of MAX_THREADS_PER_BLOCK.
int num_of_blocks = 1;
int num_of_threads_per_block = no_of_nodes;
//Make execution Parameters according to the number of nodes
//Distribute threads across multiple Blocks if necessary
if(no_of_nodes>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// Graph initialization is delegated to edgeExtra (opcode 3) instead of
// init_graph_from_file; presumably edgeExtra fills h_graph from input_f —
// TODO confirm against edge_cuda.h.
//init_graph_from_file(input_f,h_graph);
printf("Mem init trick - do bfs init in CPU\n");
CU_CHECK_ERR( edgeExtra(3, (void*)h_graph, 0, 0, input_f) );
//CU_CHECK_ERR( edgeExtra(2, (void*)lpm, 1, 1, IPV4_PREFIX_FILE) );
//argv[1];
printf("Reading File\n");
//Read in Graph from a file
//Copy the Node list to device memory
Node* d_graph_nodes;
cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
cudaMemcpy( d_graph_nodes, h_graph->h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* d_graph_edges;
cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
cudaMemcpy( d_graph_edges, h_graph->h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
//Copy the Mask to device memory
bool* d_graph_mask;
cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_mask, h_graph->h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
bool* d_updating_graph_mask;
cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_updating_graph_mask, h_graph->h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* d_graph_visited;
cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ;
cudaMemcpy( d_graph_visited, h_graph->h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ;
// allocate mem for the result on host side
// h_cost[i] = BFS level of node i; -1 marks "unreached".
int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++)
h_cost[i]=-1;
h_cost[source]=0;
// allocate device memory for result
int* d_cost;
cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes);
//make a bool to check if the execution is over
bool *d_over;
cudaMalloc( (void**) &d_over, sizeof(bool));
#ifdef GEM5_FUSION
//m5_work_begin(0, 0);
#endif
cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ;
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( num_of_blocks, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k=0;
printf("Start traversing the tree\n");
bool stop;
//Call the Kernel untill all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ;
Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes);
// check if kernel execution generated and error
Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
// check if kernel execution generated and error
// This blocking copy also serves as the per-level synchronization point.
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ;
#ifdef GEM5_FUSION
//m5_work_end(0, 0);
#endif
//Store the result into a file
// FILE *fpo = fopen("result.txt","w");
// NOTE(review): fpo aliases stdout, so the fclose() below closes stdout for
// the rest of the process — confirm this is intended.
FILE *fpo = stdout;
for(int i=0;i<no_of_nodes&&i<1000;i++)
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
// printf("Result stored in result.txt\n");
// cleanup memory
// NOTE(review): h_graph (malloc above) and d_over are never freed — leak.
/*free( h_graph_nodes);
free( h_graph_edges);
free( h_graph_mask);
free( h_updating_graph_mask);
free( h_graph_visited);
*/
free( h_cost);
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
}
|
9a76144de9b0b79792eb0d24de179c58c9b4fbf3.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2018 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
namespace xgboost {
namespace tree {
// Build the compressed (quantile-bin / ELLPack) gidx representation for a
// freshly generated n_rows x n_cols DMatrix inside `shard`, using a fixed
// 24-bin cut matrix (3 cuts per feature). Shared setup helper for the tests
// below. sparsity=0 yields a dense matrix.
void BuildGidx(DeviceShard* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetRowBatches().begin();
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
shard->InitRowPtrs(batch);
shard->InitCompressedData(cmat, batch);
delete dmat;
}
// Dense case: every row stores all 8 features, so row_stride must equal
// n_cols and every bin id can be checked against the literal expectation.
TEST(GpuHist, BuildGidxDense) {
int const n_rows = 16, n_cols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.row_stride, n_cols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < n_rows * n_cols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
// Sparse case (90% missing): rows are padded to row_stride entries; bin id
// 24 (== n_bins) is the padding/"missing" symbol in the expected output.
TEST(GpuHist, BuildGidxSparse) {
int const n_rows = 16, n_cols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < n_rows * shard.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
// Reference (grad, hess) histogram per bin, matching the deterministic
// gradient pairs generated in TestBuildHist below.
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
// Shared body for the global-memory and shared-memory histogram builder
// tests: builds gidx, seeds deterministic gradient pairs (SimpleLCG), runs
// the builder on node 0, and compares the device histogram against
// GetHostHistGpair() within 0.01 tolerance.
void TestBuildHist(GPUHistBuilderBase& builder) {
int const n_rows = 16, n_cols = 8;
TrainParam param;
param.max_depth = 6;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(n_rows);
for (size_t i = 0; i < h_gpair.size(); ++i) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
h_gpair[i] = GradientPair(grad, hess);
}
thrust::device_vector<GradientPair> gpair (n_rows);
gpair = h_gpair;
int num_symbols = shard.n_bins + 1;
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.Size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.Data();
dh::safe_cuda(hipMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.Size(),
hipMemcpyDeviceToHost));
auto gidx = common::CompressedIterator<uint32_t>(h_gidx_buffer.data(),
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, n_rows);
shard.hist.AllocateHistogram(0);
shard.gpair.copy(gpair.begin(), gpair.end());
thrust::sequence(shard.ridx.CurrentDVec().tbegin(),
shard.ridx.CurrentDVec().tend());
builder.Build(&shard, 0);
DeviceHistogram d_hist = shard.hist;
GradientPairSumT* d_histptr {d_hist.GetHistPtr(0)};
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientPairSumT> h_result (d_hist.data.size()/2);
size_t data_size = sizeof(GradientPairSumT) / (
sizeof(GradientPairSumT) / sizeof(GradientPairSumT::ValueT));
data_size *= d_hist.data.size();
dh::safe_cuda(hipMemcpy(h_result.data(), d_histptr, data_size,
hipMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
// Both builder variants must produce the same histogram as the reference.
TEST(GpuHist, BuildHistGlobalMem) {
GlobalMemHistBuilder builder;
TestBuildHist(builder);
}
TEST(GpuHist, BuildHistSharedMem) {
SharedMemHistBuilder builder;
TestBuildHist(builder);
}
// Fixed 8-feature, 3-cuts-per-feature cut matrix used by the split tests.
common::HistCutMatrix GetHostCutMatrix () {
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
return cmat;
}
// TODO(trivialfis): This test is over simplified.
// Hand-constructs a shard with the reference histogram and cut matrix (no
// training run), then checks that EvaluateSplits picks feature 7 at cut
// value 0.26 for the root node.
TEST(GpuHist, EvaluateSplits) {
constexpr int n_rows = 16;
constexpr int n_cols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.colsample_bylevel = 1;
param.colsample_bytree = 1;
param.min_child_weight = 0.01;
// Disable all parameters.
param.reg_alpha = 0.0;
param.reg_lambda = 0;
param.max_delta_step = 0.0;
for (size_t i = 0; i < n_cols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard> shard {new DeviceShard(0, 0, n_rows, param)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
common::HistCutMatrix cmat = GetHostCutMatrix();
// Copy cut matrix to device.
DeviceShard::DeviceHistCutMatrix cut;
shard->ba.Allocate(0, true,
&(shard->cut_.feature_segments), cmat.row_ptr.size(),
&(shard->cut_.min_fvalue), cmat.min_val.size(),
&(shard->cut_.gidx_fvalue_map), 24,
&(shard->monotone_constraints), n_cols);
shard->cut_.feature_segments.copy(cmat.row_ptr.begin(), cmat.row_ptr.end());
shard->cut_.gidx_fvalue_map.copy(cmat.cut.begin(), cmat.cut.end());
shard->monotone_constraints.copy(param.monotone_constraints.begin(),
param.monotone_constraints.end());
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * n_cols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.data.size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.data.begin());
// Initialize GPUHistMaker
GPUHistMaker hist_maker = GPUHistMaker();
hist_maker.param_ = param;
hist_maker.shards_.push_back(std::move(shard));
hist_maker.column_sampler_.Init(n_cols,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
tree.InitModel();
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
hist_maker.info_ = &info;
hist_maker.node_value_constraints_.resize(1);
hist_maker.node_value_constraints_[0].lower_bound = -1.0;
hist_maker.node_value_constraints_[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
hist_maker.EvaluateSplits({0}, &tree);
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res[0].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
}
// Hand-constructs a split candidate (feature 2, fvalue 0.59) on a dense
// sequential gidx matrix and checks that ApplySplit partitions the 16 row
// indices into segments [0,6) left and [6,16) right of the new children.
TEST(GpuHist, ApplySplit) {
GPUHistMaker hist_maker = GPUHistMaker();
int constexpr nid = 0;
int constexpr n_rows = 16;
int constexpr n_cols = 8;
TrainParam param;
param.silent = true;
// Initialize shard
for (size_t i = 0; i < n_cols; ++i) {
param.monotone_constraints.emplace_back(0);
}
hist_maker.shards_.resize(1);
hist_maker.shards_[0].reset(new DeviceShard(0, 0, n_rows, param));
auto& shard = hist_maker.shards_.at(0);
shard->ridx_segments.resize(3);  // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, n_rows);
shard->ba.Allocate(0, true, &(shard->ridx), n_rows,
&(shard->position), n_rows);
shard->row_stride = n_cols;
thrust::sequence(shard->ridx.CurrentDVec().tbegin(),
shard->ridx.CurrentDVec().tend());
// Free inside DeviceShard
dh::safe_cuda(hipHostMalloc(&(shard->tmp_pinned), sizeof(int64_t)));
// Initialize GPUHistMaker
hist_maker.param_ = param;
RegTree tree;
tree.InitModel();
DeviceSplitCandidate candidate;
candidate.Update(2, kLeftDir,
0.59, 4,  // fvalue has to be equal to one of the cut field
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
GPUHistMaker::ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = nid;
auto const& nodes = tree.GetNodes();
size_t n_nodes = nodes.size();
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
hist_maker.hmat_ = cmat;
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
info.num_nonzero_ = n_rows * n_cols;  // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = n_cols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(
row_stride * n_rows, num_symbols);
shard->ba.Allocate(0, param.silent,
&(shard->gidx_buffer), compressed_size_bytes);
common::CompressedBufferWriter wr(num_symbols);
std::vector<int> h_gidx (n_rows * row_stride);
std::iota(h_gidx.begin(), h_gidx.end(), 0);
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
wr.Write(h_gidx_compressed.data(), h_gidx.begin(), h_gidx.end());
shard->gidx_buffer.copy(h_gidx_compressed.begin(), h_gidx_compressed.end());
shard->gidx = common::CompressedIterator<uint32_t>(
shard->gidx_buffer.Data(), num_symbols);
hist_maker.info_ = &info;
hist_maker.ApplySplit(candidate_entry, &tree);
ASSERT_FALSE(tree[nid].IsLeaf());
int left_nidx = tree[nid].LeftChild();
int right_nidx = tree[nid].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 6);
ASSERT_EQ(shard->ridx_segments[right_nidx].begin, 6);
ASSERT_EQ(shard->ridx_segments[right_nidx].end, 16);
}
}  // namespace tree
}  // namespace xgboost
| 9a76144de9b0b79792eb0d24de179c58c9b4fbf3.cu | /*!
* Copyright 2017-2018 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include <random>
#include "../helpers.h"
#include "gtest/gtest.h"
#include "../../../src/data/sparse_page_source.h"
#include "../../../src/gbm/gbtree_model.h"
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../../../src/tree/updater_gpu_common.cuh"
#include "../../../src/common/common.h"
namespace xgboost {
namespace tree {
// Build the compressed (quantile-bin / ELLPack) gidx representation for a
// freshly generated n_rows x n_cols DMatrix inside `shard`, using a fixed
// 24-bin cut matrix (3 cuts per feature). Shared setup helper for the tests
// below. sparsity=0 yields a dense matrix.
void BuildGidx(DeviceShard* shard, int n_rows, int n_cols,
bst_float sparsity=0) {
auto dmat = CreateDMatrix(n_rows, n_cols, sparsity, 3);
const SparsePage& batch = *(*dmat)->GetRowBatches().begin();
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
shard->InitRowPtrs(batch);
shard->InitCompressedData(cmat, batch);
delete dmat;
}
// Dense case: every row stores all 8 features, so row_stride must equal
// n_cols and every bin id can be checked against the literal expectation.
TEST(GpuHist, BuildGidxDense) {
int const n_rows = 16, n_cols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_EQ(shard.row_stride, n_cols);
std::vector<uint32_t> solution = {
0, 3, 8, 9, 14, 17, 20, 21,
0, 4, 7, 10, 14, 16, 19, 22,
1, 3, 7, 11, 14, 15, 19, 21,
2, 3, 7, 9, 13, 16, 20, 22,
2, 3, 6, 9, 12, 16, 20, 21,
1, 5, 6, 10, 13, 16, 20, 21,
2, 5, 8, 9, 13, 17, 19, 22,
2, 4, 6, 10, 14, 17, 19, 21,
2, 5, 7, 9, 13, 16, 19, 22,
0, 3, 8, 10, 12, 16, 19, 22,
1, 3, 7, 10, 13, 16, 19, 21,
1, 3, 8, 10, 13, 17, 20, 22,
2, 4, 6, 9, 14, 15, 19, 22,
1, 4, 6, 9, 13, 16, 19, 21,
2, 4, 8, 10, 14, 15, 19, 22,
1, 4, 7, 10, 14, 16, 19, 21,
};
for (size_t i = 0; i < n_rows * n_cols; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
// Sparse case (90% missing): rows are padded to row_stride entries; bin id
// 24 (== n_bins) is the padding/"missing" symbol in the expected output.
TEST(GpuHist, BuildGidxSparse) {
int const n_rows = 16, n_cols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols, 0.9f);
std::vector<common::CompressedByteT> h_gidx_buffer;
h_gidx_buffer = shard.gidx_buffer.AsVector();
common::CompressedIterator<uint32_t> gidx(h_gidx_buffer.data(), 25);
ASSERT_LE(shard.row_stride, 3);
// row_stride = 3, 16 rows, 48 entries for ELLPack
std::vector<uint32_t> solution = {
15, 24, 24, 0, 24, 24, 24, 24, 24, 24, 24, 24, 20, 24, 24, 24,
24, 24, 24, 24, 24, 5, 24, 24, 0, 16, 24, 15, 24, 24, 24, 24,
24, 7, 14, 16, 4, 24, 24, 24, 24, 24, 9, 24, 24, 1, 24, 24
};
for (size_t i = 0; i < n_rows * shard.row_stride; ++i) {
ASSERT_EQ(solution[i], gidx[i]);
}
}
// Reference (grad, hess) histogram per bin, matching the deterministic
// gradient pairs generated in TestBuildHist below.
std::vector<GradientPairPrecise> GetHostHistGpair() {
// 24 bins, 3 bins for each feature (column).
std::vector<GradientPairPrecise> hist_gpair = {
{0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
{2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
{1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
{2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
{1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
{1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
{0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
{2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}
};
return hist_gpair;
}
// Shared body for the global-memory and shared-memory histogram builder
// tests: builds gidx, seeds deterministic gradient pairs (SimpleLCG), runs
// the builder on node 0, and compares the device histogram against
// GetHostHistGpair() within 0.01 tolerance.
void TestBuildHist(GPUHistBuilderBase& builder) {
int const n_rows = 16, n_cols = 8;
TrainParam param;
param.max_depth = 6;
param.n_gpus = 1;
param.max_leaves = 0;
DeviceShard shard(0, 0, n_rows, param);
BuildGidx(&shard, n_rows, n_cols);
xgboost::SimpleLCG gen;
xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
std::vector<GradientPair> h_gpair(n_rows);
for (size_t i = 0; i < h_gpair.size(); ++i) {
bst_float grad = dist(&gen);
bst_float hess = dist(&gen);
h_gpair[i] = GradientPair(grad, hess);
}
thrust::device_vector<GradientPair> gpair (n_rows);
gpair = h_gpair;
int num_symbols = shard.n_bins + 1;
thrust::host_vector<common::CompressedByteT> h_gidx_buffer (
shard.gidx_buffer.Size());
common::CompressedByteT* d_gidx_buffer_ptr = shard.gidx_buffer.Data();
dh::safe_cuda(cudaMemcpy(h_gidx_buffer.data(), d_gidx_buffer_ptr,
sizeof(common::CompressedByteT) * shard.gidx_buffer.Size(),
cudaMemcpyDeviceToHost));
auto gidx = common::CompressedIterator<uint32_t>(h_gidx_buffer.data(),
num_symbols);
shard.ridx_segments.resize(1);
shard.ridx_segments[0] = Segment(0, n_rows);
shard.hist.AllocateHistogram(0);
shard.gpair.copy(gpair.begin(), gpair.end());
thrust::sequence(shard.ridx.CurrentDVec().tbegin(),
shard.ridx.CurrentDVec().tend());
builder.Build(&shard, 0);
DeviceHistogram d_hist = shard.hist;
GradientPairSumT* d_histptr {d_hist.GetHistPtr(0)};
// d_hist.data stored in float, not gradient pair
thrust::host_vector<GradientPairSumT> h_result (d_hist.data.size()/2);
size_t data_size = sizeof(GradientPairSumT) / (
sizeof(GradientPairSumT) / sizeof(GradientPairSumT::ValueT));
data_size *= d_hist.data.size();
dh::safe_cuda(cudaMemcpy(h_result.data(), d_histptr, data_size,
cudaMemcpyDeviceToHost));
std::vector<GradientPairPrecise> solution = GetHostHistGpair();
std::cout << std::fixed;
for (size_t i = 0; i < h_result.size(); ++i) {
EXPECT_NEAR(h_result[i].GetGrad(), solution[i].GetGrad(), 0.01f);
EXPECT_NEAR(h_result[i].GetHess(), solution[i].GetHess(), 0.01f);
}
}
// Both builder variants must produce the same histogram as the reference.
TEST(GpuHist, BuildHistGlobalMem) {
GlobalMemHistBuilder builder;
TestBuildHist(builder);
}
TEST(GpuHist, BuildHistSharedMem) {
SharedMemHistBuilder builder;
TestBuildHist(builder);
}
// Fixed 8-feature, 3-cuts-per-feature cut matrix used by the split tests.
common::HistCutMatrix GetHostCutMatrix () {
common::HistCutMatrix cmat;
cmat.row_ptr = {0, 3, 6, 9, 12, 15, 18, 21, 24};
cmat.min_val = {0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f};
// 24 cut fields, 3 cut fields for each feature (column).
// Each row of the cut represents the cuts for a data column.
cmat.cut = {0.30f, 0.67f, 1.64f,
0.32f, 0.77f, 1.95f,
0.29f, 0.70f, 1.80f,
0.32f, 0.75f, 1.85f,
0.18f, 0.59f, 1.69f,
0.25f, 0.74f, 2.00f,
0.26f, 0.74f, 1.98f,
0.26f, 0.71f, 1.83f};
return cmat;
}
// TODO(trivialfis): This test is over simplified.
// Hand-constructs a shard with the reference histogram and cut matrix (no
// training run), then checks that EvaluateSplits picks feature 7 at cut
// value 0.26 for the root node.
TEST(GpuHist, EvaluateSplits) {
constexpr int n_rows = 16;
constexpr int n_cols = 8;
TrainParam param;
param.max_depth = 1;
param.n_gpus = 1;
param.colsample_bylevel = 1;
param.colsample_bytree = 1;
param.min_child_weight = 0.01;
// Disable all parameters.
param.reg_alpha = 0.0;
param.reg_lambda = 0;
param.max_delta_step = 0.0;
for (size_t i = 0; i < n_cols; ++i) {
param.monotone_constraints.emplace_back(0);
}
int max_bins = 4;
// Initialize DeviceShard
std::unique_ptr<DeviceShard> shard {new DeviceShard(0, 0, n_rows, param)};
// Initialize DeviceShard::node_sum_gradients
shard->node_sum_gradients = {{6.4f, 12.8f}};
// Initialize DeviceShard::cut
common::HistCutMatrix cmat = GetHostCutMatrix();
// Copy cut matrix to device.
DeviceShard::DeviceHistCutMatrix cut;
shard->ba.Allocate(0, true,
&(shard->cut_.feature_segments), cmat.row_ptr.size(),
&(shard->cut_.min_fvalue), cmat.min_val.size(),
&(shard->cut_.gidx_fvalue_map), 24,
&(shard->monotone_constraints), n_cols);
shard->cut_.feature_segments.copy(cmat.row_ptr.begin(), cmat.row_ptr.end());
shard->cut_.gidx_fvalue_map.copy(cmat.cut.begin(), cmat.cut.end());
shard->monotone_constraints.copy(param.monotone_constraints.begin(),
param.monotone_constraints.end());
// Initialize DeviceShard::hist
shard->hist.Init(0, (max_bins - 1) * n_cols);
shard->hist.AllocateHistogram(0);
// Each row of hist_gpair represents gpairs for one feature.
// Each entry represents a bin.
std::vector<GradientPairPrecise> hist_gpair = GetHostHistGpair();
std::vector<bst_float> hist;
for (auto pair : hist_gpair) {
hist.push_back(pair.GetGrad());
hist.push_back(pair.GetHess());
}
ASSERT_EQ(shard->hist.data.size(), hist.size());
thrust::copy(hist.begin(), hist.end(),
shard->hist.data.begin());
// Initialize GPUHistMaker
GPUHistMaker hist_maker = GPUHistMaker();
hist_maker.param_ = param;
hist_maker.shards_.push_back(std::move(shard));
hist_maker.column_sampler_.Init(n_cols,
param.colsample_bylevel,
param.colsample_bytree,
false);
RegTree tree;
tree.InitModel();
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
hist_maker.info_ = &info;
hist_maker.node_value_constraints_.resize(1);
hist_maker.node_value_constraints_[0].lower_bound = -1.0;
hist_maker.node_value_constraints_[0].upper_bound = 1.0;
std::vector<DeviceSplitCandidate> res =
hist_maker.EvaluateSplits({0}, &tree);
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res[0].findex, 7);
ASSERT_NEAR(res[0].fvalue, 0.26, xgboost::kRtEps);
}
// Hand-constructs a split candidate (feature 2, fvalue 0.59) on a dense
// sequential gidx matrix and checks that ApplySplit partitions the 16 row
// indices into segments [0,6) left and [6,16) right of the new children.
TEST(GpuHist, ApplySplit) {
GPUHistMaker hist_maker = GPUHistMaker();
int constexpr nid = 0;
int constexpr n_rows = 16;
int constexpr n_cols = 8;
TrainParam param;
param.silent = true;
// Initialize shard
for (size_t i = 0; i < n_cols; ++i) {
param.monotone_constraints.emplace_back(0);
}
hist_maker.shards_.resize(1);
hist_maker.shards_[0].reset(new DeviceShard(0, 0, n_rows, param));
auto& shard = hist_maker.shards_.at(0);
shard->ridx_segments.resize(3);  // 3 nodes.
shard->node_sum_gradients.resize(3);
shard->ridx_segments[0] = Segment(0, n_rows);
shard->ba.Allocate(0, true, &(shard->ridx), n_rows,
&(shard->position), n_rows);
shard->row_stride = n_cols;
thrust::sequence(shard->ridx.CurrentDVec().tbegin(),
shard->ridx.CurrentDVec().tend());
// Free inside DeviceShard
dh::safe_cuda(cudaMallocHost(&(shard->tmp_pinned), sizeof(int64_t)));
// Initialize GPUHistMaker
hist_maker.param_ = param;
RegTree tree;
tree.InitModel();
DeviceSplitCandidate candidate;
candidate.Update(2, kLeftDir,
0.59, 4,  // fvalue has to be equal to one of the cut field
GradientPair(8.2, 2.8), GradientPair(6.3, 3.6),
GPUTrainingParam(param));
GPUHistMaker::ExpandEntry candidate_entry {0, 0, candidate, 0};
candidate_entry.nid = nid;
auto const& nodes = tree.GetNodes();
size_t n_nodes = nodes.size();
// Used to get bin_id in update position.
common::HistCutMatrix cmat = GetHostCutMatrix();
hist_maker.hmat_ = cmat;
MetaInfo info;
info.num_row_ = n_rows;
info.num_col_ = n_cols;
info.num_nonzero_ = n_rows * n_cols;  // Dense
// Initialize gidx
int n_bins = 24;
int row_stride = n_cols;
int num_symbols = n_bins + 1;
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(
row_stride * n_rows, num_symbols);
shard->ba.Allocate(0, param.silent,
&(shard->gidx_buffer), compressed_size_bytes);
common::CompressedBufferWriter wr(num_symbols);
std::vector<int> h_gidx (n_rows * row_stride);
std::iota(h_gidx.begin(), h_gidx.end(), 0);
std::vector<common::CompressedByteT> h_gidx_compressed (compressed_size_bytes);
wr.Write(h_gidx_compressed.data(), h_gidx.begin(), h_gidx.end());
shard->gidx_buffer.copy(h_gidx_compressed.begin(), h_gidx_compressed.end());
shard->gidx = common::CompressedIterator<uint32_t>(
shard->gidx_buffer.Data(), num_symbols);
hist_maker.info_ = &info;
hist_maker.ApplySplit(candidate_entry, &tree);
ASSERT_FALSE(tree[nid].IsLeaf());
int left_nidx = tree[nid].LeftChild();
int right_nidx = tree[nid].RightChild();
ASSERT_EQ(shard->ridx_segments[left_nidx].begin, 0);
ASSERT_EQ(shard->ridx_segments[left_nidx].end, 6);
ASSERT_EQ(shard->ridx_segments[right_nidx].begin, 6);
ASSERT_EQ(shard->ridx_segments[right_nidx].end, 16);
}
}  // namespace tree
}  // namespace xgboost
|
dce9a1d9f4e73def3e8f4fa8fed131fbebfc08ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"main.h"
#include<iostream>
#include<cmath>
#include<unordered_map>
#include<vector>
#include<cstdio>
#include <stdio.h>
#include <string.h>
#include <ctime>
#include <hiprand/hiprand_kernel.h>
using std::unordered_map;
using std::vector;
using hhg::Tool;
using std::string;
using std::cout;
using std::endl;
using std::ends;
// Grid-stride permutation-test kernel: each iteration shuffles col2
// (Fisher-Yates with a per-thread hiprand stream seeded by the thread index),
// recomputes the HHG association statistic S between col1 and the shuffled
// column, and increments results[index] when the permuted S reaches the
// observed `originS`. The host sums `results` to obtain the permutation
// count. Launch with >= 1000 total threads for one permutation per thread.
//
// Fixes over the original:
//  - `delete[] table;` freed a stack array (undefined behavior) — a local
//    array must not be freed at all;
//  - `newOrder` is obtained with malloc(), so it must be released with
//    free(), not delete[] (allocator mismatch is UB);
//  - the device-heap malloc result is now null-checked before use.
__global__
void cuda_calcS(float * col1, float * col2, int * results,int size,float originS){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Per-thread scratch copy of col2, reshuffled on every permutation.
float *newOrder;
newOrder = (float*) malloc (size*sizeof(float));
if (newOrder == NULL) {
// Device heap exhausted: skip this thread's permutations rather than
// dereference a null pointer.
return;
}
// 2x2 contingency table for the current (index1, index2) pair.
float table[4] = {0,0,0,0};
//random number generator initialize
hiprandState_t state;
unsigned int seed = index;
hiprand_init(seed, 0, 0, &state);
for(int iterationTime = index; iterationTime <1000; iterationTime+=stride){
for(int i = 0; i < size; i++){
newOrder[i] = col2[i];
}
// Fisher-Yates shuffle of the scratch column.
float tmp;
int randomIndex;
for(int i = size-1; i > 0; i--){
randomIndex = hiprand(&state)%(i+1);
tmp = newOrder[randomIndex];
newOrder[randomIndex] = newOrder[i];
newOrder[i] = tmp;
}
float s = 0;
for (int index1 = 0; index1 < size; index1 ++){
for(int index2 = index1 + 1 ; index2 < size; index2++){
float rx = abs(col1[index1] - col1[index2]);
float ry = abs(newOrder[index1] - newOrder[index2]);
table[0] = 0;table[1] = 0;table[2] = 0;table[3] = 0;
for(int index3 = 0; index3 < size; index3++){
if(index3 != index1 && index3 != index2){
float dy = abs(newOrder[index1] - newOrder[index3]);
float dx = abs(col1[index1] - col1[index3]);
if(dx <= rx){
if(dy <= ry) table[0]++;
else table[2]++;
}
else{
if(dy <= ry) table[1]++;
else table[3]++;
}
}
}
float a12 = table[1], a21 = table[2], a11 = table[0], a22 = table[3];
float a1_ = a11 + a12, a2_ = a21 + a22, a_1 = a11 + a21, a_2 = a22 + a12;
// A degenerate table contributes nothing (avoids division by zero).
if( a1_==0 || a2_==0 || a_1==0 || a_2==0 ){
continue;
}
s += (size-2.0)*pow(a12*a21 - a11*a22 , 2) / (a_1*a_2*a1_*a2_);
}
}
if(s >= originS) results[index] ++;
}
free(newOrder);
}
// Driver: generates random data, computes the observed HHG statistic on the
// CPU, runs 1000 GPU permutations of cuda_calcS via managed memory, and
// reports the permutation p-value and wall-clock duration.
// Usage: <prog> <number_of_rows>
int main(int argc,char *argv[]){
// NOTE(review): `t` is read before initialization to call getInstance() —
// works only if getInstance is static; prefer Tool::getInstance(). Confirm.
Tool *t = t->getInstance();
std::clock_t start;
if(argc != 2){
cout<<"Invalid number of Parameters"<<endl;
return 0;
}
int numofRow = atoi(argv[1]);
double duration;
unordered_map<string, vector<float>> data = t->dataGenerate(numofRow, 5, 1.0);
start = std::clock();
unordered_map<string, vector<float>> cols = t->randomlyPickColumns(data, 2);
int size = numofRow;
// Enough threads for one permutation each (1000 permutations total).
int blockSize = 256;
int numBlocks = (1000 + blockSize - 1) / blockSize;
float *col1, *col2;
int *results;
// Managed allocations shared between host init and the kernel.
hipMallocManaged(&col1, size*sizeof(float));
hipMallocManaged(&col2, size*sizeof(float));
hipMallocManaged(&results, numBlocks*blockSize*sizeof(int));
hipMemset(results, 0, numBlocks*blockSize*sizeof(int));
int counter = 0;
float originS = 0;
// Observed statistic, computed on the host from the randomly picked pair.
// NOTE(review): the device columns below are filled from `data`'s first two
// buckets, not from `cols` — these may be different columns; confirm.
originS = t->calcS(cols);
//cout<<"Original S: "<< originS << endl << "====================================="<<endl;
for(int i = 0 ; i < size ; i++){
auto iter = data.begin();
col1[i] = iter->second[i];
iter++;
col2[i] = iter->second[i];
}
int device = -1;
hipGetDevice(&device);
hipMemPrefetchAsync(col1, size*sizeof(float), device, NULL);
hipMemPrefetchAsync(col2, size*sizeof(float), device, NULL);
hipMemPrefetchAsync(results, numBlocks*blockSize*sizeof(int), device, NULL);
// NOTE(review): `size` is a host stack variable, not managed memory;
// prefetching it is presumably invalid (error is ignored) — confirm.
hipMemPrefetchAsync(&size, sizeof(int), device, NULL);
hipLaunchKernelGGL(( cuda_calcS), dim3(numBlocks), dim3(blockSize), 0, 0, col1, col2, results, size, originS);
hipDeviceSynchronize();
// Sum per-thread hit counts into the total permutation count.
for(int i = 0 ; i < numBlocks*blockSize; i++){
counter+= results[i];
}
// NOTE(review): if iterTime_ is an integer type this truncates to 0/1 —
// confirm its declared type in Tool.
float p = counter / t->iterTime_;
cout<<"There are "<<counter<<" S greater than the original S."<<endl;
cout<<"P = "<<p<<endl;
duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
std::cout<< "duration: "<<duration <<"seconds\n";
hipFree(col1);
hipFree(col2);
hipFree(results);
}
| dce9a1d9f4e73def3e8f4fa8fed131fbebfc08ed.cu | #include"main.h"
#include<iostream>
#include<cmath>
#include<unordered_map>
#include<vector>
#include<cstdio>
#include <stdio.h>
#include <string.h>
#include <ctime>
#include <curand_kernel.h>
using std::unordered_map;
using std::vector;
using hhg::Tool;
using std::string;
using std::cout;
using std::endl;
using std::ends;
// Permutation-test kernel (grid-stride over 1000 iterations).
// Each iteration copies col2 into a per-thread scratch buffer, applies a
// Fisher-Yates shuffle, computes the HHG-style S statistic against col1, and
// bumps results[thread] when s >= originS. Callers must size `results` to at
// least gridDim.x * blockDim.x ints, zero-initialized.
__global__
void cuda_calcS(float * col1, float * col2, int * results, int size, float originS){
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  // Per-thread shuffled copy of col2, allocated on the device heap.
  float *newOrder;
  newOrder = (float*) malloc (size*sizeof(float));
  if (newOrder == NULL) return;  // device heap exhausted: skip this thread
  float table[4] = {0,0,0,0};
  // Per-thread RNG seeded with the global thread index (deterministic per run).
  curandState state;
  unsigned int seed = index;
  curand_init(seed, 0, 0, &state);
  for(int iterationTime = index; iterationTime <1000; iterationTime+=stride){
    for(int i = 0; i < size; i++){
      newOrder[i] = col2[i];
    }
    // Fisher-Yates shuffle of newOrder.
    float tmp;
    int randomIndex;
    for(int i = size-1; i > 0; i--){
      randomIndex = curand(&state)%(i+1);
      tmp = newOrder[randomIndex];
      newOrder[randomIndex] = newOrder[i];
      newOrder[i] = tmp;
    }
    float s = 0;
    for (int index1 = 0; index1 < size; index1 ++){
      for(int index2 = index1 + 1 ; index2 < size; index2++){
        float rx = abs(col1[index1] - col1[index2]);
        float ry = abs(newOrder[index1] - newOrder[index2]);
        table[0] = 0;table[1] = 0;table[2] = 0;table[3] = 0;
        // Build the 2x2 contingency table of points inside/outside the
        // (rx, ry) radii around index1.
        for(int index3 = 0; index3 < size; index3++){
          if(index3 != index1 && index3 != index2){
            float dy = abs(newOrder[index1] - newOrder[index3]);
            float dx = abs(col1[index1] - col1[index3]);
            if(dx <= rx){
              if(dy <= ry) table[0]++;
              else table[2]++;
            }
            else{
              if(dy <= ry) table[1]++;
              else table[3]++;
            }
          }
        }
        float a12 = table[1], a21 = table[2], a11 = table[0], a22 = table[3];
        float a1_ = a11 + a12, a2_ = a21 + a22, a_1 = a11 + a21, a_2 = a22 + a12;
        // Skip degenerate tables (any zero margin would divide by zero).
        if( a1_==0 || a2_==0 || a_1==0 || a_2==0 ){
          continue;
        }
        s += (size-2.0)*pow(a12*a21 - a11*a22 , 2) / (a_1*a_2*a1_*a2_);
      }
    }
    if(s >= originS) results[index] ++;
  }
  // newOrder came from device malloc(), so it must be released with free().
  // (The original called delete[] on it — and on the stack array `table` —
  //  both of which are undefined behavior.)
  free(newOrder);
}
// Entry point: GPU permutation test for the HHG-style S statistic.
// Usage: <prog> <numRows>. Generates a random table, computes the observed S
// on the host, then launches cuda_calcS to evaluate S over 1000 random
// permutations and reports how many meet or exceed the observed value.
int main(int argc, char *argv[]) {
  // getInstance() is presumably a static singleton accessor — call it through
  // the class. (The original `Tool *t = t->getInstance();` invoked a method
  // through an uninitialized pointer, which is undefined behavior.)
  Tool *t = Tool::getInstance();
  std::clock_t start;
  if (argc != 2) {
    cout << "Invalid number of Parameters" << endl;
    return 0;
  }
  int numofRow = atoi(argv[1]);
  double duration;
  unordered_map<string, vector<float>> data = t->dataGenerate(numofRow, 5, 1.0);
  start = std::clock();
  unordered_map<string, vector<float>> cols = t->randomlyPickColumns(data, 2);
  int size = numofRow;
  int blockSize = 256;
  // 1000 total permutation iterations; must stay in sync with the loop bound
  // hard-coded inside cuda_calcS.
  int numBlocks = (1000 + blockSize - 1) / blockSize;
  float *col1, *col2;
  int *results;
  cudaMallocManaged(&col1, size * sizeof(float));
  cudaMallocManaged(&col2, size * sizeof(float));
  cudaMallocManaged(&results, numBlocks * blockSize * sizeof(int));
  cudaMemset(results, 0, numBlocks * blockSize * sizeof(int));
  int counter = 0;
  float originS = 0;
  originS = t->calcS(cols);
  // Copy the first two columns of the generated table into managed buffers.
  // NOTE(review): unordered_map iteration order is unspecified, so "first two"
  // is arbitrary — confirm it matches the columns calcS(cols) used.
  for (int i = 0; i < size; i++) {
    auto iter = data.begin();
    col1[i] = iter->second[i];
    iter++;
    col2[i] = iter->second[i];
  }
  int device = -1;
  cudaGetDevice(&device);
  cudaMemPrefetchAsync(col1, size * sizeof(float), device, NULL);
  cudaMemPrefetchAsync(col2, size * sizeof(float), device, NULL);
  cudaMemPrefetchAsync(results, numBlocks * blockSize * sizeof(int), device, NULL);
  // (Removed: prefetching &size — a host stack variable is not managed
  //  memory, so that call was invalid and could only return an error.)
  cuda_calcS<<<numBlocks, blockSize>>>(col1, col2, results, size, originS);
  cudaError_t launchErr = cudaGetLastError();  // surface bad-launch errors
  if (launchErr != cudaSuccess) {
    cout << "Kernel launch failed: " << cudaGetErrorString(launchErr) << endl;
  }
  cudaDeviceSynchronize();
  for (int i = 0; i < numBlocks * blockSize; i++) {
    counter += results[i];
  }
  // Cast so the division cannot silently truncate if iterTime_ is integral.
  float p = static_cast<float>(counter) / t->iterTime_;
  cout << "There are " << counter << " S greater than the original S." << endl;
  cout << "P = " << p << endl;
  duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
  std::cout << "duration: " << duration << "seconds\n";
  cudaFree(col1);
  cudaFree(col2);
  cudaFree(results);
}
|
241c1172a0a67648a4eba2fe049f39a524bce3c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <functional>
#include <inference/embedding_feature_combiner.hpp>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
// Combines per-slot embedding feature vectors (CSR layout in row_ptrs) into
// one output vector per (sample, slot): Sum, or Mean when combiner_type is
// Mean and the slot holds more than one feature.
// Launch layout (see launch_embedding_feature_combine_kernel): one block per
// sample, one thread per embedding-vector element.
template <typename TypeEmbedding>
__global__ void embedding_feature_combine_kernel(const float* input, TypeEmbedding* output,
                                                 const int* row_ptrs, int batch_size, int slot_num,
                                                 int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  // each block partition corresponding to one sample
  const int bid = block.group_index().x;
  // each thread corresponding to one element in the embedding vector
  const int tid = block.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = tmp;
    }  // end for
  }    // end if
}
// __half specialization: identical reduction done in float, with an explicit
// __float2half conversion on the final store.
template <>
__global__ void embedding_feature_combine_kernel(const float* input, __half* output,
                                                 const int* row_ptrs, int batch_size, int slot_num,
                                                 int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  // each block partition corresponding to one sample
  const int bid = block.group_index().x;
  // each thread corresponding to one element in the embedding vector
  const int tid = block.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot (accumulate in float for precision)
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = __float2half(tmp);
    }  // end for
  }    // end if
}
// Tiled variant for small embedding vectors (embedding_vec_size <= TileSize):
// the block is partitioned into TileSize-thread tiles, each tile handling one
// sample, so multiple samples share a block and occupancy stays high.
template <typename TypeEmbedding, int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, TypeEmbedding* output,
                                                       const int* row_ptrs, int batch_size,
                                                       int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  const auto& tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = tmp;
    }  // end for
  }    // end if
}
// __half overload of the tiled variant: same tile-per-sample layout, float
// accumulation, __float2half conversion on the final store.
template <int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, __half* output,
                                                       const int* row_ptrs, int batch_size,
                                                       int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  const auto& tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot (accumulate in float for precision)
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = __float2half(tmp);
    }  // end for
  }    // end if
}
// Picks a launch configuration by embedding_vec_size: small vectors use the
// tiled kernel with the smallest power-of-two tile >= vec size (64 threads per
// block, several samples per block); larger vectors fall back to one block per
// sample with one thread per vector element. Asynchronous on `stream`.
template <typename TypeEmbedding>
void launch_embedding_feature_combine_kernel(const float* input, TypeEmbedding* output,
                                             const int* row_ptrs, int batch_size, int slot_num,
                                             int embedding_vec_size,
                                             EmbeddingFeatureCombiner_t combiner_type,
                                             hipStream_t stream) {
  if (embedding_vec_size <= 2) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 2>)
        , dim3((batch_size - 1) / 32 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size,
        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 4) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 4>)
        , dim3((batch_size - 1) / 16 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size,
        slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 8) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 8>)
        , dim3((batch_size - 1) / 8 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size, slot_num,
        embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 16) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 16>)
        , dim3((batch_size - 1) / 4 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size, slot_num,
        embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 32) {
    hipLaunchKernelGGL(( embedding_feature_combine_tiled_kernel<TypeEmbedding, 32>)
        , dim3((batch_size - 1) / 2 + 1), dim3(64), 0, stream, input, output, row_ptrs, batch_size, slot_num,
        embedding_vec_size, combiner_type);
  } else {
    // each thread corresponds to one element in an embedding vector
    hipLaunchKernelGGL(( embedding_feature_combine_kernel), dim3(batch_size), dim3(embedding_vec_size), 0, stream,
        input, output, row_ptrs, batch_size, slot_num, embedding_vec_size, combiner_type);
  }
}
} // end of namespace
// Validates the input (2D, non-empty) and row-pointer (1D, batch*slot+1)
// tensors, reserves the (batch, slot, vec) output tensor from blobs_buff, and
// records the tensors for later use in fprop(). Re-throws validation errors
// after logging them.
template <typename TypeEmbedding>
EmbeddingFeatureCombiner<TypeEmbedding>::EmbeddingFeatureCombiner(
    const std::shared_ptr<Tensor2<float>>& in_tensor,
    const std::shared_ptr<Tensor2<int>>& row_ptrs_tensor, Tensor2<TypeEmbedding>& out_tensor,
    int batch_size, int slot_num, EmbeddingFeatureCombiner_t combiner_type,
    const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
    const std::shared_ptr<GPUResource>& gpu_resource)
    : Layer(gpu_resource),
      slot_num_(slot_num),
      batch_size_(batch_size),
      combiner_type_(combiner_type) {
  try {
    // error input checking
    const auto& in_dims = in_tensor->get_dimensions();
    const auto& row_ptrs_dims = row_ptrs_tensor->get_dimensions();
    if ((int)in_dims.size() != 2) CK_THROW_(Error_t::WrongInput, "The input tensor must be 2D");
    for (auto i : in_dims) {
      if (i == 0) {
        CK_THROW_(Error_t::WrongInput, "The input dims can not be 0");
      }
    }
    if ((int)row_ptrs_dims.size() != 1)
      CK_THROW_(Error_t::WrongInput, "The row pointers tensor must be 1D");
    if ((int)row_ptrs_dims[0] != batch_size * slot_num + 1)
      CK_THROW_(Error_t::WrongInput,
                "The dimension of row pointers tensor mismatch number of samples");
    embedding_vec_size_ = in_dims[1];
    std::vector<size_t> out_dims{static_cast<size_t>(batch_size_), static_cast<size_t>(slot_num_),
                                 static_cast<size_t>(embedding_vec_size_)};
    blobs_buff->reserve(out_dims, &out_tensor);
    out_tensors_.push_back(out_tensor);
    in_tensors_.push_back(in_tensor);
    row_ptrs_tensors_.push_back(row_ptrs_tensor);
    // (Removed two unused locals `iptr1`/`iptr2` that only aliased the tensor
    //  pointers and triggered unused-variable warnings.)
  } catch (const std::runtime_error& rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
// Inference-only forward pass: launches the combine kernel on this layer's
// GPU stream. Throws IllegalCall if invoked with is_train == true. In debug
// builds, synchronizes and checks for kernel errors.
template <typename TypeEmbedding>
void EmbeddingFeatureCombiner<TypeEmbedding>::fprop(bool is_train) {
  if (is_train)
    CK_THROW_(Error_t::IllegalCall,
              "The fprop() of EmbeddingFeatureCombiner should only be used for inference");
  CudaDeviceContext context(get_device_id());
  float* input = in_tensors_[0]->get_ptr();
  TypeEmbedding* output = out_tensors_[0].get_ptr();
  int* row_ptrs = row_ptrs_tensors_[0]->get_ptr();
  // (Removed unused `in_dims`/`out_dims` locals — they were never read.)
  launch_embedding_feature_combine_kernel(input, output, row_ptrs, batch_size_, slot_num_,
                                          embedding_vec_size_, combiner_type_,
                                          get_gpu().get_stream());
#ifndef NDEBUG
  hipDeviceSynchronize();
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
template class EmbeddingFeatureCombiner<float>;
template class EmbeddingFeatureCombiner<__half>;
} // namespace HugeCTR
| 241c1172a0a67648a4eba2fe049f39a524bce3c2.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <functional>
#include <inference/embedding_feature_combiner.hpp>
#include <utils.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
// Combines per-slot embedding feature vectors (CSR layout in row_ptrs) into
// one output vector per (sample, slot): Sum, or Mean when combiner_type is
// Mean and the slot holds more than one feature.
// Launch layout (see launch_embedding_feature_combine_kernel): one block per
// sample, one thread per embedding-vector element.
template <typename TypeEmbedding>
__global__ void embedding_feature_combine_kernel(const float* input, TypeEmbedding* output,
                                                 const int* row_ptrs, int batch_size, int slot_num,
                                                 int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  // each block partition corresponding to one sample
  const int bid = block.group_index().x;
  // each thread corresponding to one element in the embedding vector
  const int tid = block.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = tmp;
    }  // end for
  }    // end if
}
// __half specialization: identical reduction done in float, with an explicit
// __float2half conversion on the final store.
template <>
__global__ void embedding_feature_combine_kernel(const float* input, __half* output,
                                                 const int* row_ptrs, int batch_size, int slot_num,
                                                 int embedding_vec_size,
                                                 EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  // each block partition corresponding to one sample
  const int bid = block.group_index().x;
  // each thread corresponding to one element in the embedding vector
  const int tid = block.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot (accumulate in float for precision)
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = __float2half(tmp);
    }  // end for
  }    // end if
}
// Tiled variant for small embedding vectors (embedding_vec_size <= TileSize):
// the block is partitioned into TileSize-thread tiles, each tile handling one
// sample, so multiple samples share a block and occupancy stays high.
template <typename TypeEmbedding, int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, TypeEmbedding* output,
                                                       const int* row_ptrs, int batch_size,
                                                       int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  const auto& tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = tmp;
    }  // end for
  }    // end if
}
// __half overload of the tiled variant: same tile-per-sample layout, float
// accumulation, __float2half conversion on the final store.
template <int TileSize>
__global__ void embedding_feature_combine_tiled_kernel(const float* input, __half* output,
                                                       const int* row_ptrs, int batch_size,
                                                       int slot_num, int embedding_vec_size,
                                                       EmbeddingFeatureCombiner_t combiner_type) {
  const auto& block = cooperative_groups::this_thread_block();
  const auto& tile = cooperative_groups::tiled_partition<TileSize>(block);
  // each block partition corresponding to one sample
  const int bid = block.group_index().x * tile.meta_group_size() + tile.meta_group_rank();
  // each thread corresponding to one element in the embedding vector
  const int tid = tile.thread_rank();
  if (bid < batch_size && tid < embedding_vec_size) {
    for (int i = 0; i < slot_num; i++) {
      int feature_row_index = bid * slot_num + i;
      int row_offset = row_ptrs[feature_row_index];  // row offset within input
      int feature_num =
          row_ptrs[feature_row_index + 1] - row_offset;  // num of feature vectors in one slot
      float tmp = 0.0f;
      // reduce in one slot (accumulate in float for precision)
      for (int j = 0; j < feature_num; j++)
        tmp += input[(row_offset + j) * embedding_vec_size + tid];
      if (combiner_type == EmbeddingFeatureCombiner_t::Mean && feature_num > 1) {
        tmp /= feature_num;
      }
      output[feature_row_index * embedding_vec_size + tid] = __float2half(tmp);
    }  // end for
  }    // end if
}
// Picks a launch configuration by embedding_vec_size: small vectors use the
// tiled kernel with the smallest power-of-two tile >= vec size (64 threads per
// block, several samples per block); larger vectors fall back to one block per
// sample with one thread per vector element. Asynchronous on `stream`.
template <typename TypeEmbedding>
void launch_embedding_feature_combine_kernel(const float* input, TypeEmbedding* output,
                                             const int* row_ptrs, int batch_size, int slot_num,
                                             int embedding_vec_size,
                                             EmbeddingFeatureCombiner_t combiner_type,
                                             cudaStream_t stream) {
  if (embedding_vec_size <= 2) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 2>
        <<<(batch_size - 1) / 32 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size,
                                                       slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 4) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 4>
        <<<(batch_size - 1) / 16 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size,
                                                       slot_num, embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 8) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 8>
        <<<(batch_size - 1) / 8 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size, slot_num,
                                                      embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 16) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 16>
        <<<(batch_size - 1) / 4 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size, slot_num,
                                                      embedding_vec_size, combiner_type);
  } else if (embedding_vec_size <= 32) {
    embedding_feature_combine_tiled_kernel<TypeEmbedding, 32>
        <<<(batch_size - 1) / 2 + 1, 64, 0, stream>>>(input, output, row_ptrs, batch_size, slot_num,
                                                      embedding_vec_size, combiner_type);
  } else {
    // each thread corresponds to one element in an embedding vector
    embedding_feature_combine_kernel<<<batch_size, embedding_vec_size, 0, stream>>>(
        input, output, row_ptrs, batch_size, slot_num, embedding_vec_size, combiner_type);
  }
}
} // end of namespace
// Validates the input (2D, non-empty) and row-pointer (1D, batch*slot+1)
// tensors, reserves the (batch, slot, vec) output tensor from blobs_buff, and
// records the tensors for later use in fprop(). Re-throws validation errors
// after logging them.
template <typename TypeEmbedding>
EmbeddingFeatureCombiner<TypeEmbedding>::EmbeddingFeatureCombiner(
    const std::shared_ptr<Tensor2<float>>& in_tensor,
    const std::shared_ptr<Tensor2<int>>& row_ptrs_tensor, Tensor2<TypeEmbedding>& out_tensor,
    int batch_size, int slot_num, EmbeddingFeatureCombiner_t combiner_type,
    const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff,
    const std::shared_ptr<GPUResource>& gpu_resource)
    : Layer(gpu_resource),
      slot_num_(slot_num),
      batch_size_(batch_size),
      combiner_type_(combiner_type) {
  try {
    // error input checking
    const auto& in_dims = in_tensor->get_dimensions();
    const auto& row_ptrs_dims = row_ptrs_tensor->get_dimensions();
    if ((int)in_dims.size() != 2) CK_THROW_(Error_t::WrongInput, "The input tensor must be 2D");
    for (auto i : in_dims) {
      if (i == 0) {
        CK_THROW_(Error_t::WrongInput, "The input dims can not be 0");
      }
    }
    if ((int)row_ptrs_dims.size() != 1)
      CK_THROW_(Error_t::WrongInput, "The row pointers tensor must be 1D");
    if ((int)row_ptrs_dims[0] != batch_size * slot_num + 1)
      CK_THROW_(Error_t::WrongInput,
                "The dimension of row pointers tensor mismatch number of samples");
    embedding_vec_size_ = in_dims[1];
    std::vector<size_t> out_dims{static_cast<size_t>(batch_size_), static_cast<size_t>(slot_num_),
                                 static_cast<size_t>(embedding_vec_size_)};
    blobs_buff->reserve(out_dims, &out_tensor);
    out_tensors_.push_back(out_tensor);
    in_tensors_.push_back(in_tensor);
    row_ptrs_tensors_.push_back(row_ptrs_tensor);
    // (Removed two unused locals `iptr1`/`iptr2` that only aliased the tensor
    //  pointers and triggered unused-variable warnings.)
  } catch (const std::runtime_error& rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
// Inference-only forward pass: launches the combine kernel on this layer's
// GPU stream. Throws IllegalCall if invoked with is_train == true. In debug
// builds, synchronizes and checks for kernel errors.
template <typename TypeEmbedding>
void EmbeddingFeatureCombiner<TypeEmbedding>::fprop(bool is_train) {
  if (is_train)
    CK_THROW_(Error_t::IllegalCall,
              "The fprop() of EmbeddingFeatureCombiner should only be used for inference");
  CudaDeviceContext context(get_device_id());
  float* input = in_tensors_[0]->get_ptr();
  TypeEmbedding* output = out_tensors_[0].get_ptr();
  int* row_ptrs = row_ptrs_tensors_[0]->get_ptr();
  // (Removed unused `in_dims`/`out_dims` locals — they were never read.)
  launch_embedding_feature_combine_kernel(input, output, row_ptrs, batch_size_, slot_num_,
                                          embedding_vec_size_, combiner_type_,
                                          get_gpu().get_stream());
#ifndef NDEBUG
  cudaDeviceSynchronize();
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template class EmbeddingFeatureCombiner<float>;
template class EmbeddingFeatureCombiner<__half>;
} // namespace HugeCTR
|
16a77d4b06a2c277c5d8b1723ad1f89f7273cddb.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/hip/ForeachFunctors.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_foreach_abs_native.h>
#include <ATen/ops/_foreach_acos_native.h>
#include <ATen/ops/_foreach_asin_native.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_ceil_native.h>
#include <ATen/ops/_foreach_cos_native.h>
#include <ATen/ops/_foreach_cosh_native.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_exp_native.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_floor_native.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_log10_native.h>
#include <ATen/ops/_foreach_log1p_native.h>
#include <ATen/ops/_foreach_log2_native.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_neg_native.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_round_native.h>
#include <ATen/ops/_foreach_sigmoid_native.h>
#include <ATen/ops/_foreach_sign_native.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sqrt_native.h>
#include <ATen/ops/_foreach_tan_native.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_zero_native.h>
#include <ATen/ops/empty_like_native.h>
#endif
namespace at::native {
// Out-of-place foreach unary op: allocates an empty_like result for every
// input tensor and applies Op element-wise via multi_tensor_apply (depth 2:
// one read list, one write list). Math runs in opmath_t (e.g. float for
// half/bfloat16 inputs). Returns the list of result tensors.
template <typename scalar_t, template <class> class Op>
std::vector<Tensor> foreach_unary_op(TensorList tensors) {
  std::vector<std::vector<at::Tensor>> tensor_lists;
  std::vector<at::Tensor> vec_res;
  vec_res.reserve(tensors.size());
  for (const auto& t : tensors) {
    vec_res.emplace_back(at::native::empty_like(t));
  }
  tensor_lists.emplace_back(tensors.vec());
  tensor_lists.emplace_back(std::move(vec_res));
  using opmath_t = typename at::opmath_type<scalar_t>;
  multi_tensor_apply<2>(
      tensor_lists,
      UnaryOpFunctor<
          scalar_t,
          /* depth */ 2,
          /* r_args_depth */ 1,
          /* res_arg_index */ 1>(),
      Op<opmath_t>());
  return tensor_lists[1];
}
// In-place foreach unary op: applies Op element-wise directly on the input
// tensors (depth 1) and bumps their version counters for autograd.
template <typename scalar_t, template <class> class Op>
void foreach_unary_op_(TensorList tensors) {
  std::vector<std::vector<at::Tensor>> tensor_lists;
  tensor_lists.emplace_back(tensors.vec());
  using opmath_t = typename at::opmath_type<scalar_t>;
  multi_tensor_apply<1>(
      tensor_lists,
      UnaryOpFunctor<
          scalar_t,
          /* depth */ 1,
          /* r_args_depth */ 1,
          /* res_arg_index */ 0>(),
      Op<opmath_t>());
  increment_version(tensors);
}
// Dispatch helpers: each *name* function runs the out-of-place op, each
// *name*_ function the in-place op, over the dtype set encoded in its name.

// Floating + complex + Half (out-of-place).
template <template <class> class Op>
std::vector<Tensor> floating_complex_half(TensorList tensors) {
  return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(
      ScalarType::Half,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}

// Floating + complex + Half (in-place).
template <template <class> class Op>
void floating_complex_half_(TensorList tensors) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(
      ScalarType::Half,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// All types + complex + Half/BFloat16/Bool (out-of-place).
template <template <class> class Op>
std::vector<Tensor> all_types_complex_bfloat16_half_bool(TensorList tensors) {
  return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      ScalarType::Half,
      ScalarType::BFloat16,
      ScalarType::Bool,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}

// All types + complex + Half/BFloat16/Bool (in-place).
template <template <class> class Op>
void all_types_complex_bfloat16_half_bool_(TensorList tensors) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      ScalarType::Half,
      ScalarType::BFloat16,
      ScalarType::Bool,
      tensors[0].scalar_type(),
      // Fixed: was "foreach_unary_op_cuda"; every other in-place dispatch in
      // this file tags its error messages with the trailing-underscore name.
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// Floating + complex + Half/BFloat16 (out-of-place).
template <template <class> class Op>
std::vector<Tensor> floating_complex_half_bfloat16(TensorList tensors) {
  return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}

// Floating + complex + Half/BFloat16 (in-place).
template <template <class> class Op>
void floating_complex_half_bfloat16_(TensorList tensors) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// All types + complex + Half/BFloat16 (out-of-place).
template <template <class> class Op>
std::vector<Tensor> all_types_half_complex_bfloat16(TensorList tensors) {
  return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
      ScalarType::Half,
      at::ScalarType::BFloat16,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}

// All types + complex + Half/BFloat16 (in-place).
template <template <class> class Op>
void all_types_half_complex_bfloat16_(TensorList tensors) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
      ScalarType::Half,
      at::ScalarType::BFloat16,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// Floating + Half (out-of-place).
template <template <class> class Op>
std::vector<Tensor> floating_half(TensorList tensors) {
  return AT_DISPATCH_FLOATING_TYPES_AND(
      ScalarType::Half,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}

// Floating + Half (in-place). Uses the same AT_DISPATCH_FLOATING_TYPES_AND
// form as the out-of-place overload (was AT_DISPATCH_FLOATING_TYPES_AND_HALF,
// which is equivalent but inconsistent with the rest of this file).
template <template <class> class Op>
void floating_half_(TensorList tensors) {
  AT_DISPATCH_FLOATING_TYPES_AND(
      ScalarType::Half,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// Floating + Half/BFloat16 (out-of-place).
template <template <class> class Op>
std::vector<Tensor> floating_half_bfloat16(TensorList tensors) {
  return AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}

// Floating + Half/BFloat16 (in-place).
template <template <class> class Op>
void floating_half_bfloat16_(TensorList tensors) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// STD_FUNCTOR(op_name, functor_name): defines a device functor whose
// operator() forwards to std::op_name. (Comments must stay outside the macro
// bodies — a // inside a backslash-continued macro would swallow the
// continuation.)
#define STD_FUNCTOR(op_name, functor_name) \
  template <typename T>                    \
  struct functor_name {                    \
    __device__ T operator()(T t) const {   \
      return std::op_name(t);              \
    }                                      \
  };

// OP_CUSTOM_FUNCTOR(function, op_name, functor_name): given a dispatch
// function and a functor, emits the out-of-place and in-place CUDA entry
// points for _foreach_<op_name>. Both fall back to the slow (per-tensor) path
// when the fast multi-tensor route is unavailable or any tensor is
// integral/bool.
#define OP_CUSTOM_FUNCTOR(function, op_name, functor_name)                 \
  std::vector<Tensor> foreach_tensor_##op_name##_cuda(TensorList tensors) { \
    check_foreach_api_restrictions(tensors);                               \
    if (!can_use_fast_route(tensors) ||                                    \
        has_integral_tensor(tensors, /* includeBool */ true)) {            \
      return at::native::foreach_tensor_##op_name##_slow(tensors);         \
    }                                                                      \
    return function<functor_name>(tensors);                                \
  }                                                                        \
  void foreach_tensor_##op_name##_cuda_(TensorList tensors) {              \
    check_foreach_api_restrictions(tensors);                               \
    if (!can_use_fast_route(tensors) ||                                    \
        has_integral_tensor(tensors, /* includeBool */ true)) {            \
      return at::native::foreach_tensor_##op_name##_slow_(tensors);        \
    }                                                                      \
                                                                           \
    function##_<functor_name>(tensors);                                    \
  }

// OP(function, op_name, functor_name): STD_FUNCTOR + OP_CUSTOM_FUNCTOR in one
// shot — creates the functor, the out-of-place op, and the in-place op.
#define OP(function, op_name, functor_name)  \
  STD_FUNCTOR(op_name, functor_name);        \
  OP_CUSTOM_FUNCTOR(function, op_name, functor_name);
OP(floating_half_bfloat16, erfc, Erfc);
OP(floating_half, lgamma, Lgamma);
OP(floating_half_bfloat16, trunc, Truncf);
OP(floating_half_bfloat16, floor, Floor);
OP(floating_half_bfloat16, ceil, Ceil);
OP(floating_complex_half_bfloat16, acos, Acos);
OP(floating_complex_half_bfloat16, asin, Asin);
OP(floating_complex_half_bfloat16, atan, Atan);
OP(floating_complex_half_bfloat16, cosh, Cosh);
OP(floating_complex_half_bfloat16, tan, Tan);
OP(floating_complex_half_bfloat16, sin, Sin);
OP(floating_complex_half_bfloat16, sinh, Sinh);
OP(floating_complex_half_bfloat16, exp, Exp);
OP(floating_complex_half_bfloat16, expm1, Expm1);
OP(floating_complex_half_bfloat16, tanh, Tanh);
OP(floating_complex_half_bfloat16, log, Log);
OP(floating_complex_half_bfloat16, log10, Log10);
OP(floating_complex_half_bfloat16, log2, Log2);
OP(floating_complex_half_bfloat16, log1p, Log1p);
OP(floating_complex_half_bfloat16, cos, Cos);
OP(floating_complex_half_bfloat16, sqrt, Sqrt);
OP(floating_half_bfloat16, erf, Erf);
//
// Special cases
// These functions must be special cased as they can't be written as
// std::functor_name in OP macro
//
// Sigmoid(t) = 1 / (1 + e^-t), evaluated in the opmath precision the
// dispatchers instantiate these functors with.
template <typename T>
struct Sigmoid {
  T one = T(1);
  __device__ T operator()(T t) const {
    return (one / (one + ::exp(-t)));
  }
};
// Round-to-nearest via nearbyint (honors the current rounding mode,
// i.e. ties-to-even by default), rather than std::round's ties-away.
template <typename T>
struct Round {
  __device__ T operator()(T t) const {
    return std::nearbyint(t);
  }
};
// NOTE(review): despite the name, this computes the fractional part
// t - trunc(t); it is wired up to `frac` below, not to `trunc`.
template <typename T>
struct Trunc {
  __device__ T operator()(T t) const {
    return t - std::trunc(t);
  }
};
// Reciprocal(t) = 1 / t.
template <typename T>
struct Reciprocal {
  T one = T(1);
  __device__ T operator()(T t) const {
    return (one / t);
  }
};
// Sign(t): -1, 0, or +1 via c10::signum.
template <typename T>
struct Sign {
  C10_DEVICE T operator()(T t) const {
    return c10::signum<T>(t);
  }
};
// Hook up the hand-written functors; `frac` intentionally uses the Trunc
// functor, which computes t - trunc(t).
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, sigmoid, Sigmoid)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, round, Round)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, frac, Trunc)
OP_CUSTOM_FUNCTOR(floating_complex_half_bfloat16, reciprocal, Reciprocal)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, sign, Sign)
// note(mkozuki): tensor dtype checks of `neg` kernels.
// Since `check_foreach_api_restrictions` don't require all the tensors to have
// the same dtype, I think it safer to check every single tensor's dtype inside
// negation kernels.
// Out-of-place fused neg. Bool is rejected explicitly (after the slow-path
// fallback check) to mirror the eager `-` operator's error message.
std::vector<Tensor> foreach_tensor_neg_cuda(TensorList tensors) {
  check_foreach_api_restrictions(tensors);
  if (!can_use_fast_route(tensors)) {
    return at::native::foreach_tensor_neg_slow(tensors);
  }
  TORCH_CHECK(
      tensors[0].scalar_type() != kBool,
      "Negation, the `-` operator, on a bool tensor is not supported. "
      "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
  return all_types_half_complex_bfloat16<std::negate>(tensors);
}
// In-place fused neg; same bool rejection as the out-of-place version.
void foreach_tensor_neg_cuda_(TensorList tensors) {
  check_foreach_api_restrictions(tensors);
  if (!can_use_fast_route(tensors)) {
    return at::native::foreach_tensor_neg_slow_(tensors);
  }
  TORCH_CHECK(
      tensors[0].scalar_type() != kBool,
      "Negation, the `-` operator, on a bool tensor is not supported. "
      "If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
  all_types_half_complex_bfloat16_<std::negate>(tensors);
}
// Abs have to go via slow path in case of a complex type.
// This is because foreach kernels can't return a different dtype than passed,
// while abs with complex inputs will produce float output.
// |t| via std::abs; only reached for non-complex dtypes (complex inputs are
// routed to the slow path by the entry points below).
template <typename T>
struct Abs {
  __device__ T operator()(T t) const {
    return std::abs(t);
  }
};
// Out-of-place fused abs. Complex inputs must take the slow path because
// these fused kernels cannot return a dtype different from the input, while
// abs(complex) produces a real-valued tensor.
std::vector<Tensor> foreach_tensor_abs_cuda(TensorList tensors) {
  check_foreach_api_restrictions(tensors);
  const bool has_complex =
      std::any_of(tensors.begin(), tensors.end(), [](const auto& t) {
        return at::isComplexType(t.scalar_type());
      });
  if (!can_use_fast_route(tensors) || has_complex) {
    return at::native::foreach_tensor_abs_slow(tensors);
  }
  return all_types_complex_bfloat16_half_bool<Abs>(tensors);
}
// In-place fused abs; same complex fallback rationale as the out-of-place
// version.
void foreach_tensor_abs_cuda_(TensorList tensors) {
  check_foreach_api_restrictions(tensors);
  const bool has_complex =
      std::any_of(tensors.begin(), tensors.end(), [](const auto& t) {
        return at::isComplexType(t.scalar_type());
      });
  if (!can_use_fast_route(tensors) || has_complex) {
    return at::native::foreach_tensor_abs_slow_(tensors);
  }
  all_types_complex_bfloat16_half_bool_<Abs>(tensors);
}
// In-place zero-fill of every tensor in a single fused multi_tensor_apply
// pass (depth 1: each tensor is both input and output).
void foreach_tensor_zero_cuda_(TensorList tensors) {
  check_foreach_api_restrictions(tensors);
  if (!can_use_fast_route(tensors)) {
    return at::native::foreach_tensor_zero_slow_(tensors);
  }
  std::vector<std::vector<at::Tensor>> tensor_lists;
  tensor_lists.emplace_back(tensors.vec());
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
      ScalarType::Half,
      ScalarType::BFloat16,
      tensors[0].scalar_type(),
      "foreach_zero_cuda_",
      [&]() {
        multi_tensor_apply<1>(
            tensor_lists,
            ZeroFunctor<
                scalar_t,
                /* depth */ 1,
                /* r_args_depth */ 1,
                /* res_arg_index */ 0>());
      });
}
} // namespace at::native
| 16a77d4b06a2c277c5d8b1723ad1f89f7273cddb.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/ForeachUtils.h>
#include <c10/util/TypeSafeSignMath.h>
#include <ATen/native/cuda/ForeachFunctors.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_foreach_abs_native.h>
#include <ATen/ops/_foreach_acos_native.h>
#include <ATen/ops/_foreach_asin_native.h>
#include <ATen/ops/_foreach_atan_native.h>
#include <ATen/ops/_foreach_ceil_native.h>
#include <ATen/ops/_foreach_cos_native.h>
#include <ATen/ops/_foreach_cosh_native.h>
#include <ATen/ops/_foreach_erf_native.h>
#include <ATen/ops/_foreach_erfc_native.h>
#include <ATen/ops/_foreach_exp_native.h>
#include <ATen/ops/_foreach_expm1_native.h>
#include <ATen/ops/_foreach_floor_native.h>
#include <ATen/ops/_foreach_frac_native.h>
#include <ATen/ops/_foreach_lgamma_native.h>
#include <ATen/ops/_foreach_log10_native.h>
#include <ATen/ops/_foreach_log1p_native.h>
#include <ATen/ops/_foreach_log2_native.h>
#include <ATen/ops/_foreach_log_native.h>
#include <ATen/ops/_foreach_neg_native.h>
#include <ATen/ops/_foreach_reciprocal_native.h>
#include <ATen/ops/_foreach_round_native.h>
#include <ATen/ops/_foreach_sigmoid_native.h>
#include <ATen/ops/_foreach_sign_native.h>
#include <ATen/ops/_foreach_sin_native.h>
#include <ATen/ops/_foreach_sinh_native.h>
#include <ATen/ops/_foreach_sqrt_native.h>
#include <ATen/ops/_foreach_tan_native.h>
#include <ATen/ops/_foreach_tanh_native.h>
#include <ATen/ops/_foreach_trunc_native.h>
#include <ATen/ops/_foreach_zero_native.h>
#include <ATen/ops/empty_like_native.h>
#endif
namespace at::native {
// Out-of-place fused unary op: allocates one result tensor per input and
// applies Op over all tensors in a single multi_tensor_apply pass.
// Computation happens in opmath precision (e.g. float for Half inputs).
template <typename scalar_t, template <class> class Op>
std::vector<Tensor> foreach_unary_op(TensorList tensors) {
  std::vector<std::vector<at::Tensor>> tensor_lists;
  std::vector<at::Tensor> vec_res;
  vec_res.reserve(tensors.size());
  for (const auto& t : tensors) {
    vec_res.emplace_back(at::native::empty_like(t));
  }
  tensor_lists.emplace_back(tensors.vec());
  tensor_lists.emplace_back(std::move(vec_res));
  using opmath_t = typename at::opmath_type<scalar_t>;
  multi_tensor_apply<2>(
      tensor_lists,
      UnaryOpFunctor<
          scalar_t,
          /* depth */ 2,
          /* r_args_depth */ 1,
          /* res_arg_index */ 1>(),
      Op<opmath_t>());
  return tensor_lists[1];
}
// In-place fused unary op: overwrites each tensor with Op(tensor) and bumps
// the autograd version counters to record the mutation.
template <typename scalar_t, template <class> class Op>
void foreach_unary_op_(TensorList tensors) {
  std::vector<std::vector<at::Tensor>> tensor_lists;
  tensor_lists.emplace_back(tensors.vec());
  using opmath_t = typename at::opmath_type<scalar_t>;
  multi_tensor_apply<1>(
      tensor_lists,
      UnaryOpFunctor<
          scalar_t,
          /* depth */ 1,
          /* r_args_depth */ 1,
          /* res_arg_index */ 0>(),
      Op<opmath_t>());
  increment_version(tensors);
}
// Out-of-place dispatcher over float, double, complex, and Half.
template <template <class> class Op>
std::vector<Tensor> floating_complex_half(TensorList tensors) {
  return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(
      ScalarType::Half,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}
// In-place counterpart (same dtype coverage).
template <template <class> class Op>
void floating_complex_half_(TensorList tensors) {
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(
      ScalarType::Half,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// Out-of-place dispatcher over all integral and floating dtypes plus
// complex, Half, BFloat16, and Bool (widest coverage; used by abs).
template <template <class> class Op>
std::vector<Tensor> all_types_complex_bfloat16_half_bool(TensorList tensors) {
  return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      ScalarType::Half,
      ScalarType::BFloat16,
      ScalarType::Bool,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda",
      [&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}
// In-place dispatcher over all integral and floating dtypes plus complex,
// Half, BFloat16, and Bool.
// Fix: the dispatch name previously lacked the trailing underscore every
// other in-place dispatcher in this file uses ("foreach_unary_op_cuda_");
// the name only appears in dispatch error messages, so this is an
// error-reporting consistency fix with no behavioral change otherwise.
template <template <class> class Op>
void all_types_complex_bfloat16_half_bool_(TensorList tensors) {
  AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
      ScalarType::Half,
      ScalarType::BFloat16,
      ScalarType::Bool,
      tensors[0].scalar_type(),
      "foreach_unary_op_cuda_",
      [&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
template <template <class> class Op>
std::vector<Tensor> floating_complex_half_bfloat16(TensorList tensors) {
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
tensors[0].scalar_type(),
"foreach_unary_op_cuda",
[&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}
template <template <class> class Op>
void floating_complex_half_bfloat16_(TensorList tensors) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
tensors[0].scalar_type(),
"foreach_unary_op_cuda_",
[&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
template <template <class> class Op>
std::vector<Tensor> all_types_half_complex_bfloat16(TensorList tensors) {
return AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half,
at::ScalarType::BFloat16,
tensors[0].scalar_type(),
"foreach_unary_op_cuda",
[&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}
template <template <class> class Op>
void all_types_half_complex_bfloat16_(TensorList tensors) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
ScalarType::Half,
at::ScalarType::BFloat16,
tensors[0].scalar_type(),
"foreach_unary_op_cuda_",
[&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
template <template <class> class Op>
std::vector<Tensor> floating_half(TensorList tensors) {
return AT_DISPATCH_FLOATING_TYPES_AND(
ScalarType::Half,
tensors[0].scalar_type(),
"foreach_unary_op_cuda",
[&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}
template <template <class> class Op>
void floating_half_(TensorList tensors) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
tensors[0].scalar_type(), "foreach_unary_op_cuda_", [&]() {
foreach_unary_op_<scalar_t, Op>(tensors);
});
}
template <template <class> class Op>
std::vector<Tensor> floating_half_bfloat16(TensorList tensors) {
return AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
tensors[0].scalar_type(),
"foreach_unary_op_cuda",
[&]() { return foreach_unary_op<scalar_t, Op>(tensors); });
}
template <template <class> class Op>
void floating_half_bfloat16_(TensorList tensors) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
tensors[0].scalar_type(),
"foreach_unary_op_cuda_",
[&]() { foreach_unary_op_<scalar_t, Op>(tensors); });
}
// makes the functor
#define STD_FUNCTOR(op_name, functor_name) \
template <typename T> \
struct functor_name { \
__device__ T operator()(T t) const { \
return std::op_name(t); \
} \
};
// given a functor and a "dispatch function", creates the outplace and inplace
// operations
#define OP_CUSTOM_FUNCTOR(function, op_name, functor_name) \
std::vector<Tensor> foreach_tensor_##op_name##_cuda(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors) || \
has_integral_tensor(tensors, /* includeBool */ true)) { \
return at::native::foreach_tensor_##op_name##_slow(tensors); \
} \
return function<functor_name>(tensors); \
} \
void foreach_tensor_##op_name##_cuda_(TensorList tensors) { \
check_foreach_api_restrictions(tensors); \
if (!can_use_fast_route(tensors) || \
has_integral_tensor(tensors, /* includeBool */ true)) { \
return at::native::foreach_tensor_##op_name##_slow_(tensors); \
} \
\
function##_<functor_name>(tensors); \
}
// creates a functor, outplace version, and inplace version.
#define OP(function, op_name, functor_name) \
STD_FUNCTOR(op_name, functor_name); \
OP_CUSTOM_FUNCTOR(function, op_name, functor_name);
OP(floating_half_bfloat16, erfc, Erfc);
OP(floating_half, lgamma, Lgamma);
OP(floating_half_bfloat16, trunc, Truncf);
OP(floating_half_bfloat16, floor, Floor);
OP(floating_half_bfloat16, ceil, Ceil);
OP(floating_complex_half_bfloat16, acos, Acos);
OP(floating_complex_half_bfloat16, asin, Asin);
OP(floating_complex_half_bfloat16, atan, Atan);
OP(floating_complex_half_bfloat16, cosh, Cosh);
OP(floating_complex_half_bfloat16, tan, Tan);
OP(floating_complex_half_bfloat16, sin, Sin);
OP(floating_complex_half_bfloat16, sinh, Sinh);
OP(floating_complex_half_bfloat16, exp, Exp);
OP(floating_complex_half_bfloat16, expm1, Expm1);
OP(floating_complex_half_bfloat16, tanh, Tanh);
OP(floating_complex_half_bfloat16, log, Log);
OP(floating_complex_half_bfloat16, log10, Log10);
OP(floating_complex_half_bfloat16, log2, Log2);
OP(floating_complex_half_bfloat16, log1p, Log1p);
OP(floating_complex_half_bfloat16, cos, Cos);
OP(floating_complex_half_bfloat16, sqrt, Sqrt);
OP(floating_half_bfloat16, erf, Erf);
//
// Special cases
// These functions must be special cased as they can't be written as
// std::functor_name in OP macro
//
template <typename T>
struct Sigmoid {
T one = T(1);
__device__ T operator()(T t) const {
return (one / (one + std::exp(-t)));
}
};
template <typename T>
struct Round {
__device__ T operator()(T t) const {
return std::nearbyint(t);
}
};
template <typename T>
struct Trunc {
__device__ T operator()(T t) const {
return t - std::trunc(t);
}
};
template <typename T>
struct Reciprocal {
T one = T(1);
__device__ T operator()(T t) const {
return (one / t);
}
};
template <typename T>
struct Sign {
C10_DEVICE T operator()(T t) const {
return c10::signum<T>(t);
}
};
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, sigmoid, Sigmoid)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, round, Round)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, frac, Trunc)
OP_CUSTOM_FUNCTOR(floating_complex_half_bfloat16, reciprocal, Reciprocal)
OP_CUSTOM_FUNCTOR(floating_half_bfloat16, sign, Sign)
// note(mkozuki): tensor dtype checks of `neg` kernels.
// Since `check_foreach_api_restrictions` don't require all the tensors to have
// the same dtype, I think it safer to check every single tensor's dtype inside
// negation kernels.
std::vector<Tensor> foreach_tensor_neg_cuda(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_neg_slow(tensors);
}
TORCH_CHECK(
tensors[0].scalar_type() != kBool,
"Negation, the `-` operator, on a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
return all_types_half_complex_bfloat16<std::negate>(tensors);
}
void foreach_tensor_neg_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_neg_slow_(tensors);
}
TORCH_CHECK(
tensors[0].scalar_type() != kBool,
"Negation, the `-` operator, on a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
all_types_half_complex_bfloat16_<std::negate>(tensors);
}
// Abs have to go via slow path in case of a complex type.
// This is because foreach kernels can't return a different dtype than passed,
// while abs with complex inputs will produce float output.
template <typename T>
struct Abs {
__device__ T operator()(T t) const {
return std::abs(t);
}
};
std::vector<Tensor> foreach_tensor_abs_cuda(TensorList tensors) {
check_foreach_api_restrictions(tensors);
const bool has_complex =
std::any_of(tensors.begin(), tensors.end(), [](const auto& t) {
return at::isComplexType(t.scalar_type());
});
if (!can_use_fast_route(tensors) || has_complex) {
return at::native::foreach_tensor_abs_slow(tensors);
}
return all_types_complex_bfloat16_half_bool<Abs>(tensors);
}
void foreach_tensor_abs_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
const bool has_complex =
std::any_of(tensors.begin(), tensors.end(), [](const auto& t) {
return at::isComplexType(t.scalar_type());
});
if (!can_use_fast_route(tensors) || has_complex) {
return at::native::foreach_tensor_abs_slow_(tensors);
}
all_types_complex_bfloat16_half_bool_<Abs>(tensors);
}
void foreach_tensor_zero_cuda_(TensorList tensors) {
check_foreach_api_restrictions(tensors);
if (!can_use_fast_route(tensors)) {
return at::native::foreach_tensor_zero_slow_(tensors);
}
std::vector<std::vector<at::Tensor>> tensor_lists;
tensor_lists.emplace_back(tensors.vec());
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
tensors[0].scalar_type(),
"foreach_zero_cuda_",
[&]() {
multi_tensor_apply<1>(
tensor_lists,
ZeroFunctor<
scalar_t,
/* depth */ 1,
/* r_args_depth */ 1,
/* res_arg_index */ 0>());
});
}
} // namespace at::native
|
6487b245b485cdb13cb221c4fc7426c72b6ace46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc suma.cu -o v && ./v
#include <bits/stdc++.h>
using namespace std;
#define THREADS_PER_BLOCK 1024 //depende de la arquitectura
//#define THREADS_PER_BLOCK 16
#define threadsPB 8
// Fills every cell of the rows x cols matrix M with a pseudo-random value
// in [1, 10]; consumes the global rand() stream, so seed with srand() for
// reproducibility.
void random_ints(int **&M, int rows, int cols){
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) {
            M[r][c] = rand() % 10 + 1;
        }
    }
}
// Allocates a rows x cols integer matrix into M: an array of row pointers,
// each row malloc'd independently. Caller owns (and must free) all rows
// plus the pointer array itself.
void resize(int **&M,int rows, int cols){
    M = (int **) malloc(sizeof(int*) * rows);
    for (int r = 0; r < rows; ++r) {
        M[r] = (int *) malloc(sizeof(int) * cols);
    }
}
// Prints the rows x cols matrix M to stdout, values space-separated, one
// row per line, followed by a trailing blank line.
void imprimir(int **&M, int rows, int cols){
    for(int i=0;i<rows;i++){
        for(int j=0;j<cols;j++){
            cout<<M[i][j]<<" ";
        }
        cout<<endl;
    }
    cout<<endl;
}
//void createMatrixHostCUDA(int**& host, int**& device, int **& aux, int size, int rows, int cols ){
// Allocates a rows x cols int matrix on the device, addressable from kernels
// as a 2D array (int**): one contiguous element block plus a device-resident
// table of row pointers.
// Fix: the original allocated only the (uninitialized) pointer table, so any
// kernel dereferencing M[i][j] read garbage device pointers.
// NOTE: the contiguous element block is only reachable through the table's
// first entry; hipFree(device) alone does not release it.
void createMatrixHostCUDA(int**& device, int rows, int cols ){
    // Contiguous storage for all elements.
    int *data = NULL;
    hipMalloc((void **)&data, rows * cols * sizeof(int));
    // Build the row-pointer table on the host, pointing into the device block.
    int **rowPtrs = (int **)malloc(rows * sizeof(int*));
    rowPtrs[0] = data;
    for (int i = 1; i < rows; ++i){
        rowPtrs[i] = rowPtrs[i-1] + cols;
    }
    // Copy the table to the device so kernels can index M[i][j].
    hipMalloc((void **)&device, rows * sizeof(int*));
    hipMemcpy(device, rowPtrs, rows * sizeof(int*), hipMemcpyHostToDevice);
    free(rowPtrs);
}
//=================cuda=================
// Element-wise matrix add: R[i][j] = A[i][j] + B[i][j].
// A, B, R are device-resident row-pointer tables (int**) whose entries point
// at device memory. Row index i comes from the y dimension, column index j
// from x; the guard handles partial blocks at the grid tail.
__global__ void sum(int **A, int **B, int **R, int rows, int cols){
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<rows && j<cols){
        R[i][j] = A[i][j] + B[i][j];
    }
}
// Element-wise GPU sum of two host matrices: h_R = h_A + h_B.
// The host matrices are arrays of separately malloc'd rows (see resize()),
// so data is staged row by row into contiguous device storage exposed
// through device-resident row-pointer tables, matching the int** kernel.
// Fixes vs. original: element size was sizeof(int*) instead of sizeof(int);
// inputs were never copied to the device; the result was memcpy'd on top of
// h_R's row-pointer table (clobbering it) instead of into its rows; the
// pointer tables were never initialized; grid x/y were sized from the wrong
// extents (kernel takes columns from x, rows from y).
void cuda_suma(int **h_A, int **h_B, int **h_R, int rows, int cols ){
    int **d_A, **d_B, **d_R;        // device row-pointer tables
    int *dataA, *dataB, *dataR;     // contiguous device element storage
    size_t elemBytes = (size_t)rows * cols * sizeof(int);
    size_t rowBytes = (size_t)cols * sizeof(int);
    size_t tableBytes = (size_t)rows * sizeof(int*);
    hipMalloc((void **)&dataA, elemBytes);
    hipMalloc((void **)&dataB, elemBytes);
    hipMalloc((void **)&dataR, elemBytes);
    hipMalloc((void **)&d_A, tableBytes);
    hipMalloc((void **)&d_B, tableBytes);
    hipMalloc((void **)&d_R, tableBytes);
    // Build each row-pointer table on the host, then copy it to the device.
    int **tbl = (int **)malloc(tableBytes);
    for (int i = 0; i < rows; ++i) tbl[i] = dataA + i * cols;
    hipMemcpy(d_A, tbl, tableBytes, hipMemcpyHostToDevice);
    for (int i = 0; i < rows; ++i) tbl[i] = dataB + i * cols;
    hipMemcpy(d_B, tbl, tableBytes, hipMemcpyHostToDevice);
    for (int i = 0; i < rows; ++i) tbl[i] = dataR + i * cols;
    hipMemcpy(d_R, tbl, tableBytes, hipMemcpyHostToDevice);
    free(tbl);
    // Copy inputs HOST -> DEVICE (host rows are separate allocations).
    for (int i = 0; i < rows; ++i){
        hipMemcpy(dataA + i * cols, h_A[i], rowBytes, hipMemcpyHostToDevice);
        hipMemcpy(dataB + i * cols, h_B[i], rowBytes, hipMemcpyHostToDevice);
    }
    // Launch: grid.x covers columns, grid.y covers rows (kernel derives
    // j from x and i from y).
    dim3 threadPerBlock(threadsPB, threadsPB);
    dim3 blockPerGrid((cols+threadPerBlock.x-1)/threadPerBlock.x,(rows+threadPerBlock.y-1)/threadPerBlock.y);
    hipLaunchKernelGGL(sum, dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, d_A, d_B, d_R, rows, cols);
    // Copy result DEVICE -> HOST row by row.
    for (int i = 0; i < rows; ++i){
        hipMemcpy(h_R[i], dataR + i * cols, rowBytes, hipMemcpyDeviceToHost);
    }
    hipFree(dataA); hipFree(dataB); hipFree(dataR);
    hipFree(d_A); hipFree(d_B); hipFree(d_R);
}
//======================================
// Demo driver: builds two random 8x8 matrices, adds them on the GPU via
// cuda_suma, and prints A, B, and the result.
// NOTE(review): the host matrices are never freed and no HIP error checking
// is done — acceptable for a demo, but worth fixing.
int main(){
    int rows = 8;
    int cols = 8;
    int **A, **B, **R;
    resize(A,rows,cols);
    resize(B,rows,cols);
    resize(R,rows,cols);
    random_ints(A,rows,cols);
    random_ints(B,rows,cols);
    cuda_suma(A,B,R,rows,cols);
    imprimir(A,rows,cols);
    imprimir(B,rows,cols);
    imprimir(R,rows,cols);
}
/*
int main(){
int rows=row;
int cols=column;
//srand (time(NULL));
int **h_A, **h_B, **h_R;
int **d_A, **d_B, **d_R;
int **a_aux, **b_aux, **c_aux;
int size = row* column * sizeof(int*);
createMatrixHostCUDA(h_A,d_A,a_aux,size,row,column);
createMatrixHostCUDA(h_B,d_B,b_aux,size,row,column);
createMatrixHostCUDA(h_R,d_R,c_aux,size,row,column);
random_ints(h_A,rows,cols);
random_ints(h_B,rows,cols);
hipMemcpy(a_aux[0], h_A[0], size, hipMemcpyHostToDevice);
hipMemcpy(b_aux[0], h_B[0], size, hipMemcpyHostToDevice);
dim3 threadPerBlock(threadsPB, threadsPB);
dim3 blockPerGrid((rows+threadPerBlock.x-1)/threadPerBlock.x,(cols+threadPerBlock.y-1)/threadPerBlock.y);
//scalarMult<<<blockPerGrid,threadPerBlock>>>(d_A,2,d_R);
Multi<<<blockPerGrid,threadPerBlock>>>(d_A,d_B,d_R);
hipMemcpy(h_R[0],c_aux[0], size, hipMemcpyDeviceToHost);
print(h_A,rows,cols);
print(h_B,rows,cols);
print(h_R,rows,cols);
free(h_A); free(h_B); free(h_R);
hipFree(d_A); hipFree(d_B); hipFree(d_R);
hipFree(a_aux[0]);hipFree(c_aux[0]);
return 0;
}*/ | 6487b245b485cdb13cb221c4fc7426c72b6ace46.cu | // nvcc suma.cu -o v && ./v
#include <bits/stdc++.h>
using namespace std;
#define THREADS_PER_BLOCK 1024 //depende de la arquitectura
//#define THREADS_PER_BLOCK 16
#define threadsPB 8
// Fills every cell of the rows x cols matrix M with a pseudo-random value
// in [1, 10]; consumes the global rand() stream.
void random_ints(int **&M, int rows, int cols){
    for (int i =0; i < rows; ++i){
        for (int j =0; j < cols; ++j){
            M[i][j] = 1+rand()%10;
        }
    }
}
// Allocates a rows x cols integer matrix into M: an array of row pointers,
// each row malloc'd independently. Caller owns all allocations.
void resize(int **&M,int rows, int cols){
    M = (int **) malloc(rows * sizeof(int*)) ;
    for(int i = 0; i<rows; i++) {
        M[i] = (int *) malloc(cols * sizeof(int));
    }
}
// Prints the rows x cols matrix M to stdout, values space-separated, one
// row per line, followed by a trailing blank line.
void imprimir(int **&M, int rows, int cols){
    for(int i=0;i<rows;i++){
        for(int j=0;j<cols;j++){
            cout<<M[i][j]<<" ";
        }
        cout<<endl;
    }
    cout<<endl;
}
//void createMatrixHostCUDA(int**& host, int**& device, int **& aux, int size, int rows, int cols ){
// Allocates a rows x cols int matrix on the device, addressable from kernels
// as a 2D array (int**): one contiguous element block plus a device-resident
// table of row pointers.
// Fix: the original allocated only the (uninitialized) pointer table, so any
// kernel dereferencing M[i][j] read garbage device pointers.
// NOTE: the contiguous element block is only reachable through the table's
// first entry; cudaFree(device) alone does not release it.
void createMatrixHostCUDA(int**& device, int rows, int cols ){
    // Contiguous storage for all elements.
    int *data = NULL;
    cudaMalloc((void **)&data, rows * cols * sizeof(int));
    // Build the row-pointer table on the host, pointing into the device block.
    int **rowPtrs = (int **)malloc(rows * sizeof(int*));
    rowPtrs[0] = data;
    for (int i = 1; i < rows; ++i){
        rowPtrs[i] = rowPtrs[i-1] + cols;
    }
    // Copy the table to the device so kernels can index M[i][j].
    cudaMalloc((void **)&device, rows * sizeof(int*));
    cudaMemcpy(device, rowPtrs, rows * sizeof(int*), cudaMemcpyHostToDevice);
    free(rowPtrs);
}
//=================cuda=================
// Element-wise matrix add: R[i][j] = A[i][j] + B[i][j].
// A, B, R are device-resident row-pointer tables (int**) whose entries point
// at device memory. Row index i comes from the y dimension, column index j
// from x; the guard handles partial blocks at the grid tail.
__global__ void sum(int **A, int **B, int **R, int rows, int cols){
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<rows && j<cols){
        R[i][j] = A[i][j] + B[i][j];
    }
}
// Element-wise GPU sum of two host matrices: h_R = h_A + h_B.
// The host matrices are arrays of separately malloc'd rows (see resize()),
// so data is staged row by row into contiguous device storage exposed
// through device-resident row-pointer tables, matching the int** kernel.
// Fixes vs. original: element size was sizeof(int*) instead of sizeof(int);
// inputs were never copied to the device; the result was memcpy'd on top of
// h_R's row-pointer table (clobbering it) instead of into its rows; the
// pointer tables were never initialized; grid x/y were sized from the wrong
// extents (kernel takes columns from x, rows from y).
void cuda_suma(int **h_A, int **h_B, int **h_R, int rows, int cols ){
    int **d_A, **d_B, **d_R;        // device row-pointer tables
    int *dataA, *dataB, *dataR;     // contiguous device element storage
    size_t elemBytes = (size_t)rows * cols * sizeof(int);
    size_t rowBytes = (size_t)cols * sizeof(int);
    size_t tableBytes = (size_t)rows * sizeof(int*);
    cudaMalloc((void **)&dataA, elemBytes);
    cudaMalloc((void **)&dataB, elemBytes);
    cudaMalloc((void **)&dataR, elemBytes);
    cudaMalloc((void **)&d_A, tableBytes);
    cudaMalloc((void **)&d_B, tableBytes);
    cudaMalloc((void **)&d_R, tableBytes);
    // Build each row-pointer table on the host, then copy it to the device.
    int **tbl = (int **)malloc(tableBytes);
    for (int i = 0; i < rows; ++i) tbl[i] = dataA + i * cols;
    cudaMemcpy(d_A, tbl, tableBytes, cudaMemcpyHostToDevice);
    for (int i = 0; i < rows; ++i) tbl[i] = dataB + i * cols;
    cudaMemcpy(d_B, tbl, tableBytes, cudaMemcpyHostToDevice);
    for (int i = 0; i < rows; ++i) tbl[i] = dataR + i * cols;
    cudaMemcpy(d_R, tbl, tableBytes, cudaMemcpyHostToDevice);
    free(tbl);
    // Copy inputs HOST -> DEVICE (host rows are separate allocations).
    for (int i = 0; i < rows; ++i){
        cudaMemcpy(dataA + i * cols, h_A[i], rowBytes, cudaMemcpyHostToDevice);
        cudaMemcpy(dataB + i * cols, h_B[i], rowBytes, cudaMemcpyHostToDevice);
    }
    // Launch: grid.x covers columns, grid.y covers rows (kernel derives
    // j from x and i from y).
    dim3 threadPerBlock(threadsPB, threadsPB);
    dim3 blockPerGrid((cols+threadPerBlock.x-1)/threadPerBlock.x,(rows+threadPerBlock.y-1)/threadPerBlock.y);
    sum<<<blockPerGrid,threadPerBlock>>>(d_A,d_B,d_R,rows,cols);
    // Copy result DEVICE -> HOST row by row.
    for (int i = 0; i < rows; ++i){
        cudaMemcpy(h_R[i], dataR + i * cols, rowBytes, cudaMemcpyDeviceToHost);
    }
    cudaFree(dataA); cudaFree(dataB); cudaFree(dataR);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_R);
}
//======================================
// Demo driver: builds two random 8x8 matrices, adds them on the GPU via
// cuda_suma, and prints A, B, and the result.
// NOTE(review): the host matrices are never freed and no CUDA error
// checking is done — acceptable for a demo, but worth fixing.
int main(){
    int rows = 8;
    int cols = 8;
    int **A, **B, **R;
    resize(A,rows,cols);
    resize(B,rows,cols);
    resize(R,rows,cols);
    random_ints(A,rows,cols);
    random_ints(B,rows,cols);
    cuda_suma(A,B,R,rows,cols);
    imprimir(A,rows,cols);
    imprimir(B,rows,cols);
    imprimir(R,rows,cols);
}
/*
int main(){
int rows=row;
int cols=column;
//srand (time(NULL));
int **h_A, **h_B, **h_R;
int **d_A, **d_B, **d_R;
int **a_aux, **b_aux, **c_aux;
int size = row* column * sizeof(int*);
createMatrixHostCUDA(h_A,d_A,a_aux,size,row,column);
createMatrixHostCUDA(h_B,d_B,b_aux,size,row,column);
createMatrixHostCUDA(h_R,d_R,c_aux,size,row,column);
random_ints(h_A,rows,cols);
random_ints(h_B,rows,cols);
cudaMemcpy(a_aux[0], h_A[0], size, cudaMemcpyHostToDevice);
cudaMemcpy(b_aux[0], h_B[0], size, cudaMemcpyHostToDevice);
dim3 threadPerBlock(threadsPB, threadsPB);
dim3 blockPerGrid((rows+threadPerBlock.x-1)/threadPerBlock.x,(cols+threadPerBlock.y-1)/threadPerBlock.y);
//scalarMult<<<blockPerGrid,threadPerBlock>>>(d_A,2,d_R);
Multi<<<blockPerGrid,threadPerBlock>>>(d_A,d_B,d_R);
cudaMemcpy(h_R[0],c_aux[0], size, cudaMemcpyDeviceToHost);
print(h_A,rows,cols);
print(h_B,rows,cols);
print(h_R,rows,cols);
free(h_A); free(h_B); free(h_R);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_R);
cudaFree(a_aux[0]);cudaFree(c_aux[0]);
return 0;
}*/ |
df5f079b12da1cf36e107fdc8f06b153a1dfc5d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include <rocblas.h>
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
hipblasStatus_t status = hipblasStatus_t();
hipblasHandle_t handle = hipblasHandle_t();
const unsigned int N = 5;
const unsigned int dim = 3;
const unsigned int MEMSIZE = N * dim * sizeof(float);
const unsigned int THREAD_PER_BLOCK = 128;
const unsigned int GRID_SIZE = (N * dim + THREAD_PER_BLOCK - 1)/THREAD_PER_BLOCK;
const unsigned int GRID_SIZE2 = (N * N + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
// Writes `length` points of `dim` components each into `data`; every
// component of point i receives the value i mod 256.
void fillData(float *data, const unsigned int length, const unsigned int dim)
{
    float *out = data;
    for (unsigned int i = 0; i < length; ++i)
    {
        const float value = (float)(i & 255);
        for (unsigned int j = 0; j < dim; ++j)
        {
            *out++ = value;
        }
    }
}
// Fills all length*dim floats in `data` with the constant `number`.
void fillDataWithNumber(float *data, const unsigned int length, const unsigned int dim, const float number)
{
    const unsigned int total = length * dim;
    for (unsigned int k = 0; k < total; ++k)
    {
        data[k] = number;
    }
}
// Squares both input vectors element-wise: a2[i] = a[i]^2, b2[i] = b[i]^2.
// `length` is the total number of scalars (N * dim here); the guard handles
// the grid tail when length is not a multiple of the block size.
__global__ void kernelPowerTwo(const float *a, const float *b, const unsigned int length, float *a2, float *b2)
{
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < length)
    {
        float x = a[tid];
        a2[tid] = x * x;
        x = b[tid];
        b2[tid] = x * x;
    }
}
// Element-wise square root: r[i] = sqrt(m[i]). In-place use is fine — the
// caller passes the same buffer for both m and r.
__global__ void kernelSqrt(float* m, const unsigned int length, float* r)
{
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < length)
        r[tid] = sqrtf(m[tid]);
}
// Computes the N x N pairwise squared-distance matrix between two sets of N
// points in `dim` dimensions via d(a,b)^2 = ||a||^2 + ||b||^2 - 2*a.b,
// assembled with hipBLAS GEMMs, then takes an element-wise sqrt.
// Fix: the two ||.||^2 GEMMs previously passed the HOST pointer `ones`
// instead of the device copy `dones` — hipBLAS requires device pointers,
// and `dones` was already allocated and populated for exactly this purpose.
// NOTE(review): results stay in device memory (`dm`); the host buffer `m`
// is never filled — output is only inspected via checkDeviceMatrix.
int main(int argc, char *argv[])
{
    initializeCUDA(deviceProp);
    status = hipblasCreate(&handle) ;
    float alpha, beta;
    float *a, *b, *m;
    float *da, *da2, *db, *db2, *dm;
    float *ones, *dones;
    // page-locked (pinned) host allocations for faster transfers
    hipHostMalloc((void**)&a, MEMSIZE,hipHostMallocDefault);
    hipHostMalloc((void**)&b, MEMSIZE,hipHostMallocDefault);
    hipHostMalloc((void**)&ones, MEMSIZE,hipHostMallocDefault);
    hipHostMalloc((void**)&m, N * N * sizeof(float),hipHostMallocDefault);
    hipMalloc( (void**)&da, MEMSIZE );
    hipMalloc( (void**)&da2, MEMSIZE );
    hipMalloc( (void**)&db, MEMSIZE );
    hipMalloc( (void**)&db2, MEMSIZE );
    hipMalloc( (void**)&dones, MEMSIZE );
    hipMalloc( (void**)&dm, N * N * sizeof(float));
    fillData(a, N, dim);
    fillData(b, N, dim);
    fillDataWithNumber(ones, N, dim, 1.0f);
    // Copy data to DEVICE
    hipMemcpy(da, a, MEMSIZE, hipMemcpyHostToDevice);
    hipMemcpy(db, b, MEMSIZE, hipMemcpyHostToDevice);
    hipMemcpy(dones, ones, MEMSIZE, hipMemcpyHostToDevice);
    // Step 1: square every coordinate: da2 = da.^2, db2 = db.^2.
    hipLaunchKernelGGL(( kernelPowerTwo), dim3(GRID_SIZE), dim3(THREAD_PER_BLOCK), 0, 0, da, db, N * dim, da2, db2);
    // Step 2: dm = ||a_i||^2 + ||b_j||^2 via two GEMMs against the
    // device-resident all-ones matrix (was: host pointer `ones` — bug).
    alpha = 1.f; beta = 0.f;
    hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, N, dim, &alpha, da2, dim, dones, dim, &beta, dm, N);
    beta = 1.f;
    hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, N, dim, &alpha, dones, dim, db2, dim, &beta, dm, N);
    // Step 3: dm += -2 * a.b, completing the squared distances.
    alpha = -2.f;
    hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, N, dim, &alpha, da, dim, db, dim, &beta, dm, N);
    // Step 4: element-wise sqrt for the final distance matrix.
    hipLaunchKernelGGL(( kernelSqrt), dim3(GRID_SIZE2), dim3(THREAD_PER_BLOCK), 0, 0, dm, N * N, dm);
    checkDeviceMatrix<float>(da, sizeof(float)*dim, N, dim, "%f ", "A");
    checkDeviceMatrix<float>(da2, sizeof(float)*dim, N, dim, "%f ", "A^2");
    checkDeviceMatrix<float>(db, sizeof(float)*dim, N, dim, "%f ", "B");
    checkDeviceMatrix<float>(db2, sizeof(float)*dim, N, dim, "%f ", "B^2");
    checkDeviceMatrix<float>(dones, sizeof(float)*dim, N, dim, "%f ", "ONES");
    checkDeviceMatrix<float>(dm, sizeof(float)*N, N, N, "%f ", "M");
    hipFree(da);
    hipFree(da2);
    hipFree(db);
    hipFree(db2);
    hipFree(dm);
    hipFree(dones);
    hipHostFree(a);
    hipHostFree(b);
    hipHostFree(m);
    hipHostFree(ones);
    status = hipblasDestroy(handle);
}
| df5f079b12da1cf36e107fdc8f06b153a1dfc5d4.cu | #include <cudaDefs.h>
#include <cublas_v2.h>
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
cublasStatus_t status = cublasStatus_t();
cublasHandle_t handle = cublasHandle_t();
const unsigned int N = 5;
const unsigned int dim = 3;
const unsigned int MEMSIZE = N * dim * sizeof(float);
const unsigned int THREAD_PER_BLOCK = 128;
const unsigned int GRID_SIZE = (N * dim + THREAD_PER_BLOCK - 1)/THREAD_PER_BLOCK;
const unsigned int GRID_SIZE2 = (N * N + THREAD_PER_BLOCK - 1) / THREAD_PER_BLOCK;
// Writes `length` points of `dim` components each into `data`; every
// component of point i receives the value i mod 256.
void fillData(float *data, const unsigned int length, const unsigned int dim)
{
    unsigned int id = 0;
    for (unsigned int i=0; i<length; i++)
    {
        for (unsigned int j=0; j<dim; j++)
        {
            data[id++]= i & 255; //=i%256
        }
    }
}
// Fills all length*dim floats in `data` with the constant `number`.
void fillDataWithNumber(float *data, const unsigned int length, const unsigned int dim, const float number)
{
    unsigned int id = 0;
    for (unsigned int i=0; i<length; i++)
    {
        for (unsigned int j=0; j<dim; j++)
        {
            data[id++]= number;
        }
    }
}
// Squares both input vectors element-wise: a2[i] = a[i]^2, b2[i] = b[i]^2.
// `length` is the total number of scalars (N * dim here); the guard handles
// the grid tail when length is not a multiple of the block size.
__global__ void kernelPowerTwo(const float *a, const float *b, const unsigned int length, float *a2, float *b2)
{
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < length)
    {
        float x = a[tid];
        a2[tid] = x * x;
        x = b[tid];
        b2[tid] = x * x;
    }
}
// Element-wise square root: r[i] = sqrt(m[i]). In-place use is fine — the
// caller passes the same buffer for both m and r.
__global__ void kernelSqrt(float* m, const unsigned int length, float* r)
{
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < length)
        r[tid] = sqrtf(m[tid]);
}
// Computes the N x N pairwise squared-distance matrix between two sets of N
// points in `dim` dimensions via d(a,b)^2 = ||a||^2 + ||b||^2 - 2*a.b,
// assembled with cuBLAS GEMMs, then takes an element-wise sqrt.
// Fix: the two ||.||^2 GEMMs previously passed the HOST pointer `ones`
// instead of the device copy `dones` — cuBLAS requires device pointers,
// and `dones` was already allocated and populated for exactly this purpose.
// NOTE(review): results stay in device memory (`dm`); the host buffer `m`
// is never filled — output is only inspected via checkDeviceMatrix.
int main(int argc, char *argv[])
{
    initializeCUDA(deviceProp);
    status = cublasCreate(&handle) ;
    float alpha, beta;
    float *a, *b, *m;
    float *da, *da2, *db, *db2, *dm;
    float *ones, *dones;
    // page-locked (pinned) host allocations for faster transfers
    cudaHostAlloc((void**)&a, MEMSIZE,cudaHostAllocDefault);
    cudaHostAlloc((void**)&b, MEMSIZE,cudaHostAllocDefault);
    cudaHostAlloc((void**)&ones, MEMSIZE,cudaHostAllocDefault);
    cudaHostAlloc((void**)&m, N * N * sizeof(float),cudaHostAllocDefault);
    cudaMalloc( (void**)&da, MEMSIZE );
    cudaMalloc( (void**)&da2, MEMSIZE );
    cudaMalloc( (void**)&db, MEMSIZE );
    cudaMalloc( (void**)&db2, MEMSIZE );
    cudaMalloc( (void**)&dones, MEMSIZE );
    cudaMalloc( (void**)&dm, N * N * sizeof(float));
    fillData(a, N, dim);
    fillData(b, N, dim);
    fillDataWithNumber(ones, N, dim, 1.0f);
    // Copy data to DEVICE
    cudaMemcpy(da, a, MEMSIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, MEMSIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(dones, ones, MEMSIZE, cudaMemcpyHostToDevice);
    // Step 1: square every coordinate: da2 = da.^2, db2 = db.^2.
    kernelPowerTwo<<<GRID_SIZE, THREAD_PER_BLOCK>>>(da, db, N * dim, da2, db2);
    // Step 2: dm = ||a_i||^2 + ||b_j||^2 via two GEMMs against the
    // device-resident all-ones matrix (was: host pointer `ones` — bug).
    alpha = 1.f; beta = 0.f;
    cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, N, dim, &alpha, da2, dim, dones, dim, &beta, dm, N);
    beta = 1.f;
    cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, N, dim, &alpha, dones, dim, db2, dim, &beta, dm, N);
    // Step 3: dm += -2 * a.b, completing the squared distances.
    alpha = -2.f;
    cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, N, dim, &alpha, da, dim, db, dim, &beta, dm, N);
    // Step 4: element-wise sqrt for the final distance matrix.
    kernelSqrt<<<GRID_SIZE2, THREAD_PER_BLOCK>>>(dm, N * N, dm);
    checkDeviceMatrix<float>(da, sizeof(float)*dim, N, dim, "%f ", "A");
    checkDeviceMatrix<float>(da2, sizeof(float)*dim, N, dim, "%f ", "A^2");
    checkDeviceMatrix<float>(db, sizeof(float)*dim, N, dim, "%f ", "B");
    checkDeviceMatrix<float>(db2, sizeof(float)*dim, N, dim, "%f ", "B^2");
    checkDeviceMatrix<float>(dones, sizeof(float)*dim, N, dim, "%f ", "ONES");
    checkDeviceMatrix<float>(dm, sizeof(float)*N, N, N, "%f ", "M");
    cudaFree(da);
    cudaFree(da2);
    cudaFree(db);
    cudaFree(db2);
    cudaFree(dm);
    cudaFree(dones);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(m);
    cudaFreeHost(ones);
    status = cublasDestroy(handle);
}
|
aad2b937930a34316991176b51634698dd4b34e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define PI 3.1415926535
// Pytchography kernels
void __global__ mul(float2 *g, float2 *f, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
if(stx<0||sty<0||stx>N-1||sty>Nz-1) return;
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 f0 = f[(stx+ix)+(sty+iy)*N+tz*Nz*N];
float2 prb0 = prb[ix+iy*Nprb+tz*Nprb*Nprb];
float c = 1/sqrtf(detx*dety);//fft constant
g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan].x = c*prb0.x*f0.x-c*prb0.y*f0.y;
g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan].y = c*prb0.x*f0.y+c*prb0.y*f0.x;
}
void __global__ mula(float2 *f, float2 *g, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
if(stx<0||sty<0||stx>N-1||sty>Nz-1) return;
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 g0 = g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan];
float2 prb0 = prb[ix+iy*Nprb+tz*Nprb*Nprb];
float c = 1/sqrtf(detx*dety);//fft constant
atomicAdd(&f[(stx+ix)+(sty+iy)*N+tz*Nz*N].x, c*prb0.x*g0.x+c*prb0.y*g0.y);
atomicAdd(&f[(stx+ix)+(sty+iy)*N+tz*Nz*N].y, c*prb0.x*g0.y-c*prb0.y*g0.x);
}
void __global__ mulaprb(float2 *f, float2 *g, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
if(stx<0||sty<0) return;
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 g0 = g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan];
float2 f0 = f[(stx+ix)+(sty+iy)*N+tz*Nz*N];
float c = 1/sqrtf(detx*dety);//fft constant
atomicAdd(&prb[ix+iy*Nprb+tz*Nprb*Nprb].x, c*f0.x*g0.x+c*f0.y*g0.y);
atomicAdd(&prb[ix+iy*Nprb+tz*Nprb*Nprb].y, c*f0.x*g0.y-c*f0.y*g0.x);
}
void __global__ updatepsi(float2* f, float2* ff, float2* ftmp0, float2* ftmp1,
float2* fff, float rho, float gamma, float maxint, int Ntheta, int Nz,int N)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=N||ty>=Nz||tz>=Ntheta) return;
int ind = tx+ty*N+tz*N*Nz;
f[ind].x = (1-rho*gamma)*f[ind].x+rho*gamma*(ff[ind].x-fff[ind].x/rho) +
gamma/2*(ftmp0[ind].x-ftmp1[ind].x)/maxint;
f[ind].y = (1-rho*gamma)*f[ind].y+rho*gamma*(ff[ind].y-fff[ind].y/rho) +
gamma/2*(ftmp0[ind].y-ftmp1[ind].y)/maxint;
}
void __global__ takeshifts(float2* shiftx,float2* shifty,float* scanx,float* scany,int Ntheta, int Nscan)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
if (tx>=Nscan||ty>=Ntheta) return;
int ind = tx+ty*Nscan;
shiftx[ind].x = cosf(2*PI*(scanx[ind] - roundf(scanx[ind])));
shiftx[ind].y = sinf(2*PI*(scanx[ind] - roundf(scanx[ind])));
shifty[ind].x = cosf(2*PI*(scany[ind] - roundf(scany[ind])));
shifty[ind].y = sinf(2*PI*(scany[ind] - roundf(scany[ind])));
}
void __global__ shifts(float2* f, float2* shiftx,float2* shifty,int Ntheta, int Nscan, int detxdety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=detxdety||ty>=Nscan||tz>=Ntheta) return;
int ind = tx+ty*detxdety+tz*detxdety*Nscan;
int inds = ty+tz*Nscan;
float2 f0 = f[ind];
float2 shiftx0 = shiftx[inds];
float2 shifty0 = shifty[inds];
f[ind].x = f0.x*shiftx0.x-f0.y*shiftx0.y;
f[ind].y = f0.y*shiftx0.x+f0.x*shiftx0.y;
f0 = f[ind];
f[ind].x = f0.x*shifty0.x-f0.y*shifty0.y;
f[ind].y = f0.y*shifty0.x+f0.x*shifty0.y;
}
void __global__ shiftsa(float2* f, float2* shiftx,float2* shifty,int Ntheta, int Nscan, int detxdety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=detxdety||ty>=Nscan||tz>=Ntheta) return;
int ind = tx+ty*detxdety+tz*detxdety*Nscan;
int inds = ty+tz*Nscan;
float2 f0 = f[ind];
float2 shiftx0 = shiftx[inds];
float2 shifty0 = shifty[inds];
f[ind].x = f0.x*shiftx0.x+f0.y*shiftx0.y;
f[ind].y = f0.y*shiftx0.x-f0.x*shiftx0.y;
f0 = f[ind];
f[ind].x = f0.x*shifty0.x+f0.y*shifty0.y;
f[ind].y = f0.y*shifty0.x-f0.x*shifty0.y;
}
| aad2b937930a34316991176b51634698dd4b34e0.cu | #define PI 3.1415926535
// Pytchography kernels
void __global__ mul(float2 *g, float2 *f, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
if(stx<0||sty<0||stx>N-1||sty>Nz-1) return;
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 f0 = f[(stx+ix)+(sty+iy)*N+tz*Nz*N];
float2 prb0 = prb[ix+iy*Nprb+tz*Nprb*Nprb];
float c = 1/sqrtf(detx*dety);//fft constant
g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan].x = c*prb0.x*f0.x-c*prb0.y*f0.y;
g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan].y = c*prb0.x*f0.y+c*prb0.y*f0.x;
}
void __global__ mula(float2 *f, float2 *g, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
if(stx<0||sty<0||stx>N-1||sty>Nz-1) return;
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 g0 = g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan];
float2 prb0 = prb[ix+iy*Nprb+tz*Nprb*Nprb];
float c = 1/sqrtf(detx*dety);//fft constant
atomicAdd(&f[(stx+ix)+(sty+iy)*N+tz*Nz*N].x, c*prb0.x*g0.x+c*prb0.y*g0.y);
atomicAdd(&f[(stx+ix)+(sty+iy)*N+tz*Nz*N].y, c*prb0.x*g0.y-c*prb0.y*g0.x);
}
void __global__ mulaprb(float2 *f, float2 *g, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
if(stx<0||sty<0) return;
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 g0 = g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan];
float2 f0 = f[(stx+ix)+(sty+iy)*N+tz*Nz*N];
float c = 1/sqrtf(detx*dety);//fft constant
atomicAdd(&prb[ix+iy*Nprb+tz*Nprb*Nprb].x, c*f0.x*g0.x+c*f0.y*g0.y);
atomicAdd(&prb[ix+iy*Nprb+tz*Nprb*Nprb].y, c*f0.x*g0.y-c*f0.y*g0.x);
}
void __global__ updatepsi(float2* f, float2* ff, float2* ftmp0, float2* ftmp1,
float2* fff, float rho, float gamma, float maxint, int Ntheta, int Nz,int N)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=N||ty>=Nz||tz>=Ntheta) return;
int ind = tx+ty*N+tz*N*Nz;
f[ind].x = (1-rho*gamma)*f[ind].x+rho*gamma*(ff[ind].x-fff[ind].x/rho) +
gamma/2*(ftmp0[ind].x-ftmp1[ind].x)/maxint;
f[ind].y = (1-rho*gamma)*f[ind].y+rho*gamma*(ff[ind].y-fff[ind].y/rho) +
gamma/2*(ftmp0[ind].y-ftmp1[ind].y)/maxint;
}
void __global__ takeshifts(float2* shiftx,float2* shifty,float* scanx,float* scany,int Ntheta, int Nscan)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
if (tx>=Nscan||ty>=Ntheta) return;
int ind = tx+ty*Nscan;
shiftx[ind].x = cosf(2*PI*(scanx[ind] - roundf(scanx[ind])));
shiftx[ind].y = sinf(2*PI*(scanx[ind] - roundf(scanx[ind])));
shifty[ind].x = cosf(2*PI*(scany[ind] - roundf(scany[ind])));
shifty[ind].y = sinf(2*PI*(scany[ind] - roundf(scany[ind])));
}
void __global__ shifts(float2* f, float2* shiftx,float2* shifty,int Ntheta, int Nscan, int detxdety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=detxdety||ty>=Nscan||tz>=Ntheta) return;
int ind = tx+ty*detxdety+tz*detxdety*Nscan;
int inds = ty+tz*Nscan;
float2 f0 = f[ind];
float2 shiftx0 = shiftx[inds];
float2 shifty0 = shifty[inds];
f[ind].x = f0.x*shiftx0.x-f0.y*shiftx0.y;
f[ind].y = f0.y*shiftx0.x+f0.x*shiftx0.y;
f0 = f[ind];
f[ind].x = f0.x*shifty0.x-f0.y*shifty0.y;
f[ind].y = f0.y*shifty0.x+f0.x*shifty0.y;
}
void __global__ shiftsa(float2* f, float2* shiftx,float2* shifty,int Ntheta, int Nscan, int detxdety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=detxdety||ty>=Nscan||tz>=Ntheta) return;
int ind = tx+ty*detxdety+tz*detxdety*Nscan;
int inds = ty+tz*Nscan;
float2 f0 = f[ind];
float2 shiftx0 = shiftx[inds];
float2 shifty0 = shifty[inds];
f[ind].x = f0.x*shiftx0.x+f0.y*shiftx0.y;
f[ind].y = f0.y*shiftx0.x-f0.x*shiftx0.y;
f0 = f[ind];
f[ind].x = f0.x*shifty0.x+f0.y*shifty0.y;
f[ind].y = f0.y*shifty0.x-f0.x*shifty0.y;
}
|
0a4050229de3e99448ca3d23676e056462661206.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BLOCK_SIZE 8
#define MAX(a,b) ( a > b ? a : b )
#define MIN(a,b) ( a <= b ? a : b )
#define SIGN(x) ( x >= 0.0 ? 1.0 : -1.0 )
#define ABS(x) ( (x) > 0.0 ? x : -(x) )
#define SQR(x) __fmul_rz((x), (x))
static __global__ void krnl_1(float *pfdv, float *pfpt, float *pfps,
float *pfu, float *pfgk, float cc,
int iNx, int iNy, int iNz, int SZF,
int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF);
pfgk[index] = __fadd_rz(__fadd_rz(__fadd_rz(pfdv[index], - pfps[index]), pfpt[index]), - pfu[index]);
}
}
static __global__ void krnl_2(float *pfbx, float *pfgk, float steps,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<(iNx-1) && idz<iNz )
{
int index = idx + __mul24(idy+1, iNy) + __mul24(idz, SZF);
pfbx[index] = __fadd_rz(__fmul_rz(steps, __fadd_rz(pfgk[index], - pfgk[index-iNy])), pfbx[index]);
}
}
static __global__ void krnl_3(float *pfby, float *pfgk, float steps,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<(iNy-1) && idy<iNx && idz<iNz)
{
int index =idx + __mul24(idy, iNy) + __mul24(idz, SZF) + 1;
pfby[index] = __fadd_rz(__fmul_rz(steps, __fadd_rz(pfgk[index], - pfgk[index-1])), pfby[index]);
}
}
static __global__ void krnl_z(float *pfbz, float *pfgk, float steps,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<iNx && idz<(iNz-1))
{
int index = idx + __mul24(idy, iNy) + __mul24(idz+1, SZF);
pfbz[index] = __fadd_rz(__fmul_rz(steps, __fadd_rz(pfgk[index], - pfgk[index-SZF])), pfbz[index]);
}
}
static __global__ void krnl_4(float *pfbx, float *pfby, float *pfbz, float *pfgk, float *pfpenalty,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
float fpt;
if( idx<iNy && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF);
fpt = __fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(
SQR(pfbx[index+iNy]), SQR(pfbx[index])), SQR(pfby[index])),
SQR(pfby[index+1])), SQR(pfbz[index])), SQR(pfbz[index+SZF]));
fpt = sqrtf(__fmul_rz(fpt, 0.5));
if (fpt > pfpenalty[index])
fpt = __fdividef(fpt, pfpenalty[index]);
else
fpt = 1;
pfgk[index] = __frcp_rz(fpt);
}
}
static __global__ void krnl_5(float *pfbx, float *pfgk, int iNx, int iNy,
int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<(iNx-1) && idz<iNz)
{
int index = idx + __mul24(idy+1, iNy) + __mul24(idz, SZF);
pfbx[index] = __fmul_rz(__fmul_rz(__fadd_rz(pfgk[index], pfgk[index-iNy]), 0.5), pfbx[index]);
}
}
static __global__ void krnl_6(float *pfby, float *pfgk, int iNx,
int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<(iNy-1) && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF)+1;
pfby[index] = __fmul_rz(__fmul_rz(__fadd_rz(pfgk[index], pfgk[index-1]), 0.5), pfby[index]);
}
}
static __global__ void krnl_zp(float *pfbz, float *pfgk, int iNx,
int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<iNx && idz<(iNz-1))
{
int index = idx + __mul24(idy, iNy) + __mul24(idz+1, SZF);
pfbz[index] = __fmul_rz(__fmul_rz(__fadd_rz(pfgk[index], pfgk[index-SZF]), 0.5), pfbz[index]);
}
}
static __global__ void krnl_7(float *pfbx, float *pfby, float *pfbz, float *pfdv,
float *pfps, float *pfpt, float *pfu, float *FPS,
float *pfCs, float *pfCt, float tcc,
int iNx, int iNy, int iNz, int SZF,
int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
float fpt;
if( idx<iNy && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF);
pfdv[index] = __fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(
pfbx[index+iNy], - pfbx[index]),
pfby[index+1]), - pfby[index]),
pfbz[index+SZF]), - pfbz[index]);
fpt = __fadd_rz(__fadd_rz(__fadd_rz(pfpt[index], - pfu[index]), pfdv[index]), tcc);
pfps[index] = MIN(fpt, pfCs[index]);
fpt = __fadd_rz(__fadd_rz(pfps[index], pfu[index]), - pfdv[index]);
pfpt[index] = MIN(fpt, pfCt[index]);
fpt = __fadd_rz(__fadd_rz(pfpt[index], pfdv[index]), - pfps[index]);
FPS[index] = fabsf(fpt);
pfu[index] = __fadd_rz(pfu[index], -fpt);
}
}
| 0a4050229de3e99448ca3d23676e056462661206.cu | #define BLOCK_SIZE 8
#define MAX(a,b) ( a > b ? a : b )
#define MIN(a,b) ( a <= b ? a : b )
#define SIGN(x) ( x >= 0.0 ? 1.0 : -1.0 )
#define ABS(x) ( (x) > 0.0 ? x : -(x) )
#define SQR(x) __fmul_rz((x), (x))
static __global__ void krnl_1(float *pfdv, float *pfpt, float *pfps,
float *pfu, float *pfgk, float cc,
int iNx, int iNy, int iNz, int SZF,
int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF);
pfgk[index] = __fadd_rz(__fadd_rz(__fadd_rz(pfdv[index], - pfps[index]), pfpt[index]), - pfu[index]);
}
}
static __global__ void krnl_2(float *pfbx, float *pfgk, float steps,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<(iNx-1) && idz<iNz )
{
int index = idx + __mul24(idy+1, iNy) + __mul24(idz, SZF);
pfbx[index] = __fadd_rz(__fmul_rz(steps, __fadd_rz(pfgk[index], - pfgk[index-iNy])), pfbx[index]);
}
}
static __global__ void krnl_3(float *pfby, float *pfgk, float steps,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<(iNy-1) && idy<iNx && idz<iNz)
{
int index =idx + __mul24(idy, iNy) + __mul24(idz, SZF) + 1;
pfby[index] = __fadd_rz(__fmul_rz(steps, __fadd_rz(pfgk[index], - pfgk[index-1])), pfby[index]);
}
}
static __global__ void krnl_z(float *pfbz, float *pfgk, float steps,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<iNx && idz<(iNz-1))
{
int index = idx + __mul24(idy, iNy) + __mul24(idz+1, SZF);
pfbz[index] = __fadd_rz(__fmul_rz(steps, __fadd_rz(pfgk[index], - pfgk[index-SZF])), pfbz[index]);
}
}
static __global__ void krnl_4(float *pfbx, float *pfby, float *pfbz, float *pfgk, float *pfpenalty,
int iNx, int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
float fpt;
if( idx<iNy && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF);
fpt = __fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(
SQR(pfbx[index+iNy]), SQR(pfbx[index])), SQR(pfby[index])),
SQR(pfby[index+1])), SQR(pfbz[index])), SQR(pfbz[index+SZF]));
fpt = sqrtf(__fmul_rz(fpt, 0.5));
if (fpt > pfpenalty[index])
fpt = __fdividef(fpt, pfpenalty[index]);
else
fpt = 1;
pfgk[index] = __frcp_rz(fpt);
}
}
static __global__ void krnl_5(float *pfbx, float *pfgk, int iNx, int iNy,
int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<(iNx-1) && idz<iNz)
{
int index = idx + __mul24(idy+1, iNy) + __mul24(idz, SZF);
pfbx[index] = __fmul_rz(__fmul_rz(__fadd_rz(pfgk[index], pfgk[index-iNy]), 0.5), pfbx[index]);
}
}
static __global__ void krnl_6(float *pfby, float *pfgk, int iNx,
int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<(iNy-1) && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF)+1;
pfby[index] = __fmul_rz(__fmul_rz(__fadd_rz(pfgk[index], pfgk[index-1]), 0.5), pfby[index]);
}
}
static __global__ void krnl_zp(float *pfbz, float *pfgk, int iNx,
int iNy, int iNz, int SZF, int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
if( idx<iNy && idy<iNx && idz<(iNz-1))
{
int index = idx + __mul24(idy, iNy) + __mul24(idz+1, SZF);
pfbz[index] = __fmul_rz(__fmul_rz(__fadd_rz(pfgk[index], pfgk[index-SZF]), 0.5), pfbz[index]);
}
}
static __global__ void krnl_7(float *pfbx, float *pfby, float *pfbz, float *pfdv,
float *pfps, float *pfpt, float *pfu, float *FPS,
float *pfCs, float *pfCt, float tcc,
int iNx, int iNy, int iNz, int SZF,
int blocksInY, float invBlocksInY)
{
int blockIdxz = __float2uint_rd(blockIdx.y * invBlocksInY);
int blockIdxy = blockIdx.y - __umul24(blockIdxz,blocksInY);
int idx = __mul24(blockIdx.x,blockDim.x)+threadIdx.x;
int idy = __mul24(blockIdxy,blockDim.y)+threadIdx.y;
int idz = __mul24(blockIdxz,blockDim.z)+threadIdx.z;
float fpt;
if( idx<iNy && idy<iNx && idz<iNz)
{
int index = idx + __mul24(idy, iNy) + __mul24(idz, SZF);
pfdv[index] = __fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(__fadd_rz(
pfbx[index+iNy], - pfbx[index]),
pfby[index+1]), - pfby[index]),
pfbz[index+SZF]), - pfbz[index]);
fpt = __fadd_rz(__fadd_rz(__fadd_rz(pfpt[index], - pfu[index]), pfdv[index]), tcc);
pfps[index] = MIN(fpt, pfCs[index]);
fpt = __fadd_rz(__fadd_rz(pfps[index], pfu[index]), - pfdv[index]);
pfpt[index] = MIN(fpt, pfCt[index]);
fpt = __fadd_rz(__fadd_rz(pfpt[index], pfdv[index]), - pfps[index]);
FPS[index] = fabsf(fpt);
pfu[index] = __fadd_rz(pfu[index], -fpt);
}
}
|
51cb1632960f51b6f0cda2fe08f5d5322af8f83e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "../helper/uint_util.hcu"
#include "../helper/float_util.hcu"
//#include "common/error.h"
#include <math.h>
#define PI 3.14159265
#define IMAGE_SIZE 1024
#define CHECK_ERR(x) \
if (x != hipSuccess) { \
fprintf(stderr,"%s in %s at line %d\n", \
hipGetErrorString(err),__FILE__,__LINE__); \
exit(-1); \
}
unsigned int radius;
float sigma_spatial;
float sigma_range;
typedef struct {
float R;
float G;
float B;
} RGB;
__host__ __device__
float gaussian1d(float x, float sigma)
{
float variance = pow(sigma,2);
float exponent = -pow(x,2)/(2*variance);
return expf(exponent) / sqrt(2 * PI * variance);
}
__host__ __device__
float gaussian2d(float x, float y, float sigma)
{
float variance = pow(sigma,2);
float exponent = -(pow(x,2) + pow(y,2))/(2*variance);
return expf(exponent) / (2 * PI * variance);
}
__global__
void bilateralFilterGPU_v1(float3* input, float3* output, uint2 dims, int radius, float sigma_spatial, float sigma_range)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
uint2 pos = idx_to_co(idx,dims);
int img_x = pos.x;
int img_y = pos.y;
if(img_x >= dims.x || img_y >= dims.y) return;
float3 currentColor = input[idx];
float3 res = make_float3(0.0f,0.0f,0.0f);
float3 normalization = make_float3(0.0f,0.0f,0.0f);;
for(int i = -radius; i <= radius; i++) {
for(int j = -radius; j <= radius; j++) {
int x_sample = img_x+i;
int y_sample = img_y+j;
//mirror edges
if( x_sample < 0) x_sample = -x_sample;
if( y_sample < 0) y_sample = -y_sample;
if( x_sample > dims.x - 1) x_sample = dims.x - 1 - i;
if( y_sample > dims.y - 1) y_sample = dims.y - 1 - j;
float3 tmpColor = input[co_to_idx(make_uint2(x_sample,y_sample),dims)];
float gauss_spatial = gaussian2d(i,j,sigma_spatial);
float3 gauss_range;
gauss_range.x = gaussian1d(currentColor.x - tmpColor.x, sigma_range);
gauss_range.y = gaussian1d(currentColor.y - tmpColor.y, sigma_range);
gauss_range.z = gaussian1d(currentColor.z - tmpColor.z, sigma_range);
float3 weight = gauss_spatial * gauss_range;
normalization = normalization + weight;
res = res + (tmpColor * weight);
}
}
res.x /= normalization.x;
res.y /= normalization.y;
res.z /= normalization.z;
output[idx] = res;
}
void bilateralFiltering_v1(RGB* data, int width, int height ,int radius, float sigma_spatial, float sigma_range,int NUM_THREADS) {
unsigned int numElements = width * height;
hipError_t err;
// copy data to device
float3* d_data;
err= hipMalloc( (void**) &d_data, numElements*sizeof(RGB));
CHECK_ERR(err);
err= hipMemcpy( d_data, data, numElements*sizeof(RGB), hipMemcpyHostToDevice );
CHECK_ERR(err);
//Output image
float3* d_result;
err= hipMalloc( (void**) &d_result, numElements*sizeof(RGB));
CHECK_ERR(err);
// setup dimensions of grid/blocks.
dim3 blockDim(NUM_THREADS,1,1);
dim3 gridDim((unsigned int) ceil((double)(numElements/blockDim.x)), 1, 1 );
// invoke kernel
hipLaunchKernelGGL(( bilateralFilterGPU_v1), dim3(7), dim3(NUM_THREADS) , 0, 0, d_data, d_result, make_uint2(width,height), radius, sigma_spatial, sigma_range);
// copy data to host
err= hipMemcpy( data, d_result, numElements*sizeof(RGB), hipMemcpyDeviceToHost );
CHECK_ERR(err);
hipFree(d_data);
hipFree(d_result);
}
/*(RGB*) populateRGB(int width, int height){
int i=0;
int j=0;
srand (time(NULL));
RGB* colors = (RGB *) malloc (sizeof(RGB) * width * height);
for(int i = 0; i < numElements; i++)
{
RGB c;
c.R = rand() % 255;
c.G = rand() % 255;
c.B = rand() % 255;
colors[i] = c;
}
return colors;
}
*/
int main(int argc, char** argv) {
if (argc != 4){
printf("invalid parameters, use: <WIDTH> <HEIGHT> <NUM_THREADS>\n");
return -1;
}
//const unsigned int channels = 1;//atoi(argv[1]);
StopWatchInterface *hTimer = NULL;
unsigned int width = atoi(argv[1]);
unsigned int height = atoi(argv[2]);
sdkCreateTimer(&hTimer);
radius = 1;//atoi(argv[2]);
sigma_spatial = 1.0; //(float)atof(argv[3]);
sigma_range = 1.0; //(float)atof(argv[4]);
//int TEST_RUN = atoi(argv[1]);
int NUM_THREADS = atoi(argv[3]);
int NUM_TEST =10;// atoi(argv[2]);
//for(int i = 0; i < TEST_RUN; i++)
//{
RGB *data=(RGB *) malloc (sizeof(RGB) * width * height);
for(int j = 0; j < (width * height); j++)
{
RGB c;
c.R = rand() % 255;
c.G = rand() % 255;
c.B = rand() % 255;
data[j] = c;
}
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int k = 0; k < NUM_TEST; k++)
{
bilateralFiltering_v1(data,width,height,radius,sigma_spatial,sigma_range,NUM_THREADS);
}
sdkStopTimer(&hTimer);
double dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer) / (double) NUM_TEST;
printf("%.5f\t", dAvgSecs);
free(data);
width *= 2;
// }
sdkDeleteTimer(&hTimer);
return EXIT_SUCCESS;
}
| 51cb1632960f51b6f0cda2fe08f5d5322af8f83e.cu | #include <stdlib.h>
#include <stdio.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include "../helper/uint_util.hcu"
#include "../helper/float_util.hcu"
//#include "common/error.h"
#include <math.h>
#define PI 3.14159265
#define IMAGE_SIZE 1024
#define CHECK_ERR(x) \
if (x != cudaSuccess) { \
fprintf(stderr,"%s in %s at line %d\n", \
cudaGetErrorString(err),__FILE__,__LINE__); \
exit(-1); \
}
unsigned int radius;
float sigma_spatial;
float sigma_range;
typedef struct {
float R;
float G;
float B;
} RGB;
__host__ __device__
float gaussian1d(float x, float sigma)
{
float variance = pow(sigma,2);
float exponent = -pow(x,2)/(2*variance);
return expf(exponent) / sqrt(2 * PI * variance);
}
__host__ __device__
float gaussian2d(float x, float y, float sigma)
{
float variance = pow(sigma,2);
float exponent = -(pow(x,2) + pow(y,2))/(2*variance);
return expf(exponent) / (2 * PI * variance);
}
__global__
void bilateralFilterGPU_v1(float3* input, float3* output, uint2 dims, int radius, float sigma_spatial, float sigma_range)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
uint2 pos = idx_to_co(idx,dims);
int img_x = pos.x;
int img_y = pos.y;
if(img_x >= dims.x || img_y >= dims.y) return;
float3 currentColor = input[idx];
float3 res = make_float3(0.0f,0.0f,0.0f);
float3 normalization = make_float3(0.0f,0.0f,0.0f);;
for(int i = -radius; i <= radius; i++) {
for(int j = -radius; j <= radius; j++) {
int x_sample = img_x+i;
int y_sample = img_y+j;
//mirror edges
if( x_sample < 0) x_sample = -x_sample;
if( y_sample < 0) y_sample = -y_sample;
if( x_sample > dims.x - 1) x_sample = dims.x - 1 - i;
if( y_sample > dims.y - 1) y_sample = dims.y - 1 - j;
float3 tmpColor = input[co_to_idx(make_uint2(x_sample,y_sample),dims)];
float gauss_spatial = gaussian2d(i,j,sigma_spatial);
float3 gauss_range;
gauss_range.x = gaussian1d(currentColor.x - tmpColor.x, sigma_range);
gauss_range.y = gaussian1d(currentColor.y - tmpColor.y, sigma_range);
gauss_range.z = gaussian1d(currentColor.z - tmpColor.z, sigma_range);
float3 weight = gauss_spatial * gauss_range;
normalization = normalization + weight;
res = res + (tmpColor * weight);
}
}
res.x /= normalization.x;
res.y /= normalization.y;
res.z /= normalization.z;
output[idx] = res;
}
// Host wrapper: uploads the RGB image, runs bilateralFilterGPU_v1 over every
// pixel, and copies the filtered result back in place into `data`.
// Parameters: data = width*height RGB pixels (overwritten with the result),
// radius/sigma_spatial/sigma_range = filter parameters, NUM_THREADS = threads
// per block for the 1-D launch.
void bilateralFiltering_v1(RGB* data, int width, int height ,int radius, float sigma_spatial, float sigma_range,int NUM_THREADS) {
unsigned int numElements = width * height;
cudaError_t err;
// copy input image to device
float3* d_data;
err= cudaMalloc( (void**) &d_data, numElements*sizeof(RGB));
CHECK_ERR(err);
err= cudaMemcpy( d_data, data, numElements*sizeof(RGB), cudaMemcpyHostToDevice );
CHECK_ERR(err);
// output image
float3* d_result;
err= cudaMalloc( (void**) &d_result, numElements*sizeof(RGB));
CHECK_ERR(err);
// one thread per pixel; ceil-divide so the tail of the image is covered
// (the previous integer division truncated before ceil(), dropping the
// remainder block when numElements was not a multiple of NUM_THREADS)
dim3 blockDim(NUM_THREADS,1,1);
dim3 gridDim((numElements + blockDim.x - 1) / blockDim.x, 1, 1);
// BUGFIX: the kernel was launched with a hard-coded grid of 7 blocks,
// leaving every pixel beyond 7*NUM_THREADS unfiltered; use the computed
// grid size instead.
bilateralFilterGPU_v1<<< gridDim, blockDim >>>( d_data, d_result, make_uint2(width,height), radius, sigma_spatial, sigma_range);
// catch launch-configuration errors (a kernel launch itself returns nothing)
err = cudaGetLastError();
CHECK_ERR(err);
// blocking copy back; synchronizes with the kernel on the default stream
err= cudaMemcpy( data, d_result, numElements*sizeof(RGB), cudaMemcpyDeviceToHost );
CHECK_ERR(err);
cudaFree(d_data);
cudaFree(d_result);
}
/*(RGB*) populateRGB(int width, int height){
int i=0;
int j=0;
srand (time(NULL));
RGB* colors = (RGB *) malloc (sizeof(RGB) * width * height);
for(int i = 0; i < numElements; i++)
{
RGB c;
c.R = rand() % 255;
c.G = rand() % 255;
c.B = rand() % 255;
colors[i] = c;
}
return colors;
}
*/
// Benchmark driver: fills a width x height image with random colors, runs the
// GPU bilateral filter NUM_TEST times, and prints the average seconds per run.
// NOTE(review): radius/sigma_spatial/sigma_range are assigned without a local
// declaration — presumably file-scope globals declared above this chunk; confirm.
int main(int argc, char** argv) {
if (argc != 4){
printf("invalid parameters, use: <WIDTH> <HEIGHT> <NUM_THREADS>\n");
return -1;
}
//const unsigned int channels = 1;//atoi(argv[1]);
StopWatchInterface *hTimer = NULL;
unsigned int width = atoi(argv[1]);
unsigned int height = atoi(argv[2]);
sdkCreateTimer(&hTimer);
// fixed filter parameters (command-line overrides are commented out)
radius = 1;//atoi(argv[2]);
sigma_spatial = 1.0; //(float)atof(argv[3]);
sigma_range = 1.0; //(float)atof(argv[4]);
//int TEST_RUN = atoi(argv[1]);
int NUM_THREADS = atoi(argv[3]);
int NUM_TEST =10;// atoi(argv[2]);
//for(int i = 0; i < TEST_RUN; i++)
//{
// random input image; NOTE(review): srand() is never called, so the image is
// identical across program runs — fine for benchmarking, but be aware.
RGB *data=(RGB *) malloc (sizeof(RGB) * width * height);
for(int j = 0; j < (width * height); j++)
{
RGB c;
c.R = rand() % 255;
c.G = rand() % 255;
c.B = rand() % 255;
data[j] = c;
}
// time NUM_TEST back-to-back filter invocations
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for(int k = 0; k < NUM_TEST; k++)
{
bilateralFiltering_v1(data,width,height,radius,sigma_spatial,sigma_range,NUM_THREADS);
}
sdkStopTimer(&hTimer);
// timer reports milliseconds; convert to seconds and average over runs
double dAvgSecs = 1.0e-3 * (double)sdkGetTimerValue(&hTimer) / (double) NUM_TEST;
printf("%.5f\t", dAvgSecs);
free(data);
// leftover from the disabled doubling loop above; has no effect as written
width *= 2;
// }
sdkDeleteTimer(&hTimer);
return EXIT_SUCCESS;
}
|
1a5b94a736d0477a731e247139b50d370ecc3409.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re_hip.cuh"
#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <regex>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Parse the back-ref index and position values from a given replace format.
*
* The backref numbers are expected to be 1-based.
*
* Returns a modified string without back-ref indicators and a vector of backref
* byte position pairs.
* ```
* Example:
* for input string: 'hello \2 and \1'
* the returned pairs: (2,6),(1,11)
* returned string is: 'hello and '
* ```
*/
std::pair<std::string, std::vector<backref_type>> parse_backrefs(std::string const& repl)
{
std::vector<backref_type> backrefs;
std::string str = repl; // make a modifiable copy
std::smatch m;
std::regex ex("(\\\\\\d+)"); // this searches for backslash-number(s); example "\1"
std::string rtn; // result without refs
size_type byte_offset = 0;
while (std::regex_search(str, m, ex)) {
if (m.size() == 0) break;
std::string const backref = m[0];
size_type const position = static_cast<size_type>(m.position(0));
size_type const length = static_cast<size_type>(backref.length());
byte_offset += position;
size_type const index = std::atoi(backref.c_str() + 1); // back-ref index number
CUDF_EXPECTS(index > 0, "Back-reference numbers must be greater than 0");
rtn += str.substr(0, position);
str = str.substr(position + length);
backrefs.push_back({index, byte_offset});
}
if (!str.empty()) // add the remainder
rtn += str; // of the string
return {rtn, backrefs};
}
} // namespace
//
// Detail implementation: replaces every match of `pattern` in each string with
// `repl`, substituting "\N" back-references with the matched capture groups.
// Dispatches to a stack-size-specific kernel based on the compiled regex's
// instruction count, then assembles the result column with the input's null mask.
std::unique_ptr<column> replace_with_backrefs(
strings_column_view const& strings,
std::string const& pattern,
std::string const& repl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(!pattern.empty(), "Parameter pattern must not be empty");
CUDF_EXPECTS(!repl.empty(), "Parameter repl must not be empty");
auto d_strings = column_device_view::create(strings.parent(), stream);
// compile regex into device object
auto d_prog = reprog_device::create(pattern, get_character_flags_table(), strings.size(), stream);
auto const regex_insts = d_prog->insts_counts();
// parse the repl string for backref indicators
auto const parse_result = parse_backrefs(repl);
// copy the host-side (index, offset) pairs to the device; parse_result stays
// in scope for the whole function, so the async copy's source remains valid
rmm::device_uvector<backref_type> backrefs(parse_result.second.size(), stream);
CUDA_TRY(hipMemcpyAsync(backrefs.data(),
parse_result.second.data(),
sizeof(backref_type) * backrefs.size(),
hipMemcpyHostToDevice,
stream.value()));
string_scalar repl_scalar(parse_result.first, true, stream);
string_view const d_repl_template = repl_scalar.value();
using BackRefIterator = decltype(backrefs.begin());
// create child columns
children_pair children = [&] {
// Each invocation is predicated on the stack size
// which is dependent on the number of regex instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS)) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_SMALL>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
strings.null_count(),
stream,
mr);
} else if (regex_insts <= RX_MEDIUM_INSTS)
return replace_with_backrefs_medium(
*d_strings, *d_prog, d_repl_template, backrefs, strings.null_count(), stream, mr);
else
return replace_with_backrefs_large(
*d_strings, *d_prog, d_repl_template, backrefs, strings.null_count(), stream, mr);
}();
// output column reuses the input's null count and a copy of its null mask
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
// external API
// Public API: forwards to the detail implementation on the default stream,
// adding an NVTX range for profiling.
std::unique_ptr<column> replace_with_backrefs(strings_column_view const& strings,
std::string const& pattern,
std::string const& repl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_with_backrefs(strings, pattern, repl, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| 1a5b94a736d0477a731e247139b50d370ecc3409.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backref_re.cuh"
#include <strings/regex/regex.cuh>
#include <strings/utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/replace_re.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <regex>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Parse the back-ref index and position values from a given replace format.
*
* The backref numbers are expected to be 1-based.
*
* Returns a modified string without back-ref indicators and a vector of backref
* byte position pairs.
* ```
* Example:
* for input string: 'hello \2 and \1'
* the returned pairs: (2,6),(1,11)
* returned string is: 'hello and '
* ```
*/
// Scans `repl` for backslash-number tokens (e.g. "\1"), strips them out, and
// records each token's 1-based group index plus the byte offset at which it
// belongs in the stripped replacement string (see the example above).
std::pair<std::string, std::vector<backref_type>> parse_backrefs(std::string const& repl)
{
std::vector<backref_type> backrefs;
std::string str = repl;  // make a modifiable copy
std::smatch m;
std::regex ex("(\\\\\\d+)");  // this searches for backslash-number(s); example "\1"
std::string rtn;              // result without refs
size_type byte_offset = 0;    // running offset of the next ref within `rtn`
while (std::regex_search(str, m, ex)) {
if (m.size() == 0) break;
std::string const backref = m[0];
size_type const position = static_cast<size_type>(m.position(0));
size_type const length   = static_cast<size_type>(backref.length());
byte_offset += position;
size_type const index = std::atoi(backref.c_str() + 1);  // back-ref index number
CUDF_EXPECTS(index > 0, "Back-reference numbers must be greater than 0");
rtn += str.substr(0, position);          // keep text preceding the token
str = str.substr(position + length);     // continue after the consumed token
backrefs.push_back({index, byte_offset});
}
if (!str.empty())  // add the remainder
rtn += str;        // of the string
return {rtn, backrefs};
}
} // namespace
//
// Detail implementation: replaces every match of `pattern` in each string with
// `repl`, substituting "\N" back-references with the matched capture groups.
// Dispatches to a stack-size-specific kernel based on the compiled regex's
// instruction count, then assembles the result column with the input's null mask.
std::unique_ptr<column> replace_with_backrefs(
strings_column_view const& strings,
std::string const& pattern,
std::string const& repl,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
if (strings.is_empty()) return make_empty_strings_column(stream, mr);
CUDF_EXPECTS(!pattern.empty(), "Parameter pattern must not be empty");
CUDF_EXPECTS(!repl.empty(), "Parameter repl must not be empty");
auto d_strings = column_device_view::create(strings.parent(), stream);
// compile regex into device object
auto d_prog = reprog_device::create(pattern, get_character_flags_table(), strings.size(), stream);
auto const regex_insts = d_prog->insts_counts();
// parse the repl string for backref indicators
auto const parse_result = parse_backrefs(repl);
// copy the host-side (index, offset) pairs to the device; parse_result stays
// in scope for the whole function, so the async copy's source remains valid
rmm::device_uvector<backref_type> backrefs(parse_result.second.size(), stream);
CUDA_TRY(cudaMemcpyAsync(backrefs.data(),
parse_result.second.data(),
sizeof(backref_type) * backrefs.size(),
cudaMemcpyHostToDevice,
stream.value()));
string_scalar repl_scalar(parse_result.first, true, stream);
string_view const d_repl_template = repl_scalar.value();
using BackRefIterator = decltype(backrefs.begin());
// create child columns
children_pair children = [&] {
// Each invocation is predicated on the stack size
// which is dependent on the number of regex instructions
if ((regex_insts > MAX_STACK_INSTS) || (regex_insts <= RX_SMALL_INSTS)) {
return make_strings_children(
backrefs_fn<BackRefIterator, RX_STACK_SMALL>{
*d_strings, *d_prog, d_repl_template, backrefs.begin(), backrefs.end()},
strings.size(),
strings.null_count(),
stream,
mr);
} else if (regex_insts <= RX_MEDIUM_INSTS)
return replace_with_backrefs_medium(
*d_strings, *d_prog, d_repl_template, backrefs, strings.null_count(), stream, mr);
else
return replace_with_backrefs_large(
*d_strings, *d_prog, d_repl_template, backrefs, strings.null_count(), stream, mr);
}();
// output column reuses the input's null count and a copy of its null mask
return make_strings_column(strings.size(),
std::move(children.first),
std::move(children.second),
strings.null_count(),
cudf::detail::copy_bitmask(strings.parent(), stream, mr),
stream,
mr);
}
} // namespace detail
// external API
// Public API: forwards to the detail implementation on the default stream,
// adding an NVTX range for profiling.
std::unique_ptr<column> replace_with_backrefs(strings_column_view const& strings,
std::string const& pattern,
std::string const& repl,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::replace_with_backrefs(strings, pattern, repl, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
87af2fd690aca13c89d07dcb9e8ee08c194580a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <cmath>
#include <nestedtensor/csrc/cuda/padding.h>
#include <stdio.h>
namespace nested_tensor {
namespace cuda {
// Pads a batch of variable-length 1-D rows into a dense
// [batch_size, output_sizes_1] buffer, filling each row's tail with
// padding_value. Launch shape (see add_padding_kernelLauncher):
// grid = (batch_size, 16), block = 256; NOTE(review): `grainsize = 16 * 256`
// hard-codes that launch shape — keep in sync with the launcher.
template<typename T>
__global__
void add_padding_1(
const T* input,
T* output,
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
int output_sizes_1,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int batch_input_offset = offsets[batch_id];
const int* sizes_i = input_sizes + batch_id * input_dim;
const int batch_output_offset = batch_id * output_sizes_1;
// full grains: every thread handles one element per iteration
for (int ii = 0; ii < (output_sizes_1 / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int output_offset = batch_output_offset + i;
if (i < sizes_i[0]) {
output[output_offset] = input[batch_input_offset + i];
} else {
output[output_offset] = padding_value;
}
}
// tail: the final partial grain, guarded against running past the row
const int i = (output_sizes_1 / grainsize) * grainsize + tid;
if (i < output_sizes_1) {
const int output_offset = batch_output_offset + i;
if (i < sizes_i[0]) {
output[output_offset] = input[batch_input_offset + i];
} else {
output[output_offset] = padding_value;
}
}
}
// Pads a batch of variable-shape 2-D entries into a dense
// [batch_size, output_sizes_1, output_sizes_2] buffer; positions outside an
// entry's (sizes_i[0], sizes_i[1]) region get padding_value.
// NOTE(review): `grainsize = 16 * 256` hard-codes the launcher's
// grid.y == 16 / 256-thread block — keep in sync.
template<typename T>
__global__
void add_padding_2(
const T* input,
T* output,
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
int output_sizes_1,
int output_sizes_2,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = input_sizes + batch_id * input_dim;
const int output_offset = batch_id * output_sizes_1 * output_sizes_2;
const int output_numel = output_sizes_1 * output_sizes_2;
// full grains over the flattened output entry
for (int ii = 0; ii < (output_numel / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / (output_sizes_2);
const int i1 = i % output_sizes_2;
if (i0 < sizes_i[0] && i1 < sizes_i[1]) {
const int input_offset = offset + i0 * sizes_i[1] + i1;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
// tail: final partial grain
const int i = (output_numel / grainsize) * grainsize + tid;
if (i < output_numel) {
const int i0 = i / (output_sizes_2);
const int i1 = i % output_sizes_2;
if (i0 < sizes_i[0] && i1 < sizes_i[1]) {
const int input_offset = offset + i0 * sizes_i[1] + i1;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
}
// Pads a batch of variable-shape 3-D entries into a dense
// [batch_size, output_sizes_1, output_sizes_2, output_sizes_3] buffer;
// positions outside an entry's region get padding_value.
// NOTE(review): `grainsize = 16 * 256` hard-codes the launcher's
// grid.y == 16 / 256-thread block — keep in sync.
template<typename T>
__global__
void add_padding_3(
const T* input,
T* output,
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
int output_sizes_1,
int output_sizes_2,
int output_sizes_3,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = input_sizes + batch_id * input_dim;
const int output_offset = batch_id * output_sizes_1 * output_sizes_2 * output_sizes_3;
const int output_numel = output_sizes_1 * output_sizes_2 * output_sizes_3;
// full grains over the flattened output entry
for (int ii = 0; ii < (output_numel / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / (output_sizes_2 * output_sizes_3);
const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3;
const int i2 = i % output_sizes_3;
if (i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) {
const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
// tail: final partial grain
const int i = (output_numel / grainsize) * grainsize + tid;
if (i < output_numel) {
const int i0 = i / (output_sizes_2 * output_sizes_3);
const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3;
const int i2 = i % output_sizes_3;
if (i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) {
const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
}
// Host dispatcher: launches the rank-specific padding kernel for input_dim
// 1, 2, or 3 with grid = (batch_size, 16) and 256 threads per block.
// NOTE(review): any other input_dim silently launches nothing — confirm
// callers validate the rank.
template<typename T>
void add_padding_kernelLauncher(
T* input, // [batch_size x None]
T* output, // [batch_size x max(input.nested_size(1)) x inner_size]
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
std::vector<int64_t> output_sizes,
const int batch_size,
const hipStream_t stream)
{
dim3 grid;
grid.x = batch_size;
grid.y = 16;
if (input_dim == 1) {
hipLaunchKernelGGL(( add_padding_1<T>), dim3(grid), dim3(256), 0, stream,
input,
output,
padding_value,
offsets,
input_sizes,
input_dim,
output_sizes[1],
batch_size);
}
if (input_dim == 2) {
hipLaunchKernelGGL(( add_padding_2<T>), dim3(grid), dim3(256), 0, stream,
input,
output,
padding_value,
offsets,
input_sizes,
input_dim,
output_sizes[1],
output_sizes[2],
batch_size);
}
if (input_dim == 3) {
hipLaunchKernelGGL(( add_padding_3<T>), dim3(grid), dim3(256), 0, stream,
input,
output,
padding_value,
offsets,
input_sizes,
input_dim,
output_sizes[1],
output_sizes[2],
output_sizes[3],
batch_size);
}
}
template void add_padding_kernelLauncher<float>(
float* input,
float* output,
float padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
std::vector<int64_t> output_sizes,
const int batch_size,
const hipStream_t stream);
template void add_padding_kernelLauncher<c10::Half>(
c10::Half* input,
c10::Half* output,
c10::Half padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
std::vector<int64_t> output_sizes,
const int batch_size,
const hipStream_t stream);
// One thread per batch entry (launched <<<batch_size, 1>>> by the launcher
// below): copies the entry's `inner_size`-strided elements into the padded
// output and writes 1s into the mask for the valid positions.
// NOTE(review): positions beyond the entry's length are left untouched —
// output and output_mask are presumably pre-initialized by the caller; confirm.
template<typename T>
__global__
void add_padding_mask(
const T* input,
T* output,
int* output_mask,
const int* offsets,
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size)
{
const int batch_id = blockIdx.x;
// offsets[batch_id+1] - offsets[batch_id] = this entry's element count
for (int i = 0; i < (offsets[batch_id + 1] - offsets[batch_id]); i++) {
output_mask[batch_id*mask_stride + i] = 1;
}
for (int i = 0; i < (offsets[batch_id + 1] - offsets[batch_id]) * inner_size; i++) {
output[batch_id * output_stride + i] = input[offsets[batch_id] * inner_size + i];
}
}
// Host launcher for add_padding_mask: one single-threaded block per batch
// entry (each entry is processed serially by one thread).
template<typename T>
void add_padding_mask_kernelLauncher(
T* input, // [batch_size x None]
T* output, // [batch_size x max(input.nested_size(1)) x inner_size]
int* output_mask, // [batch_size x max(input.nested_size(1))]
const int* offsets, // [batch_size]
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size,
const hipStream_t stream)
{
dim3 grid;
grid.x = batch_size;
hipLaunchKernelGGL(( add_padding_mask<T>), dim3(grid), dim3(1), 0, stream,
input,
output,
output_mask,
offsets,
batch_size,
mask_stride,
output_stride,
inner_size);
}
template void add_padding_mask_kernelLauncher<float>(
float* input,
float* output,
int* output_mask,
const int* offsets,
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size,
const hipStream_t stream);
template void add_padding_mask_kernelLauncher<c10::Half>(
c10::Half* input,
c10::Half* output,
int* output_mask,
const int* offsets,
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size,
const hipStream_t stream);
// Inverse of padding for rank-2 entries: gathers the valid
// (sizes_i[0] x sizes_i[1]) region of each padded input entry into a packed
// output buffer at offsets[batch_id]. Same grain scheme as the padding
// kernels (grainsize hard-codes grid.y == 16, 256 threads).
template<typename T>
__global__
void remove_padding_2(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = output_sizes + batch_id * output_dim;
const int numel_i = sizes_i[0] * sizes_i[1];
int input_offset = batch_id * input_sizes[1] * input_sizes[2];
// full grains over the packed output entry
for (int ii = 0; ii < (numel_i / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / sizes_i[1];
const int i1 = i % sizes_i[1];
const int i0_offset = i0 * input_sizes[2];
output[offset + i] = input[input_offset + i0_offset + i1];
}
// tail: final partial grain
const int i = (numel_i / grainsize) * grainsize + tid;
if (i < numel_i) {
const int i0 = i / sizes_i[1];
const int i1 = i % sizes_i[1];
const int i0_offset = i0 * input_sizes[2];
output[offset + i] = input[input_offset + i0_offset + i1];
}
}
// Inverse of padding for rank-3 entries: gathers the valid
// (sizes_i[0] x sizes_i[1] x sizes_i[2]) region of each padded input entry
// into a packed output buffer at offsets[batch_id]. Indexing assumes
// output_dim == 3 (reads sizes_i[2] and input_sizes[3]).
template<typename T>
__global__
void remove_padding(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = output_sizes + batch_id * output_dim;
const int numel_i = sizes_i[0] * sizes_i[1] * sizes_i[2];
int input_offset = batch_id * input_sizes[1] * input_sizes[2] * input_sizes[3];
// full grains over the packed output entry
for (int ii = 0; ii < (numel_i / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / (sizes_i[1] * sizes_i[2]);
const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2];
const int i2 = i % sizes_i[2];
const int i0_offset = i0 * input_sizes[2] * input_sizes[3];
const int i1_offset = i1 * input_sizes[3];
output[offset + i] = input[input_offset + i0_offset + i1_offset + i2];
}
// tail: final partial grain
const int i = (numel_i / grainsize) * grainsize + tid;
if (i < numel_i) {
const int i0 = i / (sizes_i[1] * sizes_i[2]);
const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2];
const int i2 = i % sizes_i[2];
const int i0_offset = i0 * input_sizes[2] * input_sizes[3];
const int i1_offset = i1 * input_sizes[3];
output[offset + i] = input[input_offset + i0_offset + i1_offset + i2];
}
}
// Host dispatcher for un-padding: rank-2 entries go to remove_padding_2,
// everything else to remove_padding. NOTE(review): the else branch indexes as
// if output_dim == 3 — confirm callers only pass 2 or 3.
template<typename T>
void remove_padding_kernelLauncher(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size,
const hipStream_t stream)
{
dim3 grid;
grid.x = batch_size;
grid.y = 16;
if (output_dim == 2) {
hipLaunchKernelGGL(( remove_padding_2<T>), dim3(grid), dim3(256), 0, stream,
input,
output,
offsets,
input_sizes,
output_sizes,
output_dim,
batch_size);
} else {
hipLaunchKernelGGL(( remove_padding<T>), dim3(grid), dim3(256), 0, stream,
input,
output,
offsets,
input_sizes,
output_sizes,
output_dim,
batch_size);
}
}
template void remove_padding_kernelLauncher<float>(
const float* input,
float* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size,
const hipStream_t stream);
template void remove_padding_kernelLauncher<c10::Half>(
const c10::Half* input,
c10::Half* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size,
const hipStream_t stream);
}
}
| 87af2fd690aca13c89d07dcb9e8ee08c194580a4.cu | #include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cmath>
#include <nestedtensor/csrc/cuda/padding.h>
#include <stdio.h>
namespace nested_tensor {
namespace cuda {
// Pads a batch of variable-length 1-D rows into a dense
// [batch_size, output_sizes_1] buffer, filling each row's tail with
// padding_value. Launch shape (see add_padding_kernelLauncher):
// grid = (batch_size, 16), block = 256; NOTE(review): `grainsize = 16 * 256`
// hard-codes that launch shape — keep in sync with the launcher.
template<typename T>
__global__
void add_padding_1(
const T* input,
T* output,
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
int output_sizes_1,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int batch_input_offset = offsets[batch_id];
const int* sizes_i = input_sizes + batch_id * input_dim;
const int batch_output_offset = batch_id * output_sizes_1;
// full grains: every thread handles one element per iteration
for (int ii = 0; ii < (output_sizes_1 / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int output_offset = batch_output_offset + i;
if (i < sizes_i[0]) {
output[output_offset] = input[batch_input_offset + i];
} else {
output[output_offset] = padding_value;
}
}
// tail: the final partial grain, guarded against running past the row
const int i = (output_sizes_1 / grainsize) * grainsize + tid;
if (i < output_sizes_1) {
const int output_offset = batch_output_offset + i;
if (i < sizes_i[0]) {
output[output_offset] = input[batch_input_offset + i];
} else {
output[output_offset] = padding_value;
}
}
}
// Pads a batch of variable-shape 2-D entries into a dense
// [batch_size, output_sizes_1, output_sizes_2] buffer; positions outside an
// entry's (sizes_i[0], sizes_i[1]) region get padding_value.
// NOTE(review): `grainsize = 16 * 256` hard-codes the launcher's
// grid.y == 16 / 256-thread block — keep in sync.
template<typename T>
__global__
void add_padding_2(
const T* input,
T* output,
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
int output_sizes_1,
int output_sizes_2,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = input_sizes + batch_id * input_dim;
const int output_offset = batch_id * output_sizes_1 * output_sizes_2;
const int output_numel = output_sizes_1 * output_sizes_2;
// full grains over the flattened output entry
for (int ii = 0; ii < (output_numel / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / (output_sizes_2);
const int i1 = i % output_sizes_2;
if (i0 < sizes_i[0] && i1 < sizes_i[1]) {
const int input_offset = offset + i0 * sizes_i[1] + i1;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
// tail: final partial grain
const int i = (output_numel / grainsize) * grainsize + tid;
if (i < output_numel) {
const int i0 = i / (output_sizes_2);
const int i1 = i % output_sizes_2;
if (i0 < sizes_i[0] && i1 < sizes_i[1]) {
const int input_offset = offset + i0 * sizes_i[1] + i1;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
}
// Pads a batch of variable-shape 3-D entries into a dense
// [batch_size, output_sizes_1, output_sizes_2, output_sizes_3] buffer;
// positions outside an entry's region get padding_value.
// NOTE(review): `grainsize = 16 * 256` hard-codes the launcher's
// grid.y == 16 / 256-thread block — keep in sync.
template<typename T>
__global__
void add_padding_3(
const T* input,
T* output,
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
int output_sizes_1,
int output_sizes_2,
int output_sizes_3,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = input_sizes + batch_id * input_dim;
const int output_offset = batch_id * output_sizes_1 * output_sizes_2 * output_sizes_3;
const int output_numel = output_sizes_1 * output_sizes_2 * output_sizes_3;
// full grains over the flattened output entry
for (int ii = 0; ii < (output_numel / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / (output_sizes_2 * output_sizes_3);
const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3;
const int i2 = i % output_sizes_3;
if (i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) {
const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
// tail: final partial grain
const int i = (output_numel / grainsize) * grainsize + tid;
if (i < output_numel) {
const int i0 = i / (output_sizes_2 * output_sizes_3);
const int i1 = (i % (output_sizes_2 * output_sizes_3)) / output_sizes_3;
const int i2 = i % output_sizes_3;
if (i0 < sizes_i[0] && i1 < sizes_i[1] && i2 < sizes_i[2]) {
const int input_offset = offset + i0 * (sizes_i[1] * sizes_i[2]) + i1 * sizes_i[2] + i2;
output[output_offset + i] = input[input_offset];
} else {
output[output_offset + i] = padding_value;
}
}
}
// Host dispatcher: launches the rank-specific padding kernel for input_dim
// 1, 2, or 3 with grid = (batch_size, 16) and 256 threads per block.
// NOTE(review): any other input_dim silently launches nothing — confirm
// callers validate the rank.
template<typename T>
void add_padding_kernelLauncher(
T* input, // [batch_size x None]
T* output, // [batch_size x max(input.nested_size(1)) x inner_size]
T padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
std::vector<int64_t> output_sizes,
const int batch_size,
const cudaStream_t stream)
{
dim3 grid;
grid.x = batch_size;
grid.y = 16;
if (input_dim == 1) {
add_padding_1<T><<<grid, 256, 0, stream>>>(
input,
output,
padding_value,
offsets,
input_sizes,
input_dim,
output_sizes[1],
batch_size);
}
if (input_dim == 2) {
add_padding_2<T><<<grid, 256, 0, stream>>>(
input,
output,
padding_value,
offsets,
input_sizes,
input_dim,
output_sizes[1],
output_sizes[2],
batch_size);
}
if (input_dim == 3) {
add_padding_3<T><<<grid, 256, 0, stream>>>(
input,
output,
padding_value,
offsets,
input_sizes,
input_dim,
output_sizes[1],
output_sizes[2],
output_sizes[3],
batch_size);
}
}
template void add_padding_kernelLauncher<float>(
float* input,
float* output,
float padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
std::vector<int64_t> output_sizes,
const int batch_size,
const cudaStream_t stream);
template void add_padding_kernelLauncher<c10::Half>(
c10::Half* input,
c10::Half* output,
c10::Half padding_value,
const int* offsets,
const int* input_sizes,
int input_dim,
std::vector<int64_t> output_sizes,
const int batch_size,
const cudaStream_t stream);
// One thread per batch entry (launched <<<batch_size, 1>>> by the launcher
// below): copies the entry's `inner_size`-strided elements into the padded
// output and writes 1s into the mask for the valid positions.
// NOTE(review): positions beyond the entry's length are left untouched —
// output and output_mask are presumably pre-initialized by the caller; confirm.
template<typename T>
__global__
void add_padding_mask(
const T* input,
T* output,
int* output_mask,
const int* offsets,
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size)
{
const int batch_id = blockIdx.x;
// offsets[batch_id+1] - offsets[batch_id] = this entry's element count
for (int i = 0; i < (offsets[batch_id + 1] - offsets[batch_id]); i++) {
output_mask[batch_id*mask_stride + i] = 1;
}
for (int i = 0; i < (offsets[batch_id + 1] - offsets[batch_id]) * inner_size; i++) {
output[batch_id * output_stride + i] = input[offsets[batch_id] * inner_size + i];
}
}
// Host launcher for add_padding_mask: one single-threaded block per batch
// entry (each entry is processed serially by one thread).
template<typename T>
void add_padding_mask_kernelLauncher(
T* input, // [batch_size x None]
T* output, // [batch_size x max(input.nested_size(1)) x inner_size]
int* output_mask, // [batch_size x max(input.nested_size(1))]
const int* offsets, // [batch_size]
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size,
const cudaStream_t stream)
{
dim3 grid;
grid.x = batch_size;
add_padding_mask<T><<<grid, 1, 0, stream>>>(
input,
output,
output_mask,
offsets,
batch_size,
mask_stride,
output_stride,
inner_size);
}
template void add_padding_mask_kernelLauncher<float>(
float* input,
float* output,
int* output_mask,
const int* offsets,
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size,
const cudaStream_t stream);
template void add_padding_mask_kernelLauncher<c10::Half>(
c10::Half* input,
c10::Half* output,
int* output_mask,
const int* offsets,
const int batch_size,
const int mask_stride,
const int output_stride,
const int inner_size,
const cudaStream_t stream);
// Inverse of padding for rank-2 entries: gathers the valid
// (sizes_i[0] x sizes_i[1]) region of each padded input entry into a packed
// output buffer at offsets[batch_id]. Same grain scheme as the padding
// kernels (grainsize hard-codes grid.y == 16, 256 threads).
template<typename T>
__global__
void remove_padding_2(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = output_sizes + batch_id * output_dim;
const int numel_i = sizes_i[0] * sizes_i[1];
int input_offset = batch_id * input_sizes[1] * input_sizes[2];
// full grains over the packed output entry
for (int ii = 0; ii < (numel_i / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / sizes_i[1];
const int i1 = i % sizes_i[1];
const int i0_offset = i0 * input_sizes[2];
output[offset + i] = input[input_offset + i0_offset + i1];
}
// tail: final partial grain
const int i = (numel_i / grainsize) * grainsize + tid;
if (i < numel_i) {
const int i0 = i / sizes_i[1];
const int i1 = i % sizes_i[1];
const int i0_offset = i0 * input_sizes[2];
output[offset + i] = input[input_offset + i0_offset + i1];
}
}
// Inverse of padding for rank-3 entries: gathers the valid
// (sizes_i[0] x sizes_i[1] x sizes_i[2]) region of each padded input entry
// into a packed output buffer at offsets[batch_id]. Indexing assumes
// output_dim == 3 (reads sizes_i[2] and input_sizes[3]).
template<typename T>
__global__
void remove_padding(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size)
{
const int batch_id = blockIdx.x;
const int grid_id = blockIdx.y;
const int tid = threadIdx.x + grid_id * 256;
const int grainsize = 16 * 256;
const int offset = offsets[batch_id];
const int* sizes_i = output_sizes + batch_id * output_dim;
const int numel_i = sizes_i[0] * sizes_i[1] * sizes_i[2];
int input_offset = batch_id * input_sizes[1] * input_sizes[2] * input_sizes[3];
// full grains over the packed output entry
for (int ii = 0; ii < (numel_i / grainsize); ii++) {
const int i = ii * grainsize + tid;
const int i0 = i / (sizes_i[1] * sizes_i[2]);
const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2];
const int i2 = i % sizes_i[2];
const int i0_offset = i0 * input_sizes[2] * input_sizes[3];
const int i1_offset = i1 * input_sizes[3];
output[offset + i] = input[input_offset + i0_offset + i1_offset + i2];
}
// tail: final partial grain
const int i = (numel_i / grainsize) * grainsize + tid;
if (i < numel_i) {
const int i0 = i / (sizes_i[1] * sizes_i[2]);
const int i1 = (i % (sizes_i[1] * sizes_i[2])) / sizes_i[2];
const int i2 = i % sizes_i[2];
const int i0_offset = i0 * input_sizes[2] * input_sizes[3];
const int i1_offset = i1 * input_sizes[3];
output[offset + i] = input[input_offset + i0_offset + i1_offset + i2];
}
}
// Host-side dispatcher: packs padded batches into a contiguous buffer by
// launching the kernel matching the per-batch output rank (2-D or 3-D).
// Grid is (batch_size, 16) with 256 threads per block, matching the
// kernels' hard-coded grainsize of 16 * 256.
template<typename T>
void remove_padding_kernelLauncher(
const T* input,
T* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size,
const cudaStream_t stream)
{
const dim3 grid(batch_size, 16);
const int block = 256;
if (output_dim == 2) {
remove_padding_2<T><<<grid, block, 0, stream>>>(
input, output, offsets, input_sizes, output_sizes, output_dim, batch_size);
} else {
remove_padding<T><<<grid, block, 0, stream>>>(
input, output, offsets, input_sizes, output_sizes, output_dim, batch_size);
}
}
// Explicit instantiations for the element types used by callers (fp32 and
// fp16); keeps the template definitions out of the header.
template void remove_padding_kernelLauncher<float>(
const float* input,
float* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size,
const cudaStream_t stream);
template void remove_padding_kernelLauncher<c10::Half>(
const c10::Half* input,
c10::Half* output,
const int* offsets,
const int* input_sizes,
const int* output_sizes,
int output_dim,
const int batch_size,
const cudaStream_t stream);
}
}
|
fce9c1f7984e6c35ef1762e550452c937e02f10e.hip | // !!! This is a file automatically generated by hipify!!!
// Author: Tobias Pltz, TU Darmstadt (tobias.ploetz@visinf.tu-darmstadt.de)
// This file is part of the implementation as described in the NIPS 2018 paper:
// Tobias Pltz and Stefan Roth, Neural Nearest Neighbors Networks.
// Please see the file LICENSE.txt for the license governing this code.
#include <math.h>
#include <vector>
#include "stdio.h"
#include "iostream"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include <chrono>
using namespace std;
// Thread-block shape for the backward kernels: 256 rows (y) by 4 embedding
// lanes (x) = 1024 threads per block.
const int N_THREADS_N = 256;
const int N_THREADS_E = 1024 / N_THREADS_N;
// Gradient w.r.t. x of the index-gathered matmul: one thread per
// (batch, row, col) entry of the m x o gradient scatter-accumulates
// grad * y[row] into the x-row selected by mat_i. atomicAdd is required
// because several (row, col) pairs may select the same x-row.
// Dimensions: grad m x o, mat_y m x e, mat_i m x o (values in [0, n)),
// mat_ox n x e, all batched by batch_size.
__device__
void matmul1_xgrad(float *grad, float *mat_y, long *mat_i, float *mat_ox, int m, int n, int e, int o, int batch_size){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int batch = blockIdx.z *blockDim.z + threadIdx.z;
// Guard the ragged grid edges.
if (batch >= batch_size || row >= m || col >= o)
return;
int pos_i = (batch * m * o) + (row * o) + col;
int idx = mat_i[pos_i]; // which of the n x-rows this output entry used
float g = grad[pos_i];
// Accumulate the outer-product contribution along the embedding dim.
for (int j = 0; j < e; j++) {
int pos_y = (batch * m * e) + (row * e) + j;
int pos_ox = (batch * n * e) + (idx * e) + j;
atomicAdd(mat_ox + pos_ox, mat_y[pos_y] * g);
}
}
// Gradient w.r.t. y of the index-gathered matmul: for each (batch, row, col)
// of the m x e output, sums grad[row][i] * x[mat_i[row][i]][col] over the o
// gathered entries. Pure gather, so no atomics are needed.
__device__
void matmul1_ygrad(float *grad, float *mat_x, long *mat_i, float *mat_o, int m, int n, int e, int o, int batch_size){
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int batch = blockIdx.z * blockDim.z + threadIdx.z;
if (batch >= batch_size || row >= m || col >= e)
return;
// Row base into grad / mat_i (both m x o) and batch base into x (n x e),
// hoisted out of the reduction loop.
const int row_base = (batch * m * o) + (row * o);
const int x_base = batch * n * e;
float acc = 0.0f;
for (int i = 0; i < o; ++i) {
const int xind = mat_i[row_base + i];
acc += mat_x[x_base + (xind * e) + col] * grad[row_base + i];
}
mat_o[(batch * m * e) + (row * e) + col] = acc;
}
// Thin __global__ entry points; the unused mat_y / mat_x parameters keep the
// two kernels signature-compatible for the launcher.
__global__
void matmul1_bwd_kernel_xgrad(float *gradients, float *mat_x, float *mat_y, long *mat_i, float *mat_ox, int m, int n, int e, int o, int batch_size){
matmul1_xgrad(gradients, mat_y, mat_i, mat_ox, m, n, e, o, batch_size);
}
__global__
void matmul1_bwd_kernel_ygrad(float *gradients, float *mat_x, float *mat_y, long *mat_i, float *mat_oy, int m, int n, int e, int o, int batch_size){
matmul1_ygrad(gradients, mat_x, mat_i, mat_oy, m, n, e, o, batch_size);
}
// Launches both backward kernels of the indexed matmul (see matmul1_xgrad /
// matmul1_ygrad): grad m x o, x n x e, y m x e, indices m x o, batch b.
// Fix: the xgrad kernel bounds its x-dimension by o (columns of the
// gradient) and its y-dimension by m, but the grid previously covered
// e x max(n, m); any columns with o > e were never computed. The xgrad grid
// is now sized to cover exactly o x m.
void matmul1_bwd_cuda(at::Tensor gradients, at::Tensor mat_x, at::Tensor mat_y, at::Tensor mat_i, at::Tensor out_x, at::Tensor out_y, int m, int n, int e, int o, int b){
// Set array and CUDA block/grid sizes
dim3 block(N_THREADS_E, N_THREADS_N, 1);
// ygrad threads span (col < e, row < m); max(n, m) rows is a safe cover.
dim3 grid((int)ceil(((float)e)/N_THREADS_E), (int)ceil(((float)::max(n, m))/N_THREADS_N), b);
// Call kernel
hipLaunchKernelGGL(( matmul1_bwd_kernel_ygrad), dim3(grid), dim3(block), 0, 0, gradients.data<float>(), mat_x.data<float>(), mat_y.data<float>(), mat_i.data<long>(), out_y.data<float>(), m, n, e, o, b);
dim3 block_xgrad(N_THREADS_E, N_THREADS_N, 1);
// xgrad threads span (col < o, row < m): cover o columns and m rows.
dim3 grid_xgrad((int)ceil(((float)o)/N_THREADS_E), (int)ceil(((float)m)/N_THREADS_N), b);
// Call kernel
hipLaunchKernelGGL(( matmul1_bwd_kernel_xgrad), dim3(grid_xgrad), dim3(block_xgrad), 0, 0, gradients.data<float>(), mat_x.data<float>(), mat_y.data<float>(), mat_i.data<long>(), out_x.data<float>(), m, n, e, o, b);
return;
}
| fce9c1f7984e6c35ef1762e550452c937e02f10e.cu | // Author: Tobias Plötz, TU Darmstadt (tobias.ploetz@visinf.tu-darmstadt.de)
// This file is part of the implementation as described in the NIPS 2018 paper:
// Tobias Plötz and Stefan Roth, Neural Nearest Neighbors Networks.
// Please see the file LICENSE.txt for the license governing this code.
#include <math.h>
#include <vector>
#include "stdio.h"
#include "iostream"
#include <cuda.h>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include <chrono>
using namespace std;
// Thread-block shape for the backward kernels: 256 rows (y) by 4 embedding
// lanes (x) = 1024 threads per block.
const int N_THREADS_N = 256;
const int N_THREADS_E = 1024 / N_THREADS_N;
// Gradient w.r.t. x of the index-gathered matmul: one thread per
// (batch, row, col) entry of the m x o gradient scatter-accumulates
// grad * y[row] into the x-row selected by mat_i. atomicAdd is required
// because several (row, col) pairs may select the same x-row.
// Dimensions: grad m x o, mat_y m x e, mat_i m x o (values in [0, n)),
// mat_ox n x e, all batched by batch_size.
__device__
void matmul1_xgrad(float *grad, float *mat_y, long *mat_i, float *mat_ox, int m, int n, int e, int o, int batch_size){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int batch = blockIdx.z *blockDim.z + threadIdx.z;
// Guard the ragged grid edges.
if (batch >= batch_size || row >= m || col >= o)
return;
int pos_i = (batch * m * o) + (row * o) + col;
int idx = mat_i[pos_i]; // which of the n x-rows this output entry used
float g = grad[pos_i];
// Accumulate the outer-product contribution along the embedding dim.
for (int j = 0; j < e; j++) {
int pos_y = (batch * m * e) + (row * e) + j;
int pos_ox = (batch * n * e) + (idx * e) + j;
atomicAdd(mat_ox + pos_ox, mat_y[pos_y] * g);
}
}
// Gradient w.r.t. y of the index-gathered matmul: for each (batch, row, col)
// of the m x e output, sums grad[row][i] * x[mat_i[row][i]][col] over the o
// gathered entries. Pure gather, so no atomics are needed.
__device__
void matmul1_ygrad(float *grad, float *mat_x, long *mat_i, float *mat_o, int m, int n, int e, int o, int batch_size){
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int batch = blockIdx.z * blockDim.z + threadIdx.z;
if (batch >= batch_size || row >= m || col >= e)
return;
// Row base into grad / mat_i (both m x o) and batch base into x (n x e),
// hoisted out of the reduction loop.
const int row_base = (batch * m * o) + (row * o);
const int x_base = batch * n * e;
float acc = 0.0f;
for (int i = 0; i < o; ++i) {
const int xind = mat_i[row_base + i];
acc += mat_x[x_base + (xind * e) + col] * grad[row_base + i];
}
mat_o[(batch * m * e) + (row * e) + col] = acc;
}
// Thin __global__ entry points; the unused mat_y / mat_x parameters keep the
// two kernels signature-compatible for the launcher.
__global__
void matmul1_bwd_kernel_xgrad(float *gradients, float *mat_x, float *mat_y, long *mat_i, float *mat_ox, int m, int n, int e, int o, int batch_size){
matmul1_xgrad(gradients, mat_y, mat_i, mat_ox, m, n, e, o, batch_size);
}
__global__
void matmul1_bwd_kernel_ygrad(float *gradients, float *mat_x, float *mat_y, long *mat_i, float *mat_oy, int m, int n, int e, int o, int batch_size){
matmul1_ygrad(gradients, mat_x, mat_i, mat_oy, m, n, e, o, batch_size);
}
// Launches both backward kernels of the indexed matmul (see matmul1_xgrad /
// matmul1_ygrad): grad m x o, x n x e, y m x e, indices m x o, batch b.
// Fix: the xgrad kernel bounds its x-dimension by o (columns of the
// gradient) and its y-dimension by m, but the grid previously covered
// e x max(n, m); any columns with o > e were never computed. The xgrad grid
// is now sized to cover exactly o x m.
void matmul1_bwd_cuda(at::Tensor gradients, at::Tensor mat_x, at::Tensor mat_y, at::Tensor mat_i, at::Tensor out_x, at::Tensor out_y, int m, int n, int e, int o, int b){
// Set array and CUDA block/grid sizes
dim3 block(N_THREADS_E, N_THREADS_N, 1);
// ygrad threads span (col < e, row < m); max(n, m) rows is a safe cover.
dim3 grid((int)ceil(((float)e)/N_THREADS_E), (int)ceil(((float)std::max(n, m))/N_THREADS_N), b);
// Call kernel
matmul1_bwd_kernel_ygrad<<<grid, block>>>(gradients.data<float>(), mat_x.data<float>(), mat_y.data<float>(), mat_i.data<long>(), out_y.data<float>(), m, n, e, o, b);
dim3 block_xgrad(N_THREADS_E, N_THREADS_N, 1);
// xgrad threads span (col < o, row < m): cover o columns and m rows.
dim3 grid_xgrad((int)ceil(((float)o)/N_THREADS_E), (int)ceil(((float)m)/N_THREADS_N), b);
// Call kernel
matmul1_bwd_kernel_xgrad<<<grid_xgrad, block_xgrad>>>(gradients.data<float>(), mat_x.data<float>(), mat_y.data<float>(), mat_i.data<long>(), out_x.data<float>(), m, n, e, o, b);
return;
}
|
bbe58749c03251c8a4c971cd1629196a7a7e8fc6.hip | // !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/for_each.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
// Marks output slot floor(x) as present; used to observe which elements
// thrust::for_each visited.
template <typename T>
class mark_present_for_each
{
public:
T * ptr;
__host__ __device__ void operator()(T x){ ptr[(int) x] = 1; }
};
// for_each over a small fixed vector: exactly the referenced slots are set
// and the returned iterator is the end of the input range.
template <class Vector>
void TestForEachSimple(void)
{
typedef typename Vector::value_type T;
Vector input(5);
Vector output(7, (T) 0);
input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6;
mark_present_for_each<T> f;
f.ptr = thrust::raw_pointer_cast(output.data());
typename Vector::iterator result = thrust::for_each(input.begin(), input.end(), f);
ASSERT_EQUAL(output[0], 0);
ASSERT_EQUAL(output[1], 0);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 1);
ASSERT_EQUAL_QUIET(result, input.end());
}
DECLARE_VECTOR_UNITTEST(TestForEachSimple);
// Same scenario via the counted overload for_each_n.
template <class Vector>
void TestForEachNSimple(void)
{
typedef typename Vector::value_type T;
Vector input(5);
Vector output(7, (T) 0);
input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6;
mark_present_for_each<T> f;
f.ptr = thrust::raw_pointer_cast(output.data());
typename Vector::iterator result = thrust::for_each_n(input.begin(), input.size(), f);
ASSERT_EQUAL(output[0], 0);
ASSERT_EQUAL(output[1], 0);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 1);
ASSERT_EQUAL_QUIET(result, input.end());
}
DECLARE_VECTOR_UNITTEST(TestForEachNSimple);
// for_each over a counting_iterator range [0, 5): system is dispatched from
// the iterator ("any system"), so slots 0..4 are set.
void TestForEachSimpleAnySystem(void)
{
thrust::device_vector<int> output(7, 0);
mark_present_for_each<int> f;
f.ptr = thrust::raw_pointer_cast(output.data());
thrust::counting_iterator<int> result = thrust::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(5), f);
ASSERT_EQUAL(output[0], 1);
ASSERT_EQUAL(output[1], 1);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 0);
ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5));
}
DECLARE_UNITTEST(TestForEachSimpleAnySystem);
// Counted variant of the counting_iterator test.
void TestForEachNSimpleAnySystem(void)
{
thrust::device_vector<int> output(7, 0);
mark_present_for_each<int> f;
f.ptr = thrust::raw_pointer_cast(output.data());
thrust::counting_iterator<int> result = thrust::for_each_n(thrust::make_counting_iterator(0), 5, f);
ASSERT_EQUAL(output[0], 1);
ASSERT_EQUAL(output[1], 1);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 0);
ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5));
}
DECLARE_UNITTEST(TestForEachNSimpleAnySystem);
// Randomized host-vs-device comparison: run the marking functor over the
// same random input on both backends and require identical outputs.
template <typename T>
void TestForEach(const size_t n)
{
const size_t output_size = ::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
// Clamp inputs into [0, output_size) so the marks stay in bounds.
for(size_t i = 0; i < n; i++)
h_input[i] = ((size_t) h_input[i]) % output_size;
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
mark_present_for_each<T> h_f;
mark_present_for_each<T> d_f;
h_f.ptr = &h_output[0];
d_f.ptr = (&d_output[0]).get();
typename thrust::host_vector<T>::iterator h_result =
thrust::for_each(h_input.begin(), h_input.end(), h_f);
typename thrust::device_vector<T>::iterator d_result =
thrust::for_each(d_input.begin(), d_input.end(), d_f);
ASSERT_EQUAL(h_output, d_output);
ASSERT_EQUAL_QUIET(h_result, h_input.end());
ASSERT_EQUAL_QUIET(d_result, d_input.end());
}
DECLARE_VARIABLE_UNITTEST(TestForEach);
// Counted (for_each_n) version of the randomized comparison above.
template <typename T>
void TestForEachN(const size_t n)
{
const size_t output_size = ::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
for(size_t i = 0; i < n; i++)
h_input[i] = ((size_t) h_input[i]) % output_size;
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
mark_present_for_each<T> h_f;
mark_present_for_each<T> d_f;
h_f.ptr = &h_output[0];
d_f.ptr = (&d_output[0]).get();
typename thrust::host_vector<T>::iterator h_result =
thrust::for_each_n(h_input.begin(), h_input.size(), h_f);
typename thrust::device_vector<T>::iterator d_result =
thrust::for_each_n(d_input.begin(), d_input.size(), d_f);
ASSERT_EQUAL(h_output, d_output);
ASSERT_EQUAL_QUIET(h_result, h_input.end());
ASSERT_EQUAL_QUIET(d_result, d_input.end());
}
DECLARE_VARIABLE_UNITTEST(TestForEachN);
// Recursive template: copies *x forward through N ints via a live temporary
// per level, forcing ~N registers when inlined (stress for the launcher).
template <size_t N> __host__ __device__ void f (int * x) { int temp = *x; f<N - 1>(x + 1); *x = temp;};
template <> __host__ __device__ void f<0>(int * x) { }
// Functor whose operator() has a deliberately large register footprint.
template <size_t N>
struct CopyFunctorWithManyRegisters
{
__host__ __device__
void operator()(int * ptr)
{
f<N>(ptr);
}
};
// Smoke test: for_each must still launch a functor needing many registers.
void TestForEachLargeRegisterFootprint()
{
const size_t N = 100;
thrust::device_vector<int> data(N, 12345);
thrust::device_vector<int *> input(1, thrust::raw_pointer_cast(&data[0])); // length is irrelevant
thrust::for_each(input.begin(), input.end(), CopyFunctorWithManyRegisters<N>());
}
DECLARE_UNITTEST(TestForEachLargeRegisterFootprint);
// Counted (for_each_n) version of the register-footprint smoke test.
void TestForEachNLargeRegisterFootprint()
{
const size_t N = 100;
thrust::device_vector<int> data(N, 12345);
thrust::device_vector<int *> input(1, thrust::raw_pointer_cast(&data[0])); // length is irrelevant
thrust::for_each_n(input.begin(), input.size(), CopyFunctorWithManyRegisters<N>());
}
DECLARE_UNITTEST(TestForEachNLargeRegisterFootprint);
// Assigns a constant FixedVector exemplar to each element; exercises
// for_each with large value types.
template <typename T, unsigned int N>
struct SetFixedVectorToConstant
{
FixedVector<T,N> exemplar;
SetFixedVectorToConstant(T scalar) : exemplar(scalar) {}
__host__ __device__
void operator()(FixedVector<T,N>& t)
{
t = exemplar;
}
};
// Host vs device for_each over ~64 KB of FixedVector<T,N> elements; both
// sides must end up identical.
template <typename T, unsigned int N>
void _TestForEachWithLargeTypes(void)
{
size_t n = (64 * 1024) / sizeof(FixedVector<T,N>);
thrust::host_vector< FixedVector<T,N> > h_data(n);
for(size_t i = 0; i < h_data.size(); i++)
h_data[i] = FixedVector<T,N>(i);
thrust::device_vector< FixedVector<T,N> > d_data = h_data;
SetFixedVectorToConstant<T,N> func(123);
thrust::for_each(h_data.begin(), h_data.end(), func);
thrust::for_each(d_data.begin(), d_data.end(), func);
ASSERT_EQUAL_QUIET(h_data, d_data);
}
// Sweep element sizes 1..16 ints; larger sizes are known failures on some
// toolchains (see comments) and are disabled.
void TestForEachWithLargeTypes(void)
{
_TestForEachWithLargeTypes<int, 1>();
_TestForEachWithLargeTypes<int, 2>();
_TestForEachWithLargeTypes<int, 4>();
_TestForEachWithLargeTypes<int, 8>();
_TestForEachWithLargeTypes<int, 16>();
KNOWN_FAILURE;
//_TestForEachWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1
//_TestForEachWithLargeTypes<int, 64>();
//_TestForEachWithLargeTypes<int, 128>();
//_TestForEachWithLargeTypes<int, 256>();
//_TestForEachWithLargeTypes<int, 512>();
//_TestForEachWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008
}
DECLARE_UNITTEST(TestForEachWithLargeTypes);
// Counted (for_each_n) version of the large-type comparison.
template <typename T, unsigned int N>
void _TestForEachNWithLargeTypes(void)
{
size_t n = (64 * 1024) / sizeof(FixedVector<T,N>);
thrust::host_vector< FixedVector<T,N> > h_data(n);
for(size_t i = 0; i < h_data.size(); i++)
h_data[i] = FixedVector<T,N>(i);
thrust::device_vector< FixedVector<T,N> > d_data = h_data;
SetFixedVectorToConstant<T,N> func(123);
thrust::for_each_n(h_data.begin(), h_data.size(), func);
thrust::for_each_n(d_data.begin(), d_data.size(), func);
ASSERT_EQUAL_QUIET(h_data, d_data);
}
// Counted sweep; same disabled known-failure sizes as the uncounted sweep.
void TestForEachNWithLargeTypes(void)
{
_TestForEachNWithLargeTypes<int, 1>();
_TestForEachNWithLargeTypes<int, 2>();
_TestForEachNWithLargeTypes<int, 4>();
_TestForEachNWithLargeTypes<int, 8>();
_TestForEachNWithLargeTypes<int, 16>();
KNOWN_FAILURE;
//_TestForEachNWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1
//_TestForEachNWithLargeTypes<int, 64>();
//_TestForEachNWithLargeTypes<int, 128>();
//_TestForEachNWithLargeTypes<int, 256>();
//_TestForEachNWithLargeTypes<int, 512>();
//_TestForEachNWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008
}
DECLARE_UNITTEST(TestForEachNWithLargeTypes);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
| bbe58749c03251c8a4c971cd1629196a7a7e8fc6.cu | #include <unittest/unittest.h>
#include <thrust/for_each.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
// Marks output slot floor(x) as present; used to observe which elements
// thrust::for_each visited.
template <typename T>
class mark_present_for_each
{
public:
T * ptr;
__host__ __device__ void operator()(T x){ ptr[(int) x] = 1; }
};
// for_each over a small fixed vector: exactly the referenced slots are set
// and the returned iterator is the end of the input range.
template <class Vector>
void TestForEachSimple(void)
{
typedef typename Vector::value_type T;
Vector input(5);
Vector output(7, (T) 0);
input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6;
mark_present_for_each<T> f;
f.ptr = thrust::raw_pointer_cast(output.data());
typename Vector::iterator result = thrust::for_each(input.begin(), input.end(), f);
ASSERT_EQUAL(output[0], 0);
ASSERT_EQUAL(output[1], 0);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 1);
ASSERT_EQUAL_QUIET(result, input.end());
}
DECLARE_VECTOR_UNITTEST(TestForEachSimple);
// Same scenario via the counted overload for_each_n.
template <class Vector>
void TestForEachNSimple(void)
{
typedef typename Vector::value_type T;
Vector input(5);
Vector output(7, (T) 0);
input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6;
mark_present_for_each<T> f;
f.ptr = thrust::raw_pointer_cast(output.data());
typename Vector::iterator result = thrust::for_each_n(input.begin(), input.size(), f);
ASSERT_EQUAL(output[0], 0);
ASSERT_EQUAL(output[1], 0);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 1);
ASSERT_EQUAL_QUIET(result, input.end());
}
DECLARE_VECTOR_UNITTEST(TestForEachNSimple);
// for_each over a counting_iterator range [0, 5): system is dispatched from
// the iterator ("any system"), so slots 0..4 are set.
void TestForEachSimpleAnySystem(void)
{
thrust::device_vector<int> output(7, 0);
mark_present_for_each<int> f;
f.ptr = thrust::raw_pointer_cast(output.data());
thrust::counting_iterator<int> result = thrust::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(5), f);
ASSERT_EQUAL(output[0], 1);
ASSERT_EQUAL(output[1], 1);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 0);
ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5));
}
DECLARE_UNITTEST(TestForEachSimpleAnySystem);
// Counted variant of the counting_iterator test.
void TestForEachNSimpleAnySystem(void)
{
thrust::device_vector<int> output(7, 0);
mark_present_for_each<int> f;
f.ptr = thrust::raw_pointer_cast(output.data());
thrust::counting_iterator<int> result = thrust::for_each_n(thrust::make_counting_iterator(0), 5, f);
ASSERT_EQUAL(output[0], 1);
ASSERT_EQUAL(output[1], 1);
ASSERT_EQUAL(output[2], 1);
ASSERT_EQUAL(output[3], 1);
ASSERT_EQUAL(output[4], 1);
ASSERT_EQUAL(output[5], 0);
ASSERT_EQUAL(output[6], 0);
ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5));
}
DECLARE_UNITTEST(TestForEachNSimpleAnySystem);
// Randomized host-vs-device comparison: run the marking functor over the
// same random input on both backends and require identical outputs.
template <typename T>
void TestForEach(const size_t n)
{
const size_t output_size = std::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
// Clamp inputs into [0, output_size) so the marks stay in bounds.
for(size_t i = 0; i < n; i++)
h_input[i] = ((size_t) h_input[i]) % output_size;
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
mark_present_for_each<T> h_f;
mark_present_for_each<T> d_f;
h_f.ptr = &h_output[0];
d_f.ptr = (&d_output[0]).get();
typename thrust::host_vector<T>::iterator h_result =
thrust::for_each(h_input.begin(), h_input.end(), h_f);
typename thrust::device_vector<T>::iterator d_result =
thrust::for_each(d_input.begin(), d_input.end(), d_f);
ASSERT_EQUAL(h_output, d_output);
ASSERT_EQUAL_QUIET(h_result, h_input.end());
ASSERT_EQUAL_QUIET(d_result, d_input.end());
}
DECLARE_VARIABLE_UNITTEST(TestForEach);
// Counted (for_each_n) version of the randomized comparison above.
template <typename T>
void TestForEachN(const size_t n)
{
const size_t output_size = std::min((size_t) 10, 2 * n);
thrust::host_vector<T> h_input = unittest::random_integers<T>(n);
for(size_t i = 0; i < n; i++)
h_input[i] = ((size_t) h_input[i]) % output_size;
thrust::device_vector<T> d_input = h_input;
thrust::host_vector<T> h_output(output_size, (T) 0);
thrust::device_vector<T> d_output(output_size, (T) 0);
mark_present_for_each<T> h_f;
mark_present_for_each<T> d_f;
h_f.ptr = &h_output[0];
d_f.ptr = (&d_output[0]).get();
typename thrust::host_vector<T>::iterator h_result =
thrust::for_each_n(h_input.begin(), h_input.size(), h_f);
typename thrust::device_vector<T>::iterator d_result =
thrust::for_each_n(d_input.begin(), d_input.size(), d_f);
ASSERT_EQUAL(h_output, d_output);
ASSERT_EQUAL_QUIET(h_result, h_input.end());
ASSERT_EQUAL_QUIET(d_result, d_input.end());
}
DECLARE_VARIABLE_UNITTEST(TestForEachN);
// Recursive template: copies *x forward through N ints via a live temporary
// per level, forcing ~N registers when inlined (stress for the launcher).
template <size_t N> __host__ __device__ void f (int * x) { int temp = *x; f<N - 1>(x + 1); *x = temp;};
template <> __host__ __device__ void f<0>(int * x) { }
// Functor whose operator() has a deliberately large register footprint.
template <size_t N>
struct CopyFunctorWithManyRegisters
{
__host__ __device__
void operator()(int * ptr)
{
f<N>(ptr);
}
};
// Smoke test: for_each must still launch a functor needing many registers.
void TestForEachLargeRegisterFootprint()
{
const size_t N = 100;
thrust::device_vector<int> data(N, 12345);
thrust::device_vector<int *> input(1, thrust::raw_pointer_cast(&data[0])); // length is irrelevant
thrust::for_each(input.begin(), input.end(), CopyFunctorWithManyRegisters<N>());
}
DECLARE_UNITTEST(TestForEachLargeRegisterFootprint);
// Counted (for_each_n) version of the register-footprint smoke test.
void TestForEachNLargeRegisterFootprint()
{
const size_t N = 100;
thrust::device_vector<int> data(N, 12345);
thrust::device_vector<int *> input(1, thrust::raw_pointer_cast(&data[0])); // length is irrelevant
thrust::for_each_n(input.begin(), input.size(), CopyFunctorWithManyRegisters<N>());
}
DECLARE_UNITTEST(TestForEachNLargeRegisterFootprint);
// Assigns a constant FixedVector exemplar to each element; exercises
// for_each with large value types.
template <typename T, unsigned int N>
struct SetFixedVectorToConstant
{
FixedVector<T,N> exemplar;
SetFixedVectorToConstant(T scalar) : exemplar(scalar) {}
__host__ __device__
void operator()(FixedVector<T,N>& t)
{
t = exemplar;
}
};
// Host vs device for_each over ~64 KB of FixedVector<T,N> elements; both
// sides must end up identical.
template <typename T, unsigned int N>
void _TestForEachWithLargeTypes(void)
{
size_t n = (64 * 1024) / sizeof(FixedVector<T,N>);
thrust::host_vector< FixedVector<T,N> > h_data(n);
for(size_t i = 0; i < h_data.size(); i++)
h_data[i] = FixedVector<T,N>(i);
thrust::device_vector< FixedVector<T,N> > d_data = h_data;
SetFixedVectorToConstant<T,N> func(123);
thrust::for_each(h_data.begin(), h_data.end(), func);
thrust::for_each(d_data.begin(), d_data.end(), func);
ASSERT_EQUAL_QUIET(h_data, d_data);
}
// Sweep element sizes 1..16 ints; larger sizes are known failures on some
// toolchains (see comments) and are disabled.
void TestForEachWithLargeTypes(void)
{
_TestForEachWithLargeTypes<int, 1>();
_TestForEachWithLargeTypes<int, 2>();
_TestForEachWithLargeTypes<int, 4>();
_TestForEachWithLargeTypes<int, 8>();
_TestForEachWithLargeTypes<int, 16>();
KNOWN_FAILURE;
//_TestForEachWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1
//_TestForEachWithLargeTypes<int, 64>();
//_TestForEachWithLargeTypes<int, 128>();
//_TestForEachWithLargeTypes<int, 256>();
//_TestForEachWithLargeTypes<int, 512>();
//_TestForEachWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008
}
DECLARE_UNITTEST(TestForEachWithLargeTypes);
// Counted (for_each_n) version of the large-type comparison.
template <typename T, unsigned int N>
void _TestForEachNWithLargeTypes(void)
{
size_t n = (64 * 1024) / sizeof(FixedVector<T,N>);
thrust::host_vector< FixedVector<T,N> > h_data(n);
for(size_t i = 0; i < h_data.size(); i++)
h_data[i] = FixedVector<T,N>(i);
thrust::device_vector< FixedVector<T,N> > d_data = h_data;
SetFixedVectorToConstant<T,N> func(123);
thrust::for_each_n(h_data.begin(), h_data.size(), func);
thrust::for_each_n(d_data.begin(), d_data.size(), func);
ASSERT_EQUAL_QUIET(h_data, d_data);
}
// Counted sweep; same disabled known-failure sizes as the uncounted sweep.
void TestForEachNWithLargeTypes(void)
{
_TestForEachNWithLargeTypes<int, 1>();
_TestForEachNWithLargeTypes<int, 2>();
_TestForEachNWithLargeTypes<int, 4>();
_TestForEachNWithLargeTypes<int, 8>();
_TestForEachNWithLargeTypes<int, 16>();
KNOWN_FAILURE;
//_TestForEachNWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1
//_TestForEachNWithLargeTypes<int, 64>();
//_TestForEachNWithLargeTypes<int, 128>();
//_TestForEachNWithLargeTypes<int, 256>();
//_TestForEachNWithLargeTypes<int, 512>();
//_TestForEachNWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008
}
DECLARE_UNITTEST(TestForEachNWithLargeTypes);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
|
1c0e482c2716843ee3b3ec6486ac570a9b62783b.hip | // !!! This is a file automatically generated by hipify!!!
#include "rosalind.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// Problem 2: Transcribing DNA into RNA
// Reverse-complement kernel: thread t reads in[t] and writes the
// complementary base at the mirrored position of out. Launched with
// blockDim.x == strlen(in) (see dnacomplement).
// BUG(review): oindex = blockDim.x - threadIdx.x ranges over [1, size], so
// thread 0 writes out[size] -- one element past the size-byte device buffer
// (out-of-bounds write) -- and out[0] is never written. The host side
// compensates by copying from devout + 1 and patching the final character,
// but the OOB write remains. The proper fix is
// oindex = blockDim.x - 1 - threadIdx.x together with removing the host-side
// offset hack; the two must change in lockstep.
__global__ void revckernel(const char* in, char* out)
{
int oindex = blockDim.x - threadIdx.x;
switch (in[threadIdx.x]) {
case 'A':
out[oindex] = 'T';
break;
case 'C':
out[oindex] = 'G';
break;
case 'G':
out[oindex] = 'C';
break;
case 'T':
out[oindex] = 'A';
break;
default:
// Non-ACGT characters leave the slot untouched (uninitialized).
break;
}
}
// Computes the reverse complement of the DNA string `in` (length `size`)
// into `out` using a single-block GPU launch, returning the first HIP error
// encountered (hipSuccess on success).
// `out` must hold at least size + 1 chars; it is NUL-terminated here.
// NOTE(review): the devout + 1 copy and the host-side patch of out[size-1]
// compensate for revckernel's off-by-one mirror index (see BUG note there);
// keep the two in sync if either is changed. Also note the launch uses a
// single block, so `size` is limited to the max threads per block.
hipError_t dnacomplement(const char* in, char* out, unsigned int size)
{
char* devin = 0;
char* devout = 0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Couldn't find CUDA-compatible device.\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&devin, size * sizeof(char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Couldn't allocate memory for input on GPU.\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&devout, size * sizeof(char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Couldn't allocate memory for output on GPU.\n");
goto Error;
}
cudaStatus = hipMemcpy(devin, in, size * sizeof(char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Couldn't copy input to GPU: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// One block, one thread per input character.
hipLaunchKernelGGL(( revckernel) , dim3(1), dim3(size), 0, 0, devin, devout);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Couldn't execute ntcountkernel: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Couldn't synchronize threads for ntcountkernel: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
/* We do a bunch of terrible string hacks here because the null character keeps getting copied to the beginning of the string.
I should come up with a more elegant solution, in case this comes up in future problems. */
cudaStatus = hipMemcpy(out, devout + 1, (size - 1) * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Couldn't copy output to host device.\n");
goto Error;
}
// The kernel never fills index size-1 within bounds; complement in[0] on
// the host to produce the final output character.
switch (in[0]) {
case 'A':
out[size - 1] = 'T';
break;
case 'C':
out[size - 1] = 'G';
break;
case 'G':
out[size - 1] = 'C';
break;
case 'T':
out[size - 1] = 'A';
break;
default:
break;
}
out[size] = '\0';
Error:
hipFree(devin);
hipFree(devout);
return cudaStatus;
} | 1c0e482c2716843ee3b3ec6486ac570a9b62783b.cu | #include "rosalind.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Problem 2: Transcribing DNA into RNA
// Reverse-complement kernel: thread t reads in[t] and writes the
// complementary base at the mirrored position of out. Launched with
// blockDim.x == strlen(in) (see dnacomplement).
// BUG(review): oindex = blockDim.x - threadIdx.x ranges over [1, size], so
// thread 0 writes out[size] -- one element past the size-byte device buffer
// (out-of-bounds write) -- and out[0] is never written. The host side
// compensates by copying from devout + 1 and patching the final character,
// but the OOB write remains. The proper fix is
// oindex = blockDim.x - 1 - threadIdx.x together with removing the host-side
// offset hack; the two must change in lockstep.
__global__ void revckernel(const char* in, char* out)
{
int oindex = blockDim.x - threadIdx.x;
switch (in[threadIdx.x]) {
case 'A':
out[oindex] = 'T';
break;
case 'C':
out[oindex] = 'G';
break;
case 'G':
out[oindex] = 'C';
break;
case 'T':
out[oindex] = 'A';
break;
default:
// Non-ACGT characters leave the slot untouched (uninitialized).
break;
}
}
// Computes the reverse complement of the DNA string `in` (length `size`)
// into `out` using a single-block GPU launch, returning the first CUDA
// error encountered (cudaSuccess on success).
// `out` must hold at least size + 1 chars; it is NUL-terminated here.
// NOTE(review): the devout + 1 copy and the host-side patch of out[size-1]
// compensate for revckernel's off-by-one mirror index (see BUG note there);
// keep the two in sync if either is changed. Also note the launch uses a
// single block, so `size` is limited to the max threads per block.
cudaError_t dnacomplement(const char* in, char* out, unsigned int size)
{
char* devin = 0;
char* devout = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Couldn't find CUDA-compatible device.\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&devin, size * sizeof(char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Couldn't allocate memory for input on GPU.\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&devout, size * sizeof(char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Couldn't allocate memory for output on GPU.\n");
goto Error;
}
cudaStatus = cudaMemcpy(devin, in, size * sizeof(char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Couldn't copy input to GPU: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// One block, one thread per input character.
revckernel <<<1, size>>> (devin, devout);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Couldn't execute ntcountkernel: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Couldn't synchronize threads for ntcountkernel: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
/* We do a bunch of terrible string hacks here because the null character keeps getting copied to the beginning of the string.
I should come up with a more elegant solution, in case this comes up in future problems. */
cudaStatus = cudaMemcpy(out, devout + 1, (size - 1) * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Couldn't copy output to host device.\n");
goto Error;
}
// The kernel never fills index size-1 within bounds; complement in[0] on
// the host to produce the final output character.
switch (in[0]) {
case 'A':
out[size - 1] = 'T';
break;
case 'C':
out[size - 1] = 'G';
break;
case 'G':
out[size - 1] = 'C';
break;
case 'T':
out[size - 1] = 'A';
break;
default:
break;
}
out[size] = '\0';
Error:
cudaFree(devin);
cudaFree(devout);
return cudaStatus;
} |
5f1cc9f153f194309431d868d4df4e8fd11fed30.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
// Floating-point precision selector: dfloat is double with -DUSE_DOUBLE,
// float otherwise.
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
// Polynomial order N of the tensor-product basis (overridable at compile
// time); Nq = N + 1 points per direction.
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
// note the order of the fields below is also assumed in the code.
// State vector layout: density R, momenta U/V/W, total energy E.
const int64_t _nstate = 5;
const int64_t _R = 0, _U = 1, _V = 2, _W = 3, _E = 4;
// Per-node geometric factors: metric terms d(XI,ETA,ZETA)/d(x,y,z),
// Jacobian-weighted mass MJ and its inverse MJI, and coordinates x/y/z.
const int64_t _nvgeo = 14;
const int64_t _XIx = 0;
const int64_t _ETAx = 1;
const int64_t _ZETAx = 2;
const int64_t _XIy = 3;
const int64_t _ETAy = 4;
const int64_t _ZETAy = 5;
const int64_t _XIz = 6;
const int64_t _ETAz = 7;
const int64_t _ZETAz = 8;
const int64_t _MJ = 9;
const int64_t _MJI = 10;
const int64_t _x = 11;
const int64_t _y = 12;
const int64_t _z = 13;
// Gravitational acceleration and (gamma - 1) for the ideal-gas pressure.
#define grav ((dfloat) 9.81)
#define gdm1 ((dfloat) 0.4)
// Volume RHS kernel for the compressible Euler equations on tensor-product
// (DG) elements. One block per element (grid.x = nelem), block = (Nq, Nq):
// each thread owns one (i, j) column and marches k through the element,
// accumulating per-k partial sums in registers (r_rhs*) while staging the
// XI/ETA-direction fluxes in shared memory for cross-thread differentiation.
// rhs/Q layout: [elem][state][node]; vgeo layout: [elem][geo-field][node].
// D is the Nq x Nq 1-D differentiation matrix.
template <int64_t Nq, int64_t Np, int64_t nvar>
__global__ void volumerhs(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const dfloat * __restrict__ vgeo,
const dfloat gravity,
const dfloat * __restrict__ D,
const int64_t nelem){
__shared__ dfloat s_D[Nq][Nq];
__shared__ dfloat s_F[Nq][Nq][_nstate];
__shared__ dfloat s_G[Nq][Nq][_nstate];
// Per-thread RHS accumulators, one entry per k-slab of the owned column.
dfloat r_rhsR[Nq];
dfloat r_rhsU[Nq];
dfloat r_rhsV[Nq];
dfloat r_rhsW[Nq];
dfloat r_rhsE[Nq];
int64_t e = blockIdx.x;
int64_t j = threadIdx.y;
int64_t i = threadIdx.x;
// Cache the differentiation matrix once per block.
s_D[j][i] = D[j*Nq+i];
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
r_rhsR[k] = 0;
r_rhsU[k] = 0;
r_rhsV[k] = 0;
r_rhsW[k] = 0;
r_rhsE[k] = 0;
}
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
__syncthreads();
// Load values we will need into registers.
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJ = vgeo[gid + _MJ*Np];
dfloat XIx = vgeo[gid + _XIx*Np];
dfloat XIy = vgeo[gid + _XIy*Np];
dfloat XIz = vgeo[gid + _XIz*Np];
dfloat ETAx = vgeo[gid + _ETAx*Np];
dfloat ETAy = vgeo[gid + _ETAy*Np];
dfloat ETAz = vgeo[gid + _ETAz*Np];
dfloat ZETAx = vgeo[gid + _ZETAx*Np];
dfloat ZETAy = vgeo[gid + _ZETAy*Np];
dfloat ZETAz = vgeo[gid + _ZETAz*Np];
dfloat z = vgeo[gid + _z*Np];
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
dfloat R = Q[qid + _R*Np];
dfloat U = Q[qid + _U*Np];
dfloat V = Q[qid + _V*Np];
dfloat W = Q[qid + _W*Np];
dfloat E = Q[qid + _E*Np];
// Ideal-gas pressure with gravitational potential removed from E.
dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
dfloat Rinv = 1 / R;
// Physical-space Euler fluxes in x, y, z.
dfloat fluxR_x = U;
dfloat fluxU_x = Rinv * U * U + P;
dfloat fluxV_x = Rinv * U * V;
dfloat fluxW_x = Rinv * U * W;
dfloat fluxE_x = Rinv * U * (E + P);
dfloat fluxR_y = V;
dfloat fluxU_y = Rinv * V * U;
dfloat fluxV_y = Rinv * V * V + P;
dfloat fluxW_y = Rinv * V * W;
dfloat fluxE_y = Rinv * V * (E + P);
dfloat fluxR_z = W;
dfloat fluxU_z = Rinv * W * U;
dfloat fluxV_z = Rinv * W * V;
dfloat fluxW_z = Rinv * W * W + P;
dfloat fluxE_z = Rinv * W * (E + P);
// Contravariant fluxes: XI-direction (s_F) and ETA-direction (s_G) go to
// shared memory; ZETA-direction (r_H*) stays in registers since each
// thread differentiates its own column in k.
s_F[i][j][ _R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
s_F[i][j][ _U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
s_F[i][j][ _V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
s_F[i][j][ _W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
s_F[i][j][ _E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
s_G[i][j][ _R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
s_G[i][j][ _U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
s_G[i][j][ _V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
s_G[i][j][ _W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
s_G[i][j][ _E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
dfloat r_HR = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
dfloat r_HU = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
dfloat r_HV = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
dfloat r_HW = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
dfloat r_HE = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
// one shared access per 10 flops
// ZETA-direction differentiation: scatter this slab's flux into every
// k-accumulator of the column.
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dnk = s_D[n][k];
r_rhsR[n] += Dnk * r_HR;
r_rhsU[n] += Dnk * r_HU;
r_rhsV[n] += Dnk * r_HV;
r_rhsW[n] += Dnk * r_HW;
r_rhsE[n] += Dnk * r_HE;
}
// Gravity source term on vertical momentum.
r_rhsW[k] -= MJ * R * gravity;
__syncthreads();
// loop of XI-grid lines
// XI- and ETA-direction differentiation using the staged shared fluxes.
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dni = s_D[n][i];
dfloat Dnj = s_D[n][j];
r_rhsR[k] += Dni * s_F[n][j][_R];
r_rhsR[k] += Dnj * s_G[i][n][_R];
r_rhsU[k] += Dni * s_F[n][j][_U];
r_rhsU[k] += Dnj * s_G[i][n][_U];
r_rhsV[k] += Dni * s_F[n][j][_V];
r_rhsV[k] += Dnj * s_G[i][n][_V];
r_rhsW[k] += Dni * s_F[n][j][_W];
r_rhsW[k] += Dnj * s_G[i][n][_W];
r_rhsE[k] += Dni * s_F[n][j][_E];
r_rhsE[k] += Dnj * s_G[i][n][_E];
}
}
// Scale by the inverse mass matrix and accumulate into the global RHS.
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJI = vgeo[gid + _MJI*Np];
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
rhs[qid+_U*Np] += MJI*r_rhsU[k];
rhs[qid+_V*Np] += MJI*r_rhsV[k];
rhs[qid+_W*Np] += MJI*r_rhsW[k];
rhs[qid+_R*Np] += MJI*r_rhsR[k];
rhs[qid+_E*Np] += MJI*r_rhsE[k];
}
}
void randArray(int64_t N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
*q = (dfloat*) calloc(N, sizeof(dfloat));
hipMalloc(c_q, N*sizeof(dfloat));
for(int64_t n=0;n<N;++n){
q[0][n] = base + drand48()*range;
}
hipMemcpy(c_q[0], q[0], N*sizeof(dfloat), hipMemcpyHostToDevice);
}
int main(int argc, char **argv){
srand48(1234);
const int64_t N = POLYNOMIAL_ORDER;
const int64_t nelem = 4000;
const int64_t Nq = N+1;
const int64_t Np = Nq*Nq*Nq;
const int64_t Ntotal = Np*nelem*_nstate;
dfloat *Q, *c_Q;
randArray(Ntotal, 0., 1., &Q, &c_Q);
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idR = n + _R*Np + e*_nstate*Np;
int64_t idE = n + _E*Np + e*_nstate*Np;
Q[idR] += 2.;
Q[idE] += 20.;
}
}
hipMemcpy(c_Q, Q, nelem*_nstate*Np*sizeof(dfloat), hipMemcpyHostToDevice);
const int64_t Gtotal = Np*nelem*_nvgeo;
dfloat *vgeo, *c_vgeo;
randArray(Gtotal, 0, 1., &vgeo, &c_vgeo);
// Make sure the entries of the mass matrix satisfy the inverse relation
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idMJ = n + _MJ*Np + e*_nvgeo*Np;
int64_t idMJI = n + _MJI*Np + e*_nvgeo*Np;
vgeo[idMJ] += 3;
vgeo[idMJI] = 1./vgeo[idMJ];
}
}
hipMemcpy(c_vgeo, vgeo, nelem*_nvgeo*Np*sizeof(dfloat), hipMemcpyHostToDevice);
dfloat *D, *c_D;
randArray(Nq*Nq, 1., 1., &D, &c_D);
dfloat *rhs, *c_rhs;
srand48(1234);
randArray(Ntotal, 1., 1., &rhs, &c_rhs);
dim3 G(nelem,1,1);
dim3 B2(Nq,Nq,Nq);
dim3 B3(Nq,Nq,1);
hipLaunchKernelGGL(( volumerhs<Nq, Np, _nstate>) , dim3(G), dim3(B3) , 0, 0, c_rhs, c_Q, c_vgeo, grav, c_D, nelem);
hipDeviceSynchronize();
exit(0);
return 0;
}
| 5f1cc9f153f194309431d868d4df4e8fd11fed30.cu | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
// note the order of the fields below is also assumed in the code.
const int64_t _nstate = 5;
const int64_t _R = 0, _U = 1, _V = 2, _W = 3, _E = 4;
const int64_t _nvgeo = 14;
const int64_t _XIx = 0;
const int64_t _ETAx = 1;
const int64_t _ZETAx = 2;
const int64_t _XIy = 3;
const int64_t _ETAy = 4;
const int64_t _ZETAy = 5;
const int64_t _XIz = 6;
const int64_t _ETAz = 7;
const int64_t _ZETAz = 8;
const int64_t _MJ = 9;
const int64_t _MJI = 10;
const int64_t _x = 11;
const int64_t _y = 12;
const int64_t _z = 13;
#define grav ((dfloat) 9.81)
#define gdm1 ((dfloat) 0.4)
// Volume contribution to the RHS of the compressible Euler equations with
// gravity on curvilinear hexahedral elements (discontinuous-Galerkin style).
// Launch: grid.x = nelem (one block per element), block = Nq x Nq threads;
// each thread walks the ZETA (k) direction serially, carrying one RHS
// accumulator per k-level in registers.
template <int64_t Nq, int64_t Np, int64_t nvar>
__global__ void volumerhs(dfloat * __restrict__ rhs,
                          const dfloat * __restrict__ Q,
                          const dfloat * __restrict__ vgeo,
                          const dfloat gravity,
                          const dfloat * __restrict__ D,
                          const int64_t nelem){
  // differentiation matrix plus per-k-slab contravariant flux scratch
  __shared__ dfloat s_D[Nq][Nq];
  __shared__ dfloat s_F[Nq][Nq][_nstate];
  __shared__ dfloat s_G[Nq][Nq][_nstate];
  // per-thread running RHS for every k-level of this (i,j) column
  dfloat r_rhsR[Nq];
  dfloat r_rhsU[Nq];
  dfloat r_rhsV[Nq];
  dfloat r_rhsW[Nq];
  dfloat r_rhsE[Nq];
  int64_t e = blockIdx.x;   // element handled by this block
  int64_t j = threadIdx.y;
  int64_t i = threadIdx.x;
  s_D[j][i] = D[j*Nq+i];
  #pragma unroll Nq
  for(int64_t k=0;k<Nq;++k){
    r_rhsR[k] = 0;
    r_rhsU[k] = 0;
    r_rhsV[k] = 0;
    r_rhsW[k] = 0;
    r_rhsE[k] = 0;
  }
  #pragma unroll Nq
  for(int64_t k=0;k<Nq;++k){
    // barrier protects s_F/s_G reuse between consecutive k-slabs (and the
    // initial s_D write on the first iteration)
    __syncthreads();
    // Load geometric factors and conserved state for node (i,j,k) into registers
    int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
    dfloat MJ = vgeo[gid + _MJ*Np];
    dfloat XIx = vgeo[gid + _XIx*Np];
    dfloat XIy = vgeo[gid + _XIy*Np];
    dfloat XIz = vgeo[gid + _XIz*Np];
    dfloat ETAx = vgeo[gid + _ETAx*Np];
    dfloat ETAy = vgeo[gid + _ETAy*Np];
    dfloat ETAz = vgeo[gid + _ETAz*Np];
    dfloat ZETAx = vgeo[gid + _ZETAx*Np];
    dfloat ZETAy = vgeo[gid + _ZETAy*Np];
    dfloat ZETAz = vgeo[gid + _ZETAz*Np];
    dfloat z = vgeo[gid + _z*Np];
    int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
    dfloat R = Q[qid + _R*Np];
    dfloat U = Q[qid + _U*Np];
    dfloat V = Q[qid + _V*Np];
    dfloat W = Q[qid + _W*Np];
    dfloat E = Q[qid + _E*Np];
    // pressure: ideal gas minus kinetic and gravitational potential energy
    dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
    dfloat Rinv = 1 / R;
    // physical flux components in x, y and z
    dfloat fluxR_x = U;
    dfloat fluxU_x = Rinv * U * U + P;
    dfloat fluxV_x = Rinv * U * V;
    dfloat fluxW_x = Rinv * U * W;
    dfloat fluxE_x = Rinv * U * (E + P);
    dfloat fluxR_y = V;
    dfloat fluxU_y = Rinv * V * U;
    dfloat fluxV_y = Rinv * V * V + P;
    dfloat fluxW_y = Rinv * V * W;
    dfloat fluxE_y = Rinv * V * (E + P);
    dfloat fluxR_z = W;
    dfloat fluxU_z = Rinv * W * U;
    dfloat fluxV_z = Rinv * W * V;
    dfloat fluxW_z = Rinv * W * W + P;
    dfloat fluxE_z = Rinv * W * (E + P);
    // contravariant XI/ETA fluxes go to shared memory so other threads in the
    // slab can differentiate them below
    s_F[i][j][ _R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
    s_F[i][j][ _U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
    s_F[i][j][ _V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
    s_F[i][j][ _W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
    s_F[i][j][ _E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
    s_G[i][j][ _R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
    s_G[i][j][ _U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
    s_G[i][j][ _V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
    s_G[i][j][ _W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
    s_G[i][j][ _E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
    // ZETA flux stays in registers: the k direction is serial per thread
    dfloat r_HR = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
    dfloat r_HU = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
    dfloat r_HV = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
    dfloat r_HW = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
    dfloat r_HE = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
    // one shared access per 10 flops
    // scatter this level's ZETA flux to all k-levels of the column
    #pragma unroll Nq
    for(int64_t n=0;n<Nq;++n){
      dfloat Dnk = s_D[n][k];
      r_rhsR[n] += Dnk * r_HR;
      r_rhsU[n] += Dnk * r_HU;
      r_rhsV[n] += Dnk * r_HV;
      r_rhsW[n] += Dnk * r_HW;
      r_rhsE[n] += Dnk * r_HE;
    }
    // gravity source term in the vertical momentum equation
    r_rhsW[k] -= MJ * R * gravity;
    __syncthreads();
    // loop of XI-grid lines
    #pragma unroll Nq
    for(int64_t n=0;n<Nq;++n){
      dfloat Dni = s_D[n][i];
      dfloat Dnj = s_D[n][j];
      r_rhsR[k] += Dni * s_F[n][j][_R];
      r_rhsR[k] += Dnj * s_G[i][n][_R];
      r_rhsU[k] += Dni * s_F[n][j][_U];
      r_rhsU[k] += Dnj * s_G[i][n][_U];
      r_rhsV[k] += Dni * s_F[n][j][_V];
      r_rhsV[k] += Dnj * s_G[i][n][_V];
      r_rhsW[k] += Dni * s_F[n][j][_W];
      r_rhsW[k] += Dnj * s_G[i][n][_W];
      r_rhsE[k] += Dni * s_F[n][j][_E];
      r_rhsE[k] += Dnj * s_G[i][n][_E];
    }
  }
  // apply the inverse mass matrix and accumulate into the global RHS
  #pragma unroll Nq
  for(int64_t k=0;k<Nq;++k){
    int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
    dfloat MJI = vgeo[gid + _MJI*Np];
    int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
    rhs[qid+_U*Np] += MJI*r_rhsU[k];
    rhs[qid+_V*Np] += MJI*r_rhsV[k];
    rhs[qid+_W*Np] += MJI*r_rhsW[k];
    rhs[qid+_R*Np] += MJI*r_rhsR[k];
    rhs[qid+_E*Np] += MJI*r_rhsE[k];
  }
}
// Allocates a host buffer (*q) and a device buffer (*c_q) of N dfloats, fills
// the host copy with uniform randoms in [base, base+range) via drand48(), and
// uploads it to the device. Aborts with a message on any allocation/copy
// failure (the original silently ignored all return codes).
void randArray(int64_t N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
  *q = (dfloat*) calloc(N, sizeof(dfloat));
  if(*q == NULL){
    fprintf(stderr, "randArray: host allocation of %lld dfloats failed\n", (long long)N);
    exit(1);
  }
  cudaError_t err = cudaMalloc(c_q, N*sizeof(dfloat));
  if(err != cudaSuccess){
    fprintf(stderr, "randArray: cudaMalloc failed: %s\n", cudaGetErrorString(err));
    exit(1);
  }
  for(int64_t n=0;n<N;++n){
    q[0][n] = base + drand48()*range;
  }
  err = cudaMemcpy(c_q[0], q[0], N*sizeof(dfloat), cudaMemcpyHostToDevice);
  if(err != cudaSuccess){
    fprintf(stderr, "randArray: cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    exit(1);
  }
}
// Driver: builds pseudo-random element data, uploads it, and runs one launch
// of the volumerhs kernel over `nelem` elements of order POLYNOMIAL_ORDER.
int main(int argc, char **argv){
  srand48(1234);
  const int64_t N = POLYNOMIAL_ORDER;
  const int64_t nelem = 4000;
  const int64_t Nq = N+1;                  // nodes per direction
  const int64_t Np = Nq*Nq*Nq;             // nodes per element
  const int64_t Ntotal = Np*nelem*_nstate;
  dfloat *Q, *c_Q;
  randArray(Ntotal, 0., 1., &Q, &c_Q);
  // Shift density and energy away from zero so 1/R and the pressure computed
  // inside the kernel stay finite.
  for(int64_t e=0;e<nelem;++e){
    for(int64_t n=0;n<Np;++n){
      int64_t idR = n + _R*Np + e*_nstate*Np;
      int64_t idE = n + _E*Np + e*_nstate*Np;
      Q[idR] += 2.;
      Q[idE] += 20.;
    }
  }
  cudaMemcpy(c_Q, Q, nelem*_nstate*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
  const int64_t Gtotal = Np*nelem*_nvgeo;
  dfloat *vgeo, *c_vgeo;
  randArray(Gtotal, 0, 1., &vgeo, &c_vgeo);
  // Make sure the entries of the mass matrix satisfy the inverse relation
  for(int64_t e=0;e<nelem;++e){
    for(int64_t n=0;n<Np;++n){
      int64_t idMJ = n + _MJ*Np + e*_nvgeo*Np;
      int64_t idMJI = n + _MJI*Np + e*_nvgeo*Np;
      vgeo[idMJ] += 3;
      vgeo[idMJI] = 1./vgeo[idMJ];
    }
  }
  cudaMemcpy(c_vgeo, vgeo, nelem*_nvgeo*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
  dfloat *D, *c_D;
  randArray(Nq*Nq, 1., 1., &D, &c_D);
  dfloat *rhs, *c_rhs;
  srand48(1234);                           // reset seed so rhs is reproducible
  randArray(Ntotal, 1., 1., &rhs, &c_rhs);
  dim3 G(nelem,1,1);                       // one block per element
  dim3 B3(Nq,Nq,1);                        // 2D slab; kernel loops over k itself
  volumerhs<Nq, Np, _nstate> <<< G, B3 >>> (c_rhs, c_Q, c_vgeo, grav, c_D, nelem);
  // catch invalid launch configurations; execution errors surface at the sync
  cudaError_t err = cudaGetLastError();
  if(err != cudaSuccess)
    fprintf(stderr, "volumerhs launch failed: %s\n", cudaGetErrorString(err));
  cudaDeviceSynchronize();
  // release device and host buffers (previously leaked through exit(0), which
  // also made the old `return 0` unreachable)
  cudaFree(c_Q);    free(Q);
  cudaFree(c_vgeo); free(vgeo);
  cudaFree(c_D);    free(D);
  cudaFree(c_rhs);  free(rhs);
  return 0;
}
|
6c748c0c07ababbc7eb776a566f3fcde264e3de9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
// Per-element kernel body for the OP2 "res_calc" loop on one quadrilateral
// (4 nodes, 4 quadrature points).
// Inputs:  x    - 4 pointers to nodal coordinates (x[m][0], x[m][1])
//          phim - 4 pointers to the nodal potential (1 value each)
// Outputs: K    - 4x4 element matrix, fully overwritten here
//          res  - 4 pointers to nodal residual entries, incremented
// Ng2_xi, wtg2, gm1, gm1i, m2 are file-scope constants defined elsewhere in
// the generated file (quadrature shape-function derivatives, weights and gas
// parameters — not visible in this chunk; confirm against op2 airfoil source).
__device__ void res_calc_gpu( const double **x, const double **phim, double *K,
                              double **res) {
  for (int j = 0; j < 4; j++) {
    for (int k = 0; k < 4; k++) {
      K[j * 4 + k] = 0;
    }
  }
  // loop over the 4 quadrature points
  for (int i = 0; i < 4; i++) {
    double det_x_xi = 0;
    double N_x[8];
    double a = 0;
    // Build the physical shape-function gradients N_x (x-derivatives in
    // N_x[0..3], y-derivatives in N_x[4..7]) from the reference-space
    // derivatives Ng2_xi and the element Jacobian; det_x_xi accumulates the
    // Jacobian determinant along the way.
    for (int m = 0; m < 4; m++)
      det_x_xi += Ng2_xi[4 * i + 16 + m] * x[m][1];
    for (int m = 0; m < 4; m++)
      N_x[m] = det_x_xi * Ng2_xi[4 * i + m];
    a = 0;
    for (int m = 0; m < 4; m++)
      a += Ng2_xi[4 * i + m] * x[m][0];
    for (int m = 0; m < 4; m++)
      N_x[4 + m] = a * Ng2_xi[4 * i + 16 + m];
    det_x_xi *= a;
    a = 0;
    for (int m = 0; m < 4; m++)
      a += Ng2_xi[4 * i + m] * x[m][1];
    for (int m = 0; m < 4; m++)
      N_x[m] -= a * Ng2_xi[4 * i + 16 + m];
    double b = 0;
    for (int m = 0; m < 4; m++)
      b += Ng2_xi[4 * i + 16 + m] * x[m][0];
    for (int m = 0; m < 4; m++)
      N_x[4 + m] -= b * Ng2_xi[4 * i + m];
    det_x_xi -= a * b;
    for (int j = 0; j < 8; j++)
      N_x[j] /= det_x_xi;
    double wt1 = wtg2[i] * det_x_xi;  // quadrature weight times |J|
    // u = gradient of the potential at this quadrature point
    double u[2] = {0.0, 0.0};
    for (int j = 0; j < 4; j++) {
      u[0] += N_x[j] * phim[j][0];
      u[1] += N_x[4 + j] * phim[j][0];
    }
    // density from the isentropic relation; rc2 is the rho/Dk factor used in
    // the nonlinear (rank-one) part of K below
    double Dk = 1.0 + 0.5 * gm1 * (m2 - (u[0] * u[0] + u[1] * u[1]));
    double rho = pow(Dk, gm1i);
    double rc2 = rho / Dk;
    for (int j = 0; j < 4; j++) {
      res[j][0] += wt1 * rho * (u[0] * N_x[j] + u[1] * N_x[4 + j]);
    }
    // stiffness-like contribution: Laplacian part minus a rank-one correction
    for (int j = 0; j < 4; j++) {
      for (int k = 0; k < 4; k++) {
        K[j * 4 + k] +=
            wt1 * rho * (N_x[j] * N_x[k] + N_x[4 + j] * N_x[4 + k]) -
            wt1 * rc2 * (u[0] * N_x[j] + u[1] * N_x[4 + j]) *
                (u[0] * N_x[k] + u[1] * N_x[4 + k]);
      }
    }
  }
}
// CUDA kernel function
// OP2-generated indirect kernel. Each thread block processes one "mini-block"
// of the execution plan; indirect increments into ind_arg2 (the residual) are
// made race-free by the plan's thread colouring — threads apply their updates
// one colour at a time, separated by __syncthreads(), instead of atomics.
//   ind_arg0  - node coordinates (stride 2 per mapped node)
//   ind_arg1  - nodal potential (stride 1)
//   ind_arg2  - nodal residual (stride 1, read-modify-write)
//   opDat0Map - cell -> 4-node connectivity, SoA layout of width set_size
//   arg8      - direct per-cell 4x4 matrix K (stride 16)
__global__ void op_cuda_res_calc(
  const double *__restrict ind_arg0,
  const double *__restrict ind_arg1,
  double *__restrict ind_arg2,
  const int *__restrict opDat0Map,
  double *arg8,
  int block_offset,
  int *blkmap,
  int *offset,
  int *nelems,
  int *ncolors,
  int *colors,
  int nblocks,
  int set_size) {
  // per-thread staging for the four indirect increments of this cell
  double arg9_l[1];
  double arg10_l[1];
  double arg11_l[1];
  double arg12_l[1];
  double *arg9_vec[4] = {
    arg9_l,
    arg10_l,
    arg11_l,
    arg12_l,
  };
  __shared__ int nelems2, ncolor;
  __shared__ int nelem, offset_b;
  extern __shared__ char shared[];
  // surplus blocks from the 2D grid work-around have nothing to do
  if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
    return;
  }
  if (threadIdx.x==0) {
    //get sizes and shift pointers and direct-mapped data
    int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
    nelem = nelems[blockId];
    offset_b = offset[blockId];
    // round nelem up to a blockDim.x multiple so every thread runs the same
    // number of iterations (and hence hits the same barriers below)
    nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
    ncolor = ncolors[blockId];
  }
  __syncthreads(); // make sure all of above completed
  for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
    int col2 = -1;   // stays -1 for padding threads -> never stores
    int map0idx;
    int map1idx;
    int map2idx;
    int map3idx;
    if (n<nelem) {
      //initialise local variables
      for ( int d=0; d<1; d++ ){
        arg9_l[d] = ZERO_double;
      }
      for ( int d=0; d<1; d++ ){
        arg10_l[d] = ZERO_double;
      }
      for ( int d=0; d<1; d++ ){
        arg11_l[d] = ZERO_double;
      }
      for ( int d=0; d<1; d++ ){
        arg12_l[d] = ZERO_double;
      }
      map0idx = opDat0Map[n + offset_b + set_size * 0];
      map1idx = opDat0Map[n + offset_b + set_size * 1];
      map2idx = opDat0Map[n + offset_b + set_size * 2];
      map3idx = opDat0Map[n + offset_b + set_size * 3];
      const double* arg0_vec[] = {
        &ind_arg0[2 * map0idx],
        &ind_arg0[2 * map1idx],
        &ind_arg0[2 * map2idx],
        &ind_arg0[2 * map3idx]};
      const double* arg4_vec[] = {
        &ind_arg1[1 * map0idx],
        &ind_arg1[1 * map1idx],
        &ind_arg1[1 * map2idx],
        &ind_arg1[1 * map3idx]};
      //user-supplied kernel call
      res_calc_gpu(arg0_vec,
                   arg4_vec,
                   arg8+(n+offset_b)*16,
                   arg9_vec);
      col2 = colors[n+offset_b];
    }
    //store local variables
    // Colour-ordered flush: only one colour writes per pass, so two cells
    // sharing a node can never update ind_arg2 concurrently. Threads with
    // col2 == -1 never match, so their uninitialised mapXidx are never read.
    for ( int col=0; col<ncolor; col++ ){
      if (col2==col) {
        arg9_l[0] += ind_arg2[0+map0idx*1];
        ind_arg2[0+map0idx*1] = arg9_l[0];
        arg10_l[0] += ind_arg2[0+map1idx*1];
        ind_arg2[0+map1idx*1] = arg10_l[0];
        arg11_l[0] += ind_arg2[0+map2idx*1];
        ind_arg2[0+map2idx*1] = arg11_l[0];
        arg12_l[0] += ind_arg2[0+map3idx*1];
        ind_arg2[0+map3idx*1] = arg12_l[0];
      }
      __syncthreads();
    }
  }
}
//host stub function
// Host stub generated by op2.py: expands the vector op_args, performs the halo
// exchange, builds/fetches a colouring-and-blocking execution plan, and
// launches op_cuda_res_calc once per plan colour (waiting for MPI halos before
// the first non-core colour). Timing/transfer stats go into OP_kernels[0].
void op_par_loop_res_calc(char const *name, op_set set,
  op_arg arg0,
  op_arg arg4,
  op_arg arg8,
  op_arg arg9){
  int nargs = 13;
  op_arg args[13];
  // expand the 4-way vector arguments into individual op_args
  arg0.idx = 0;
  args[0] = arg0;
  for ( int v=1; v<4; v++ ){
    args[0 + v] = op_arg_dat(arg0.dat, v, arg0.map, 2, "double", OP_READ);
  }
  arg4.idx = 0;
  args[4] = arg4;
  for ( int v=1; v<4; v++ ){
    args[4 + v] = op_arg_dat(arg4.dat, v, arg4.map, 1, "double", OP_READ);
  }
  args[8] = arg8;
  arg9.idx = 0;
  args[9] = arg9;
  for ( int v=1; v<4; v++ ){
    args[9 + v] = op_arg_dat(arg9.dat, v, arg9.map, 1, "double", OP_INC);
  }
  // initialise timers
  double cpu_t1, cpu_t2, wall_t1, wall_t2;
  op_timing_realloc(0);
  op_timers_core(&cpu_t1, &wall_t1);
  OP_kernels[0].name = name;
  OP_kernels[0].count += 1;
  int ninds = 3;
  // indirection index per argument (-1 = direct, i.e. arg8/K)
  int inds[13] = {0,0,0,0,1,1,1,1,-1,2,2,2,2};
  if (OP_diags>2) {
    printf(" kernel routine with indirection: res_calc\n");
  }
  //get plan
  #ifdef OP_PART_SIZE_0
  int part_size = OP_PART_SIZE_0;
  #else
  int part_size = OP_part_size;
  #endif
  int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
  if (set->size > 0) {
    op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
    //execute plan
    int block_offset = 0;
    for ( int col=0; col<Plan->ncolors; col++ ){
      // halo data is only needed from the first non-core colour onwards
      if (col==Plan->ncolors_core) {
        op_mpi_wait_all_cuda(nargs, args);
      }
      #ifdef OP_BLOCK_SIZE_0
      int nthread = OP_BLOCK_SIZE_0;
      #else
      int nthread = OP_block_size;
      #endif
      // fold block counts >= 2^16 into a 2D grid (65535 limit on gridDim.x)
      dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
      Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
      if (Plan->ncolblk[col] > 0) {
        hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread), 0, 0,
        (double *)arg0.data_d,
        (double *)arg4.data_d,
        (double *)arg9.data_d,
        arg0.map_data_d,
        (double*)arg8.data_d,
        block_offset,
        Plan->blkmap,
        Plan->offset,
        Plan->nelems,
        Plan->nthrcol,
        Plan->thrcol,
        Plan->ncolblk[col],
        set->size+set->exec_size);
      }
      block_offset += Plan->ncolblk[col];
    }
    OP_kernels[0].transfer += Plan->transfer;
    OP_kernels[0].transfer2 += Plan->transfer2;
  }
  op_mpi_set_dirtybit_cuda(nargs, args);
  cutilSafeCall(hipDeviceSynchronize());
  //update kernel record
  op_timers_core(&cpu_t2, &wall_t2);
  OP_kernels[0].time += wall_t2 - wall_t1;
}
| 6c748c0c07ababbc7eb776a566f3fcde264e3de9.cu | //
// auto-generated by op2.py
//
//user function
// Per-element kernel body for the OP2 "res_calc" loop on one quadrilateral
// (4 nodes, 4 quadrature points).
// Inputs:  x    - 4 pointers to nodal coordinates (x[m][0], x[m][1])
//          phim - 4 pointers to the nodal potential (1 value each)
// Outputs: K    - 4x4 element matrix, fully overwritten here
//          res  - 4 pointers to nodal residual entries, incremented
// Ng2_xi, wtg2, gm1, gm1i, m2 are file-scope constants defined elsewhere in
// the generated file (quadrature shape-function derivatives, weights and gas
// parameters — not visible in this chunk; confirm against op2 airfoil source).
__device__ void res_calc_gpu( const double **x, const double **phim, double *K,
                              double **res) {
  for (int j = 0; j < 4; j++) {
    for (int k = 0; k < 4; k++) {
      K[j * 4 + k] = 0;
    }
  }
  // loop over the 4 quadrature points
  for (int i = 0; i < 4; i++) {
    double det_x_xi = 0;
    double N_x[8];
    double a = 0;
    // Build the physical shape-function gradients N_x (x-derivatives in
    // N_x[0..3], y-derivatives in N_x[4..7]) from the reference-space
    // derivatives Ng2_xi and the element Jacobian; det_x_xi accumulates the
    // Jacobian determinant along the way.
    for (int m = 0; m < 4; m++)
      det_x_xi += Ng2_xi[4 * i + 16 + m] * x[m][1];
    for (int m = 0; m < 4; m++)
      N_x[m] = det_x_xi * Ng2_xi[4 * i + m];
    a = 0;
    for (int m = 0; m < 4; m++)
      a += Ng2_xi[4 * i + m] * x[m][0];
    for (int m = 0; m < 4; m++)
      N_x[4 + m] = a * Ng2_xi[4 * i + 16 + m];
    det_x_xi *= a;
    a = 0;
    for (int m = 0; m < 4; m++)
      a += Ng2_xi[4 * i + m] * x[m][1];
    for (int m = 0; m < 4; m++)
      N_x[m] -= a * Ng2_xi[4 * i + 16 + m];
    double b = 0;
    for (int m = 0; m < 4; m++)
      b += Ng2_xi[4 * i + 16 + m] * x[m][0];
    for (int m = 0; m < 4; m++)
      N_x[4 + m] -= b * Ng2_xi[4 * i + m];
    det_x_xi -= a * b;
    for (int j = 0; j < 8; j++)
      N_x[j] /= det_x_xi;
    double wt1 = wtg2[i] * det_x_xi;  // quadrature weight times |J|
    // u = gradient of the potential at this quadrature point
    double u[2] = {0.0, 0.0};
    for (int j = 0; j < 4; j++) {
      u[0] += N_x[j] * phim[j][0];
      u[1] += N_x[4 + j] * phim[j][0];
    }
    // density from the isentropic relation; rc2 is the rho/Dk factor used in
    // the nonlinear (rank-one) part of K below
    double Dk = 1.0 + 0.5 * gm1 * (m2 - (u[0] * u[0] + u[1] * u[1]));
    double rho = pow(Dk, gm1i);
    double rc2 = rho / Dk;
    for (int j = 0; j < 4; j++) {
      res[j][0] += wt1 * rho * (u[0] * N_x[j] + u[1] * N_x[4 + j]);
    }
    // stiffness-like contribution: Laplacian part minus a rank-one correction
    for (int j = 0; j < 4; j++) {
      for (int k = 0; k < 4; k++) {
        K[j * 4 + k] +=
            wt1 * rho * (N_x[j] * N_x[k] + N_x[4 + j] * N_x[4 + k]) -
            wt1 * rc2 * (u[0] * N_x[j] + u[1] * N_x[4 + j]) *
                (u[0] * N_x[k] + u[1] * N_x[4 + k]);
      }
    }
  }
}
// CUDA kernel function
// OP2-generated indirect kernel. Each thread block processes one "mini-block"
// of the execution plan; indirect increments into ind_arg2 (the residual) are
// made race-free by the plan's thread colouring — threads apply their updates
// one colour at a time, separated by __syncthreads(), instead of atomics.
//   ind_arg0  - node coordinates (stride 2 per mapped node)
//   ind_arg1  - nodal potential (stride 1)
//   ind_arg2  - nodal residual (stride 1, read-modify-write)
//   opDat0Map - cell -> 4-node connectivity, SoA layout of width set_size
//   arg8      - direct per-cell 4x4 matrix K (stride 16)
__global__ void op_cuda_res_calc(
  const double *__restrict ind_arg0,
  const double *__restrict ind_arg1,
  double *__restrict ind_arg2,
  const int *__restrict opDat0Map,
  double *arg8,
  int block_offset,
  int *blkmap,
  int *offset,
  int *nelems,
  int *ncolors,
  int *colors,
  int nblocks,
  int set_size) {
  // per-thread staging for the four indirect increments of this cell
  double arg9_l[1];
  double arg10_l[1];
  double arg11_l[1];
  double arg12_l[1];
  double *arg9_vec[4] = {
    arg9_l,
    arg10_l,
    arg11_l,
    arg12_l,
  };
  __shared__ int nelems2, ncolor;
  __shared__ int nelem, offset_b;
  extern __shared__ char shared[];
  // surplus blocks from the 2D grid work-around have nothing to do
  if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
    return;
  }
  if (threadIdx.x==0) {
    //get sizes and shift pointers and direct-mapped data
    int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
    nelem = nelems[blockId];
    offset_b = offset[blockId];
    // round nelem up to a blockDim.x multiple so every thread runs the same
    // number of iterations (and hence hits the same barriers below)
    nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
    ncolor = ncolors[blockId];
  }
  __syncthreads(); // make sure all of above completed
  for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
    int col2 = -1;   // stays -1 for padding threads -> never stores
    int map0idx;
    int map1idx;
    int map2idx;
    int map3idx;
    if (n<nelem) {
      //initialise local variables
      for ( int d=0; d<1; d++ ){
        arg9_l[d] = ZERO_double;
      }
      for ( int d=0; d<1; d++ ){
        arg10_l[d] = ZERO_double;
      }
      for ( int d=0; d<1; d++ ){
        arg11_l[d] = ZERO_double;
      }
      for ( int d=0; d<1; d++ ){
        arg12_l[d] = ZERO_double;
      }
      map0idx = opDat0Map[n + offset_b + set_size * 0];
      map1idx = opDat0Map[n + offset_b + set_size * 1];
      map2idx = opDat0Map[n + offset_b + set_size * 2];
      map3idx = opDat0Map[n + offset_b + set_size * 3];
      const double* arg0_vec[] = {
        &ind_arg0[2 * map0idx],
        &ind_arg0[2 * map1idx],
        &ind_arg0[2 * map2idx],
        &ind_arg0[2 * map3idx]};
      const double* arg4_vec[] = {
        &ind_arg1[1 * map0idx],
        &ind_arg1[1 * map1idx],
        &ind_arg1[1 * map2idx],
        &ind_arg1[1 * map3idx]};
      //user-supplied kernel call
      res_calc_gpu(arg0_vec,
                   arg4_vec,
                   arg8+(n+offset_b)*16,
                   arg9_vec);
      col2 = colors[n+offset_b];
    }
    //store local variables
    // Colour-ordered flush: only one colour writes per pass, so two cells
    // sharing a node can never update ind_arg2 concurrently. Threads with
    // col2 == -1 never match, so their uninitialised mapXidx are never read.
    for ( int col=0; col<ncolor; col++ ){
      if (col2==col) {
        arg9_l[0] += ind_arg2[0+map0idx*1];
        ind_arg2[0+map0idx*1] = arg9_l[0];
        arg10_l[0] += ind_arg2[0+map1idx*1];
        ind_arg2[0+map1idx*1] = arg10_l[0];
        arg11_l[0] += ind_arg2[0+map2idx*1];
        ind_arg2[0+map2idx*1] = arg11_l[0];
        arg12_l[0] += ind_arg2[0+map3idx*1];
        ind_arg2[0+map3idx*1] = arg12_l[0];
      }
      __syncthreads();
    }
  }
}
//host stub function
// Host stub generated by op2.py: expands the vector op_args, performs the halo
// exchange, builds/fetches a colouring-and-blocking execution plan, and
// launches op_cuda_res_calc once per plan colour (waiting for MPI halos before
// the first non-core colour). Timing/transfer stats go into OP_kernels[0].
void op_par_loop_res_calc(char const *name, op_set set,
  op_arg arg0,
  op_arg arg4,
  op_arg arg8,
  op_arg arg9){
  int nargs = 13;
  op_arg args[13];
  // expand the 4-way vector arguments into individual op_args
  arg0.idx = 0;
  args[0] = arg0;
  for ( int v=1; v<4; v++ ){
    args[0 + v] = op_arg_dat(arg0.dat, v, arg0.map, 2, "double", OP_READ);
  }
  arg4.idx = 0;
  args[4] = arg4;
  for ( int v=1; v<4; v++ ){
    args[4 + v] = op_arg_dat(arg4.dat, v, arg4.map, 1, "double", OP_READ);
  }
  args[8] = arg8;
  arg9.idx = 0;
  args[9] = arg9;
  for ( int v=1; v<4; v++ ){
    args[9 + v] = op_arg_dat(arg9.dat, v, arg9.map, 1, "double", OP_INC);
  }
  // initialise timers
  double cpu_t1, cpu_t2, wall_t1, wall_t2;
  op_timing_realloc(0);
  op_timers_core(&cpu_t1, &wall_t1);
  OP_kernels[0].name = name;
  OP_kernels[0].count += 1;
  int ninds = 3;
  // indirection index per argument (-1 = direct, i.e. arg8/K)
  int inds[13] = {0,0,0,0,1,1,1,1,-1,2,2,2,2};
  if (OP_diags>2) {
    printf(" kernel routine with indirection: res_calc\n");
  }
  //get plan
  #ifdef OP_PART_SIZE_0
  int part_size = OP_PART_SIZE_0;
  #else
  int part_size = OP_part_size;
  #endif
  int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
  if (set->size > 0) {
    op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
    //execute plan
    int block_offset = 0;
    for ( int col=0; col<Plan->ncolors; col++ ){
      // halo data is only needed from the first non-core colour onwards
      if (col==Plan->ncolors_core) {
        op_mpi_wait_all_cuda(nargs, args);
      }
      #ifdef OP_BLOCK_SIZE_0
      int nthread = OP_BLOCK_SIZE_0;
      #else
      int nthread = OP_block_size;
      #endif
      // fold block counts >= 2^16 into a 2D grid (65535 limit on gridDim.x)
      dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
      Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
      if (Plan->ncolblk[col] > 0) {
        op_cuda_res_calc<<<nblocks,nthread>>>(
        (double *)arg0.data_d,
        (double *)arg4.data_d,
        (double *)arg9.data_d,
        arg0.map_data_d,
        (double*)arg8.data_d,
        block_offset,
        Plan->blkmap,
        Plan->offset,
        Plan->nelems,
        Plan->nthrcol,
        Plan->thrcol,
        Plan->ncolblk[col],
        set->size+set->exec_size);
      }
      block_offset += Plan->ncolblk[col];
    }
    OP_kernels[0].transfer += Plan->transfer;
    OP_kernels[0].transfer2 += Plan->transfer2;
  }
  op_mpi_set_dirtybit_cuda(nargs, args);
  cutilSafeCall(cudaDeviceSynchronize());
  //update kernel record
  op_timers_core(&cpu_t2, &wall_t2);
  OP_kernels[0].time += wall_t2 - wall_t1;
}
|
0cf1570433f0a21516260bdce754285025569bc8.hip | // !!! This is a file automatically generated by hipify!!!
#include <mpi.h>
#include <cstdio>
#include <cmath>
#include <vector>
#include <chrono>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
using namespace std;
#define THREADS_X 8
#define THREADS_Y 1
// Seed one hiprandState_t per thread; the sequence number is the global thread
// id, so each thread draws from an independent stream.
__global__ void init_rand(int seed, hiprandState_t* states){
  const int tid = threadIdx.x + blockDim.x * blockIdx.x;
  hiprand_init(seed, tid, 0, &states[tid]);
}
// Draw one standard-normal sample per thread from its private RNG state.
__global__ void randoms(hiprandState_t *states, float *numbers){
  const int tid = threadIdx.x + blockDim.x * blockIdx.x;
  numbers[tid] = hiprand_normal(&states[tid]);
}
// Copy rows [offset, offset + N/size) of the N x N matrix A into the strip
// subA (N/size rows of length N). One thread per row; extra threads return.
__global__ void subA_calc(float *subA, float *A, int N, int offset, int size){
  const int r = blockDim.x * blockIdx.x + threadIdx.x;
  const int rows = N / size;
  if(r >= rows) return;
  const float *src = A + (size_t)N * (offset + r);
  float *dst = subA + (size_t)N * r;
  for(int c = 0; c < N; ++c)
    dst[c] = src[c];
}
// Extract the column strip B[:, offset : offset + N/size] of the N x N matrix
// B into subB (N rows of length N/size). One thread per row.
__global__ void subB_calc(float *subB, float *B, int N, int offset, int size){
  const int r = blockDim.x * blockIdx.x + threadIdx.x;
  if(r >= N) return;
  const int cols = N / size;
  for(int c = 0; c < cols; ++c)
    subB[cols * r + c] = B[N * r + offset + c];
}

// Accumulate one (N/size x N/size) tile of the product subA * subB into subC
// at column offset `offset`. subA is N/size x N, subB is N x N/size, and subC
// is stored with row stride N. One thread per output row; read-modify-write.
__global__ void subC_calc(float *subA, float *subB, float *subC, int N, int offset, int size){
  const int r = blockDim.x * blockIdx.x + threadIdx.x;
  const int tile = N / size;
  if(r >= tile) return;
  for(int c = 0; c < tile; ++c){
    float acc = subC[N * r + c + offset];
    for(int k = 0; k < N; ++k)
      acc += subA[N * r + k] * subB[tile * k + c];
    subC[N * r + c + offset] = acc;
  }
}
// Distributed matrix multiply checked against a host reference.
// Ring (SUMMA-style) algorithm: every rank keeps a row strip of A (subA) and a
// column strip of B (subB); the B strips rotate around a ring of ranks, and
// each visit fills one N/size x N/size tile of subC on the GPU. Finally the
// subC strips are gathered into C and verified on the host.
int main(int argc, char** argv) {
  int size, rank;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  const int N = 256;
  float *A, *B, *C, *subA, *subB, *subC;
  float *d_A, *d_B, *d_subA, *d_subB, *d_subC;
  const int K = 1;                            // kernels are logically 1D
  vector<float> recv(N*N/size);               // staging buffer for the ring exchange
  int grid_x = ceil((N + THREADS_X - 1) / THREADS_X);
  int grid_y = ceil((K + THREADS_Y - 1) / THREADS_Y);
  dim3 dimGrid(grid_x, grid_y);
  dim3 dimBlock(THREADS_X, THREADS_Y);
  // pinned host buffers for fast host<->device copies
  hipHostMalloc((void**)&A, sizeof(float) * N * N);
  hipHostMalloc((void**)&B, sizeof(float) * N * N);
  hipHostMalloc((void**)&C, sizeof(float) * N * N);
  hipHostMalloc((void**)&subA, sizeof(float) * N * N/size);
  hipHostMalloc((void**)&subB, sizeof(float) * N * N/size);
  hipHostMalloc((void**)&subC, sizeof(float) * N * N/size);
  // identical seed on every rank -> every rank generates the same A and B
  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      A[N*i+j] = drand48();
      B[N*i+j] = drand48();
    }
  }
  int offset = N/size*rank;
  hipMalloc((void**)&d_A, sizeof(float) * N * N);
  hipMalloc((void**)&d_B, sizeof(float) * N * N);
  hipMalloc((void**)&d_subA, sizeof(float) * N * N/size);
  hipMalloc((void**)&d_subB, sizeof(float) * N * N/size);
  hipMalloc((void**)&d_subC, sizeof(float) * N * N/size);
  hipMemcpy(d_A, A, sizeof(float) * N * N, hipMemcpyHostToDevice);
  hipMemcpy(d_B, B, sizeof(float) * N * N, hipMemcpyHostToDevice);
  hipMemcpy(d_subC, subC, sizeof(float) * N * N/size, hipMemcpyHostToDevice);
  // carve this rank's strips out of A and B on the device; mirror subB to the
  // host because it travels through MPI
  hipLaunchKernelGGL(subA_calc, dimGrid, dimBlock, 0, 0, d_subA, d_A, N, offset, size);
  hipLaunchKernelGGL(subB_calc, dimGrid, dimBlock, 0, 0, d_subB, d_B, N, offset, size);
  hipMemcpy(subB, d_subB, sizeof(float) * N * N/size, hipMemcpyDeviceToHost);
  hipDeviceSynchronize();
  int recv_from = (rank + 1) % size;
  int send_to = (rank - 1 + size) % size;
  double comp_time = 0, comm_time = 0;
  for(int irank=0; irank<size; irank++) {
    auto tic = chrono::steady_clock::now();
    offset = N/size*((rank+irank) % size);
    hipLaunchKernelGGL(subC_calc, dimGrid, dimBlock, 0, 0, d_subA, d_subB, d_subC, N, offset, size);
    hipMemcpy(subC, d_subC, sizeof(float) * N * N/size, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    auto toc = chrono::steady_clock::now();
    comp_time += chrono::duration<double>(toc - tic).count();
    // Rotate the B strip one rank around the ring.
    // BUG FIX: receive into the separate `recv` buffer — the old code received
    // into subB (while simultaneously sending from it, which MPI forbids) and
    // then overwrote the received data with the never-written, all-zero recv.
    MPI_Request request[2];
    MPI_Isend(&subB[0], N*N/size, MPI_FLOAT, send_to, 0, MPI_COMM_WORLD, &request[0]);
    MPI_Irecv(&recv[0], N*N/size, MPI_FLOAT, recv_from, 0, MPI_COMM_WORLD, &request[1]);
    MPI_Waitall(2, request, MPI_STATUS_IGNORE);
    for (int i=0; i<N*N/size; i++)
      subB[i] = recv[i];
    // BUG FIX: upload the freshly received strip — the old code kept
    // multiplying by the stale d_subB from before the exchange.
    hipMemcpy(d_subB, subB, sizeof(float) * N * N/size, hipMemcpyHostToDevice);
    tic = chrono::steady_clock::now();
    comm_time += chrono::duration<double>(tic - toc).count();
  }
  MPI_Allgather(&subC[0], N*N/size, MPI_FLOAT, &C[0], N*N/size, MPI_FLOAT, MPI_COMM_WORLD);
  // host reference: subtract A*B, so C should be ~0 everywhere afterwards
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      for (int k=0; k<N; k++)
        C[N*i+j] -= A[N*i+k] * B[N*k+j];
  double err = 0;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      err += fabs(C[N*i+j]);
  if(rank==0) {
    double time = comp_time+comm_time;
    printf("N : %d\n",N);
    printf("comp : %lf s\n", comp_time);
    printf("comm : %lf s\n", comm_time);
    printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9);
    printf("error: %lf\n",err/N/N);
  }
  MPI_Finalize();
  hipHostFree(A);
  hipHostFree(B);
  hipHostFree(C);
  hipHostFree(subA);
  hipHostFree(subB);
  hipHostFree(subC);
  hipFree(d_A);
  hipFree(d_B);
  hipFree(d_subA);
  hipFree(d_subB);
  hipFree(d_subC);
}
| 0cf1570433f0a21516260bdce754285025569bc8.cu | #include <mpi.h>
#include <cstdio>
#include <cmath>
#include <vector>
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
#define THREADS_X 8
#define THREADS_Y 1
// Seed one curandState_t per thread; the sequence number is the global thread
// id, so each thread draws from an independent stream.
__global__ void init_rand(int seed, curandState_t* states){
  const int tid = threadIdx.x + blockDim.x * blockIdx.x;
  curand_init(seed, tid, 0, &states[tid]);
}
// Draw one standard-normal sample per thread from its private RNG state.
__global__ void randoms(curandState_t *states, float *numbers){
  const int tid = threadIdx.x + blockDim.x * blockIdx.x;
  numbers[tid] = curand_normal(&states[tid]);
}
// Copy rows [offset, offset + N/size) of the N x N matrix A into the strip
// subA (N/size rows of length N). One thread per row; extra threads return.
__global__ void subA_calc(float *subA, float *A, int N, int offset, int size){
  const int r = blockDim.x * blockIdx.x + threadIdx.x;
  const int rows = N / size;
  if(r >= rows) return;
  const float *src = A + (size_t)N * (offset + r);
  float *dst = subA + (size_t)N * r;
  for(int c = 0; c < N; ++c)
    dst[c] = src[c];
}
// Extract the column strip B[:, offset : offset + N/size] of the N x N matrix
// B into subB (N rows of length N/size). One thread per row.
__global__ void subB_calc(float *subB, float *B, int N, int offset, int size){
  const int r = blockDim.x * blockIdx.x + threadIdx.x;
  if(r >= N) return;
  const int cols = N / size;
  for(int c = 0; c < cols; ++c)
    subB[cols * r + c] = B[N * r + offset + c];
}

// Accumulate one (N/size x N/size) tile of the product subA * subB into subC
// at column offset `offset`. subA is N/size x N, subB is N x N/size, and subC
// is stored with row stride N. One thread per output row; read-modify-write.
__global__ void subC_calc(float *subA, float *subB, float *subC, int N, int offset, int size){
  const int r = blockDim.x * blockIdx.x + threadIdx.x;
  const int tile = N / size;
  if(r >= tile) return;
  for(int c = 0; c < tile; ++c){
    float acc = subC[N * r + c + offset];
    for(int k = 0; k < N; ++k)
      acc += subA[N * r + k] * subB[tile * k + c];
    subC[N * r + c + offset] = acc;
  }
}
// Distributed matrix product C = A * B (N x N) across `size` MPI ranks.
// Each rank holds a block of rows of A (subA) and a block of columns of B
// (subB); subB blocks rotate around the ring once per iteration so every
// rank eventually multiplies against every column block (SUMMA-style).
// Device kernels subA_calc/subB_calc/subC_calc (defined elsewhere in this
// file) extract the blocks and compute the partial products.
// The result is verified against a host GEMM; the printed error should be ~0.
int main(int argc, char** argv) {
  int size, rank;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  const int N = 256;

  float *A, *B, *C, *subA, *subB, *subC;               // pinned host buffers
  float *d_A, *d_B, *d_C, *d_subA, *d_subB, *d_subC;   // device buffers
  int K = 1;
  vector<float> recv(N*N/size);  // landing buffer for the ring exchange
  int grid_x = ceil((N + THREADS_X - 1) / THREADS_X);
  int grid_y = ceil((K + THREADS_Y - 1) / THREADS_Y);
  dim3 dimGrid(grid_x, grid_y);
  dim3 dimBlock(THREADS_X, THREADS_Y);

  // Pinned allocations allow fast (and async-capable) host<->device copies.
  cudaMallocHost((void**)&A, sizeof(float) * N * N);
  cudaMallocHost((void**)&B, sizeof(float) * N * N);
  cudaMallocHost((void**)&C, sizeof(float) * N * N);
  cudaMallocHost((void**)&subA, sizeof(float) * N * N/size);
  cudaMallocHost((void**)&subB, sizeof(float) * N * N/size);
  cudaMallocHost((void**)&subC, sizeof(float) * N * N/size);

  // cudaMallocHost memory is uninitialized; zero the buffers that are
  // uploaded before ever being written so no garbage reaches the device.
  for (int i = 0; i < N * N; i++) C[i] = 0.0f;
  for (int i = 0; i < N * N / size; i++) {
    subA[i] = 0.0f;
    subB[i] = 0.0f;
    subC[i] = 0.0f;
  }

  // Same pseudo-random operands on every rank (drand48, default seed).
  for (int i=0; i<N; i++) {
    for (int j=0; j<N; j++) {
      A[N*i+j] = drand48();
      B[N*i+j] = drand48();
    }
  }

  int offset = N/size*rank;
  cudaMalloc((void**)&d_A, sizeof(float) * N * N);
  cudaMalloc((void**)&d_B, sizeof(float) * N * N);
  cudaMalloc((void**)&d_C, sizeof(float) * N * N);
  cudaMalloc((void**)&d_subA, sizeof(float) * N * N/size);
  cudaMalloc((void**)&d_subB, sizeof(float) * N * N/size);
  cudaMalloc((void**)&d_subC, sizeof(float) * N * N/size);
  cudaMemcpy(d_A, A, sizeof(float) * N * N, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, B, sizeof(float) * N * N, cudaMemcpyHostToDevice);
  cudaMemcpy(d_C, C, sizeof(float) * N * N, cudaMemcpyHostToDevice);
  cudaMemcpy(d_subA, subA, sizeof(float) * N * N/size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_subB, subB, sizeof(float) * N * N/size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_subC, subC, sizeof(float) * N * N/size, cudaMemcpyHostToDevice);

  // Extract this rank's row block of A and column block of B on the device.
  subA_calc<<<dimGrid, dimBlock>>>(d_subA, d_A, N, offset, size);
  cudaMemcpy(subA, d_subA, sizeof(float) * N * N/size, cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();
  subB_calc<<<dimGrid, dimBlock>>>(d_subB, d_B, N, offset, size);
  cudaMemcpy(subB, d_subB, sizeof(float) * N * N/size, cudaMemcpyDeviceToHost);
  cudaDeviceSynchronize();

  int recv_from = (rank + 1) % size;
  int send_to = (rank - 1 + size) % size;
  double comp_time = 0, comm_time = 0;
  for (int irank=0; irank<size; irank++) {
    auto tic = chrono::steady_clock::now();
    // Which column block of C this iteration's subB corresponds to.
    offset = N/size*((rank+irank) % size);
    subC_calc<<<dimGrid, dimBlock>>>(d_subA, d_subB, d_subC, N, offset, size);
    cudaMemcpy(subC, d_subC, sizeof(float) * N * N/size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    auto toc = chrono::steady_clock::now();
    comp_time += chrono::duration<double>(toc - tic).count();

    // Rotate subB around the ring.
    // BUGFIX: receive into the separate `recv` buffer -- the original code
    // received into subB while also sending from it (overlapping buffers),
    // then overwrote subB with the never-written `recv`, zeroing the
    // operand after the first iteration.
    MPI_Request request[2];
    MPI_Isend(&subB[0], N*N/size, MPI_FLOAT, send_to, 0, MPI_COMM_WORLD, &request[0]);
    MPI_Irecv(&recv[0], N*N/size, MPI_FLOAT, recv_from, 0, MPI_COMM_WORLD, &request[1]);
    // BUGFIX: MPI_Waitall takes MPI_STATUSES_IGNORE (plural form).
    MPI_Waitall(2, request, MPI_STATUSES_IGNORE);
    for (int i=0; i<N*N/size; i++)
      subB[i] = recv[i];
    // BUGFIX: refresh the device copy -- the original never re-uploaded
    // subB, so every iteration multiplied against the stale first block.
    cudaMemcpy(d_subB, subB, sizeof(float) * N * N/size, cudaMemcpyHostToDevice);
    tic = chrono::steady_clock::now();
    comm_time += chrono::duration<double>(tic - toc).count();
  }

  // Gather all row blocks of C, then verify against a host reference GEMM:
  // after the subtraction C should be ~0 everywhere.
  MPI_Allgather(&subC[0], N*N/size, MPI_FLOAT, &C[0], N*N/size, MPI_FLOAT, MPI_COMM_WORLD);
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      for (int k=0; k<N; k++)
        C[N*i+j] -= A[N*i+k] * B[N*k+j];
  double err = 0;
  for (int i=0; i<N; i++)
    for (int j=0; j<N; j++)
      err += fabs(C[N*i+j]);

  if (rank==0) {
    double time = comp_time+comm_time;
    printf("N : %d\n",N);
    printf("comp : %lf s\n", comp_time);
    printf("comm : %lf s\n", comm_time);
    printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9);
    printf("error: %lf\n",err/N/N);
  }
  MPI_Finalize();
  cudaFreeHost(A);
  cudaFreeHost(B);
  cudaFreeHost(C);
  cudaFreeHost(subA);
  cudaFreeHost(subB);
  cudaFreeHost(subC);
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C);
  cudaFree(d_subA);
  cudaFree(d_subB);
  cudaFree(d_subC);
}
|
114a13a0616d47e50c738bbf3747ae55b09ac9d2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <chrono>
#include <random>
#include <vector>
#include <hip/hip_runtime.h>
// Stream compaction: copies the positive elements of src[0..n) into dst and
// adds their count to *nres. Uses a two-level counter -- a shared-memory
// count per block, then a single atomicAdd per block on the global counter
// -- to avoid hammering *nres from every thread. Output order in dst is
// nondeterministic.
__global__
void filter_shared (int *__restrict__ dst,
                    int *__restrict__ nres,
                    const int*__restrict__ src,
                    int n)
{
  // per-block count of elements passing the predicate
  __shared__ int l_n;
  int i = blockIdx.x * blockDim.x + threadIdx.x;

  // zero the counter
  if (threadIdx.x == 0)
    l_n = 0;
  __syncthreads();

  // get the value, evaluate the predicate, and increment the counter if
  // needed; pos is this thread's slot within the block's batch (only
  // meaningful when d > 0)
  int d, pos;
  if(i < n) {
    d = src[i];
    if(d > 0)
      pos = atomicAdd(&l_n, 1);
  }
  __syncthreads();

  // leader adds the block count to the global counter; l_n is reused to
  // hold the block's base offset into dst (the pre-add value of *nres)
  if(threadIdx.x == 0)
    l_n = atomicAdd(nres, l_n);
  __syncthreads();

  // threads with true predicates write their elements
  if(i < n && d > 0) {
    pos += l_n; // block base offset + slot within block
    dst[pos] = d;
  }
  __syncthreads(); // NOTE(review): trailing barrier looks unnecessary
}
// Compare the device compaction result against the host reference.
// Copies the device count and elements back, sorts them (device write order
// is nondeterministic), clears the device output buffer for the next run,
// and reports whether both the count and the values match.
bool check(int *d_nres, int *d_output, int h_nres, std::vector<int> &h_output) {
  int dev_count;
  hipMemcpy(&dev_count, d_nres, sizeof(int), hipMemcpyDeviceToHost);

  std::vector<int> dev_vals (dev_count);
  hipMemcpy(dev_vals.data(), d_output, sizeof(int) * dev_count, hipMemcpyDeviceToHost);

  // clear device output
  hipMemset(d_output, 0, sizeof(int) * dev_count);

  std::sort(dev_vals.begin(), dev_vals.end());
  if (h_nres != dev_count) return false;
  return std::equal(h_output.begin(), h_output.begin() + h_nres, dev_vals.begin());
}
// Driver: parse <n> <block size> <repeat>, build a shuffled input where
// roughly half the values are positive, compute the expected filtered set on
// the host, then time `repeat` launches of filter_shared and verify.
int main(int argc, char **argv) {
  if (argc != 4) {
    printf("Usage: %s <number of elements> <block size> <repeat>\n", argv[0]);
    return 1;
  }
  const int num_elems = atoi(argv[1]);
  const int block_size = atoi(argv[2]);
  const int repeat = atoi(argv[3]);

  std::vector<int> input (num_elems);
  // Generate input data: values -n/2 .. n/2-1, so about half are > 0.
  for (int i = 0; i < num_elems; i++) {
    input[i] = i - num_elems / 2;
  }
  // fixed seed -> reproducible shuffle
  std::mt19937 g;
  g.seed(19937);
  std::shuffle(input.begin(), input.end(), g);

  // Generate host output with host filtering code.
  std::vector<int> h_output (num_elems);
  int h_flt_count = 0;
  for (int i = 0; i < num_elems; i++) {
    if (input[i] > 0) {
      h_output[h_flt_count++] = input[i];
    }
  }
  // Sort the result for comparison (device write order is nondeterministic).
  std::sort(h_output.begin(), h_output.begin() + h_flt_count);

  // Filtering on a device
  int *d_input, *d_output, *d_nres;
  hipMalloc(&d_input, sizeof(int) * num_elems);
  hipMalloc(&d_output, sizeof(int) * num_elems);
  hipMalloc(&d_nres, sizeof(int));
  hipMemcpy(d_input, input.data(),
            sizeof(int) * num_elems, hipMemcpyHostToDevice);

  dim3 dimBlock (block_size);
  dim3 dimGrid ((num_elems + block_size - 1) / block_size);

  hipDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();

  // Timed loop; the global counter must be reset before every launch.
  for (int i = 0; i < repeat; i++) {
    hipMemset(d_nres, 0, sizeof(int));
    hipLaunchKernelGGL(( filter_shared), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output, d_nres, d_input, num_elems);
  }

  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of filter (shared memory) %lf (ms)\n",
         (time * 1e-6) / repeat);

  // check() validates only the last launch's output.
  bool match = check(d_nres, d_output, h_flt_count, h_output);
  printf("%s\n", match ? "PASS" : "FAIL");

  hipFree(d_input);
  hipFree(d_output);
  hipFree(d_nres);
  return 0;
}
| 114a13a0616d47e50c738bbf3747ae55b09ac9d2.cu | /**
* Copyright 1993-2017 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <chrono>
#include <random>
#include <vector>
#include <cuda.h>
// Stream compaction: copies the positive elements of src[0..n) into dst and
// adds their count to *nres. Uses a two-level counter -- a shared-memory
// count per block, then a single atomicAdd per block on the global counter
// -- to avoid hammering *nres from every thread. Output order in dst is
// nondeterministic.
__global__
void filter_shared (int *__restrict__ dst,
                    int *__restrict__ nres,
                    const int*__restrict__ src,
                    int n)
{
  // per-block count of elements passing the predicate
  __shared__ int l_n;
  int i = blockIdx.x * blockDim.x + threadIdx.x;

  // zero the counter
  if (threadIdx.x == 0)
    l_n = 0;
  __syncthreads();

  // get the value, evaluate the predicate, and increment the counter if
  // needed; pos is this thread's slot within the block's batch (only
  // meaningful when d > 0)
  int d, pos;
  if(i < n) {
    d = src[i];
    if(d > 0)
      pos = atomicAdd(&l_n, 1);
  }
  __syncthreads();

  // leader adds the block count to the global counter; l_n is reused to
  // hold the block's base offset into dst (the pre-add value of *nres)
  if(threadIdx.x == 0)
    l_n = atomicAdd(nres, l_n);
  __syncthreads();

  // threads with true predicates write their elements
  if(i < n && d > 0) {
    pos += l_n; // block base offset + slot within block
    dst[pos] = d;
  }
  __syncthreads(); // NOTE(review): trailing barrier looks unnecessary
}
// Compare the device compaction result against the host reference.
// Copies the device count and elements back, sorts them (device write order
// is nondeterministic), clears the device output buffer for the next run,
// and reports whether both the count and the values match.
bool check(int *d_nres, int *d_output, int h_nres, std::vector<int> &h_output) {
  int dev_count;
  cudaMemcpy(&dev_count, d_nres, sizeof(int), cudaMemcpyDeviceToHost);

  std::vector<int> dev_vals (dev_count);
  cudaMemcpy(dev_vals.data(), d_output, sizeof(int) * dev_count, cudaMemcpyDeviceToHost);

  // clear device output
  cudaMemset(d_output, 0, sizeof(int) * dev_count);

  std::sort(dev_vals.begin(), dev_vals.end());
  if (h_nres != dev_count) return false;
  return std::equal(h_output.begin(), h_output.begin() + h_nres, dev_vals.begin());
}
// Driver: parse <n> <block size> <repeat>, build a shuffled input where
// roughly half the values are positive, compute the expected filtered set on
// the host, then time `repeat` launches of filter_shared and verify.
int main(int argc, char **argv) {
  if (argc != 4) {
    printf("Usage: %s <number of elements> <block size> <repeat>\n", argv[0]);
    return 1;
  }
  const int num_elems = atoi(argv[1]);
  const int block_size = atoi(argv[2]);
  const int repeat = atoi(argv[3]);

  std::vector<int> input (num_elems);
  // Generate input data: values -n/2 .. n/2-1, so about half are > 0.
  for (int i = 0; i < num_elems; i++) {
    input[i] = i - num_elems / 2;
  }
  // fixed seed -> reproducible shuffle
  std::mt19937 g;
  g.seed(19937);
  std::shuffle(input.begin(), input.end(), g);

  // Generate host output with host filtering code.
  std::vector<int> h_output (num_elems);
  int h_flt_count = 0;
  for (int i = 0; i < num_elems; i++) {
    if (input[i] > 0) {
      h_output[h_flt_count++] = input[i];
    }
  }
  // Sort the result for comparison (device write order is nondeterministic).
  std::sort(h_output.begin(), h_output.begin() + h_flt_count);

  // Filtering on a device
  int *d_input, *d_output, *d_nres;
  cudaMalloc(&d_input, sizeof(int) * num_elems);
  cudaMalloc(&d_output, sizeof(int) * num_elems);
  cudaMalloc(&d_nres, sizeof(int));
  cudaMemcpy(d_input, input.data(),
             sizeof(int) * num_elems, cudaMemcpyHostToDevice);

  dim3 dimBlock (block_size);
  dim3 dimGrid ((num_elems + block_size - 1) / block_size);

  cudaDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();

  // Timed loop; the global counter must be reset before every launch.
  for (int i = 0; i < repeat; i++) {
    cudaMemset(d_nres, 0, sizeof(int));
    filter_shared<<<dimGrid, dimBlock>>>(d_output, d_nres, d_input, num_elems);
  }

  cudaDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average execution time of filter (shared memory) %lf (ms)\n",
         (time * 1e-6) / repeat);

  // check() validates only the last launch's output.
  bool match = check(d_nres, d_output, h_flt_count, h_output);
  printf("%s\n", match ? "PASS" : "FAIL");

  cudaFree(d_input);
  cudaFree(d_output);
  cudaFree(d_nres);
  return 0;
}
|
a0199851351c6b13cdb0fcdcd832fa2e46b578dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstring> // memcpy
#include <cstdio>
#include <sstream>
#include <fstream>
//#include <omp.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define DEBUG 1
using namespace std;
#include "defines.cpp"
// Round `size` up to the next power of two (5 -> 8, 8 -> 8, 1 -> 1).
// The Strassen variants require power-of-two matrix dimensions.
// Non-positive inputs are returned unchanged (matches the old behavior).
// The original also counted the number of shifts into a variable that was
// never used; that dead computation has been removed.
int shiftSize(int size) {
    if (size < 1) return size;
    int padded = 1;
    while (padded < size) {
        padded <<= 1;
    }
    return padded;
}
// Allocate a size x size integer matrix, zero-initialized.
// Each row is a separate heap allocation; the caller releases the matrix
// with one delete[] per row plus delete[] on the row-pointer array.
int ** allocMatrix(int size) {
    int ** rows = new int*[size];
    for (int r = 0; r < size; r++) {
        rows[r] = new int[size]();  // value-initialization zeroes the row
    }
    return rows;
}
// Print a size x size matrix to stdout: tab-separated values, one row per
// line, followed by a trailing blank line.
void printMatrix(int ** matrix, int size) {
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++) {
            cout << matrix[row][col] << "\t";
        }
        cout << endl;
    }
    cout << endl;
}
#ifdef alg_simd_classic
#include "simd_trivial.cpp"
#endif
#ifdef alg_simd_strassen
#include "simd_strassen.cpp"
#endif
#ifdef alg_cuda
#include "simt_trivial.cu"
#endif
#ifdef alg_cudas
#include "simt_strassen.cu"
#endif
// Dispatch to the matrix-multiplication implementation selected at compile
// time (exactly one alg_* macro is expected to be defined). Operates on the
// file-scope globals size/matA/matB/matC (declared in defines.cpp, not
// visible here -- TODO confirm).
// NOTE(review): the strassen paths reassign matC, which leaks the matrix
// allocated in main() -- confirm and free the old one if so.
void mainProccesLoop() {
#ifdef alg_simd_classic
    trivial(size, matA, matB, matC);
#endif
#ifdef alg_simd_strassen
    matC = strassen(size, matA, matB);
#endif
#ifdef alg_cuda
    trivial(size, matA, matB, matC);
#endif
#ifdef alg_cudas
    matC = strassen(size, matA, matB);
#endif
}
// Debug helper: dump the global matrices matA, matB and matC to stdout,
// space-separated, using the global `size` for both dimensions.
void debugMatrix() {
    cout << endl << "matA: " << endl;
    for(int i = 0; i < size; i++) {
        for(int j = 0; j < size; j++) {
            cout << matA[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl << "matB: " << endl;
    for(int i = 0; i < size; i++) {
        for(int j = 0; j < size; j++) {
            cout << matB[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl << "matC: " << endl;
    for(int i = 0; i < size; i++) {
        for(int j = 0; j < size; j++) {
            cout << matC[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
// Read a whitespace-separated integer matrix from filePath into a freshly
// allocated matrix and return it; returns NULL if the file cannot be opened.
// The allocation is padded to the next power of two for the Strassen
// variants; padding cells stay zero (allocMatrix zero-fills).
// NOTE(review): each row reads `prev_size` values (a file-scope global) while
// the `size` parameter is shadowed locally by the padded value -- assumes
// every input line holds at least prev_size numbers; confirm.
int ** loadFromFile(int size, string filePath) {
    string line;
    ifstream file(filePath);
    int linenumber = 0;
    // Check if its correctly open
    if (file.is_open()) {
        // Pad the allocation: Strassen needs power-of-two dimensions.
        // only for Strassen
#ifdef alg_simd_strassen
        size = shiftSize(size);
#endif
#ifdef alg_cudas
        size = shiftSize(size);
#endif
        int ** matrix = allocMatrix(size);
        int number;
        // Load all lines
        while (getline(file, line)) {
            // Procces line
            stringstream ss;
            ss << line;
            // Parse prev_size numbers from this line into the matrix row
            for (int i=0; i<prev_size; i++) {
                // Get number from line
                ss >> number;
                // Store it
                matrix[linenumber][i] = number;
            }
            linenumber++;
        }
        // Close file
        file.close();
        return matrix;
    } else {
        return NULL;
    }
}
// Placeholder for one-time setup; currently nothing to initialize.
void init() {
    // initialization
}
// Free the three global matrices row by row. Uses the global `size`, which
// by this point is the (possibly power-of-two padded) allocation size --
// assumed to match what allocMatrix/loadFromFile allocated; confirm for the
// Strassen paths where matC is replaced by the algorithm's own result.
void cleanUp() {
    // free matA
    for (int i=0; i<size; i++) {
        delete[] (matA[i]);
    }
    delete[](matA);
    // free matB
    for (int i=0; i<size; i++) {
        delete[] (matB[i]);
    }
    delete[](matB);
    // free matc
    for (int i=0; i<size; i++) {
        delete[] (matC[i]);
    }
    delete[] (matC);
}
// Entry point. Usage: ./a.out size matA_file matB_file
// Loads both operand matrices, runs the compile-time-selected algorithm,
// optionally prints the (unpadded) result, and frees the globals.
int main (int argc, char **argv) {
    // Check bad number of parameters
    if (argc != 4) {
        printf("\n\n\tusage:\t./a.out size matA matB\n\n\n");
        return 1;
    }
    // Store arguments from command line
    // Format is ./a.out size matA matB
    size = stoi(argv[1]);
    matA_file = argv[2];
    matB_file = argv[3];
    // Init default values
    init();
    // remember the user-specified size before any power-of-two padding
    prev_size = size;
    // load input matrix
    // NOTE(review): loadFromFile may return NULL (unopenable file); the
    // result is not checked -- confirm inputs always exist.
    matA = loadFromFile(size, matA_file);
    matB = loadFromFile(size, matB_file);
    matC = allocMatrix(size);
    // pad the global size for the Strassen variants, matching the padded
    // allocations done inside loadFromFile
    // only for Strassen
#ifdef alg_simd_strassen
    size = shiftSize(size);
#endif
#ifdef alg_cudas
    size = shiftSize(size);
#endif
    // double start = omp_get_wtime();
    // Run main procces
    mainProccesLoop();
    // debugMatrix
    /*if (size < 11) {
        debugMatrix();
    }*/
#ifdef DEBUG_PRINT
    printMatrix(matC, prev_size);
#endif
#ifdef RESULT
    printMatrix(matC, prev_size);
#endif
    //printf("THREADS: %d \t time: \t %f \n", THREADS, omp_get_wtime()-start);
    // Clean up data
    cleanUp();
    return 0;
}
| a0199851351c6b13cdb0fcdcd832fa2e46b578dc.cu | #include <iostream>
#include <cstring> // memcpy
#include <cstdio>
#include <sstream>
#include <fstream>
//#include <omp.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define DEBUG 1
using namespace std;
#include "defines.cpp"
// Round `size` up to the next power of two (5 -> 8, 8 -> 8, 1 -> 1).
// The Strassen variants require power-of-two matrix dimensions.
// Non-positive inputs are returned unchanged (matches the old behavior).
// The original also counted the number of shifts into a variable that was
// never used; that dead computation has been removed.
int shiftSize(int size) {
    if (size < 1) return size;
    int padded = 1;
    while (padded < size) {
        padded <<= 1;
    }
    return padded;
}
// Allocate a size x size integer matrix, zero-initialized.
// Each row is a separate heap allocation; the caller releases the matrix
// with one delete[] per row plus delete[] on the row-pointer array.
int ** allocMatrix(int size) {
    int ** rows = new int*[size];
    for (int r = 0; r < size; r++) {
        rows[r] = new int[size]();  // value-initialization zeroes the row
    }
    return rows;
}
// Print a size x size matrix to stdout: tab-separated values, one row per
// line, followed by a trailing blank line.
void printMatrix(int ** matrix, int size) {
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++) {
            cout << matrix[row][col] << "\t";
        }
        cout << endl;
    }
    cout << endl;
}
#ifdef alg_simd_classic
#include "simd_trivial.cpp"
#endif
#ifdef alg_simd_strassen
#include "simd_strassen.cpp"
#endif
#ifdef alg_cuda
#include "simt_trivial.cu"
#endif
#ifdef alg_cudas
#include "simt_strassen.cu"
#endif
// Dispatch to the matrix-multiplication implementation selected at compile
// time (exactly one alg_* macro is expected to be defined). Operates on the
// file-scope globals size/matA/matB/matC (declared in defines.cpp, not
// visible here -- TODO confirm).
// NOTE(review): the strassen paths reassign matC, which leaks the matrix
// allocated in main() -- confirm and free the old one if so.
void mainProccesLoop() {
#ifdef alg_simd_classic
    trivial(size, matA, matB, matC);
#endif
#ifdef alg_simd_strassen
    matC = strassen(size, matA, matB);
#endif
#ifdef alg_cuda
    trivial(size, matA, matB, matC);
#endif
#ifdef alg_cudas
    matC = strassen(size, matA, matB);
#endif
}
// Debug helper: dump the global matrices matA, matB and matC to stdout,
// space-separated, using the global `size` for both dimensions.
void debugMatrix() {
    cout << endl << "matA: " << endl;
    for(int i = 0; i < size; i++) {
        for(int j = 0; j < size; j++) {
            cout << matA[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl << "matB: " << endl;
    for(int i = 0; i < size; i++) {
        for(int j = 0; j < size; j++) {
            cout << matB[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl << "matC: " << endl;
    for(int i = 0; i < size; i++) {
        for(int j = 0; j < size; j++) {
            cout << matC[i][j] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
// Read a whitespace-separated integer matrix from filePath into a freshly
// allocated matrix and return it; returns NULL if the file cannot be opened.
// The allocation is padded to the next power of two for the Strassen
// variants; padding cells stay zero (allocMatrix zero-fills).
// NOTE(review): each row reads `prev_size` values (a file-scope global) while
// the `size` parameter is shadowed locally by the padded value -- assumes
// every input line holds at least prev_size numbers; confirm.
int ** loadFromFile(int size, string filePath) {
    string line;
    ifstream file(filePath);
    int linenumber = 0;
    // Check if its correctly open
    if (file.is_open()) {
        // Pad the allocation: Strassen needs power-of-two dimensions.
        // only for Strassen
#ifdef alg_simd_strassen
        size = shiftSize(size);
#endif
#ifdef alg_cudas
        size = shiftSize(size);
#endif
        int ** matrix = allocMatrix(size);
        int number;
        // Load all lines
        while (getline(file, line)) {
            // Procces line
            stringstream ss;
            ss << line;
            // Parse prev_size numbers from this line into the matrix row
            for (int i=0; i<prev_size; i++) {
                // Get number from line
                ss >> number;
                // Store it
                matrix[linenumber][i] = number;
            }
            linenumber++;
        }
        // Close file
        file.close();
        return matrix;
    } else {
        return NULL;
    }
}
// Placeholder for one-time setup; currently nothing to initialize.
void init() {
    // initialization
}
// Free the three global matrices row by row. Uses the global `size`, which
// by this point is the (possibly power-of-two padded) allocation size --
// assumed to match what allocMatrix/loadFromFile allocated; confirm for the
// Strassen paths where matC is replaced by the algorithm's own result.
void cleanUp() {
    // free matA
    for (int i=0; i<size; i++) {
        delete[] (matA[i]);
    }
    delete[](matA);
    // free matB
    for (int i=0; i<size; i++) {
        delete[] (matB[i]);
    }
    delete[](matB);
    // free matc
    for (int i=0; i<size; i++) {
        delete[] (matC[i]);
    }
    delete[] (matC);
}
// Entry point. Usage: ./a.out size matA_file matB_file
// Loads both operand matrices, runs the compile-time-selected algorithm,
// optionally prints the (unpadded) result, and frees the globals.
int main (int argc, char **argv) {
    // Check bad number of parameters
    if (argc != 4) {
        printf("\n\n\tusage:\t./a.out size matA matB\n\n\n");
        return 1;
    }
    // Store arguments from command line
    // Format is ./a.out size matA matB
    size = stoi(argv[1]);
    matA_file = argv[2];
    matB_file = argv[3];
    // Init default values
    init();
    // remember the user-specified size before any power-of-two padding
    prev_size = size;
    // load input matrix
    // NOTE(review): loadFromFile may return NULL (unopenable file); the
    // result is not checked -- confirm inputs always exist.
    matA = loadFromFile(size, matA_file);
    matB = loadFromFile(size, matB_file);
    matC = allocMatrix(size);
    // pad the global size for the Strassen variants, matching the padded
    // allocations done inside loadFromFile
    // only for Strassen
#ifdef alg_simd_strassen
    size = shiftSize(size);
#endif
#ifdef alg_cudas
    size = shiftSize(size);
#endif
    // double start = omp_get_wtime();
    // Run main procces
    mainProccesLoop();
    // debugMatrix
    /*if (size < 11) {
        debugMatrix();
    }*/
#ifdef DEBUG_PRINT
    printMatrix(matC, prev_size);
#endif
#ifdef RESULT
    printMatrix(matC, prev_size);
#endif
    //printf("THREADS: %d \t time: \t %f \n", THREADS, omp_get_wtime()-start);
    // Clean up data
    cleanUp();
    return 0;
}
|
af8bd47c807625f0db8393bd036b009f1e694523.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <functional>
#include <include/utils.cuh>
#include <layers/element_wise_function.hpp>
#include <layers/elu_layer.hpp>
#include <linalg/binary_op.cuh>
#include <linalg/unary_op.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
// ELU forward activation:
//   out = in                    for in >= 0
//   out = alpha * (e^in - 1)    for in <  0
// The generic template is declared only; each precision is specialized.
template <typename T>
__global__ void elu_kernel(const T* input, T* output, int size, T alpha);

// FP16 specialization: processes two elements per thread via __half2.
// __hlt2 yields a per-lane 1.0/0.0 mask used for a branchless blend.
// Requires ceil(size/2) threads overall; thread 0 additionally handles the
// odd trailing element when size is odd.
template <>
__global__ void elu_kernel<__half>(const __half* input, __half* output, int size, __half alpha) {
  const __half2* input2 = reinterpret_cast<const __half2*>(input);
  __half2* output2 = reinterpret_cast<__half2*>(output);
  const int size2 = size / 2;
  const __half2 alpha2 = __half2half2(alpha);
  const __half zero = __float2half(0.0f);
  const __half2 zero2 = __half2half2(zero);
  const __half one = __float2half(1.0f);
  const __half2 one2 = __half2half2(one);
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size2) {
    const __half2 in2 = input2[tid];
    // pred = 1.0 where in < 0, else 0.0
    const __half2 pred = __hlt2(in2, zero2);
    // blend: pred * elu-branch + (1 - pred) * identity-branch
    output2[tid] = pred * (alpha2 * (h2exp(in2) - one2)) + (one2 - pred) * in2;
  }
  // odd size: the last element is not covered by the __half2 pairs
  if (tid == 0 && size % 2 > 0) {
    const __half in = input[size - 1];
    output[size - 1] = (in < zero) ? alpha * (hexp(in) - one) : in;
  }
}
// ELU backward, computed in place: on entry d_in holds the forward input x,
// on exit it holds the input gradient.
//   d_in = d_out * alpha * e^x   for x <  0
//   d_in = d_out                 for x >= 0
template <typename T>
__global__ void elu_dgrad_kernel(const T* d_out, T* d_in, int size, T alpha);

// FP16 specialization: two elements per thread via __half2; __hlt2 produces
// a 1.0/0.0 mask for a branchless blend. Thread 0 handles the odd tail.
template <>
__global__ void elu_dgrad_kernel<__half>(const __half* d_out, __half* d_in, int size,
                                         __half alpha) {
  const __half2* d_out2 = reinterpret_cast<const __half2*>(d_out);
  __half2* d_in2 = reinterpret_cast<__half2*>(d_in);
  const int size2 = size / 2;
  const __half2 alpha2 = __half2half2(alpha);
  const __half zero = __float2half(0.0f);
  const __half2 zero2 = __half2half2(zero);
  const __half2 one2 = __float2half2_rn(1.0f);
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size2) {
    const __half2 in2 = d_in2[tid];   // forward input x
    const __half2 out2 = d_out2[tid]; // upstream gradient
    // pred = 1.0 where x < 0, else 0.0
    const __half2 pred = __hlt2(in2, zero2);
    d_in2[tid] = pred * (alpha2 * h2exp(in2) * out2) + (one2 - pred) * out2;
  }
  // odd size: the last element is not covered by the __half2 pairs
  if (tid == 0 && size % 2 > 0) {
    const __half in = d_in[size - 1];
    const __half out = d_out[size - 1];
    d_in[size - 1] = (in < zero) ? alpha * hexp(in) * out : out;
  }
}
} // namespace
// Construct an ELU layer over a single input/output tensor pair.
// Both tensors must hold the same number of elements; alpha is the
// negative-branch scale in f(x) = alpha * (e^x - 1).
template <typename T>
EluLayer<T>::EluLayer(const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, T alpha,
                      const std::shared_ptr<GPUResource>& gpu_resource)
    : Layer(gpu_resource), alpha_(alpha) {
  assert(in_tensor.get_num_elements() == out_tensor.get_num_elements());
  in_tensors_.push_back(in_tensor);
  out_tensors_.push_back(out_tensor);
}
// Generic (float) forward pass: elementwise ELU applied via
// MLCommon::LinAlg::unaryOp on this layer's stream. is_train is unused --
// ELU behaves identically in training and inference.
template <typename T>
void EluLayer<T>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());

  const Tensor2<T>& in_tensor = in_tensors_[0];
  Tensor2<T>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  T alpha = alpha_;

  // f(x) = x for x >= 0, alpha * (e^x - 1) otherwise
  auto fop = [alpha] __device__(T in) { return (in < 0) ? alpha * (expf(in) - 1) : in; };

  MLCommon::LinAlg::unaryOp(out_tensor.get_ptr(), in_tensor.get_ptr(), len, fop,
                            get_gpu().get_stream());
}
// FP16 forward pass: uses the vectorized elu_kernel (two elements per thread
// via __half2), so the grid covers len/2 threads; the odd trailing element is
// handled inside the kernel by thread 0.
template <>
void EluLayer<__half>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());

  const Tensor2<__half>& in_tensor = in_tensors_[0];
  Tensor2<__half>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  __half alpha = alpha_;

  dim3 block_size(256, 1, 1);
  // BUGFIX: for len <= 1 the old formula (len/2 + bs - 1)/bs produced a
  // zero-sized grid -- an invalid launch configuration that also skipped the
  // odd tail element. Always launch at least one block.
  unsigned int num_blocks = (len / 2 + block_size.x - 1) / block_size.x;
  if (num_blocks == 0) num_blocks = 1;
  dim3 grid_size(num_blocks, 1, 1);
  hipLaunchKernelGGL(( elu_kernel), dim3(grid_size), dim3(block_size), 0, get_gpu().get_stream(),
      in_tensor.get_ptr(), out_tensor.get_ptr(), len, alpha);
}
// Generic (float) backward pass, in place: in_tensor holds the forward input
// x on entry and is overwritten with the input gradient
// d_out * (x < 0 ? alpha * e^x : 1) via MLCommon::LinAlg::binaryOp.
template <typename T>
void EluLayer<T>::bprop() {
  CudaDeviceContext context(get_device_id());

  Tensor2<T>& in_tensor = in_tensors_[0];
  const Tensor2<T>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  T alpha = alpha_;

  // d_out = upstream gradient, d_in = forward input x
  auto bop = [alpha] __device__(T d_out, T d_in) {
    return (d_in < 0) ? alpha * expf(d_in) * d_out : d_out;
  };

  MLCommon::LinAlg::binaryOp(in_tensor.get_ptr(), out_tensor.get_ptr(), in_tensor.get_ptr(), len,
                             bop, get_gpu().get_stream());
}
// FP16 backward pass: vectorized elu_dgrad_kernel overwrites in_tensor
// (which holds the forward input) with the input gradient, two elements per
// thread via __half2; the odd tail is handled by thread 0 in the kernel.
template <>
void EluLayer<__half>::bprop() {
  CudaDeviceContext context(get_device_id());

  Tensor2<__half>& in_tensor = in_tensors_[0];
  const Tensor2<__half>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  __half alpha = alpha_;

  dim3 block_size(256, 1, 1);
  // BUGFIX: for len <= 1 the old formula (len/2 + bs - 1)/bs produced a
  // zero-sized grid -- an invalid launch configuration that also skipped the
  // odd tail element. Always launch at least one block.
  unsigned int num_blocks = (len / 2 + block_size.x - 1) / block_size.x;
  if (num_blocks == 0) num_blocks = 1;
  dim3 grid_size(num_blocks, 1, 1);
  hipLaunchKernelGGL(( elu_dgrad_kernel), dim3(grid_size), dim3(block_size), 0, get_gpu().get_stream(),
      out_tensor.get_ptr(), in_tensor.get_ptr(), len, alpha);
}
template class EluLayer<float>;
template class EluLayer<__half>;
} // namespace HugeCTR
| af8bd47c807625f0db8393bd036b009f1e694523.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <functional>
#include <include/utils.cuh>
#include <layers/element_wise_function.hpp>
#include <layers/elu_layer.hpp>
#include <linalg/binary_op.cuh>
#include <linalg/unary_op.cuh>
#include <utils.hpp>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
// ELU forward activation:
//   out = in                    for in >= 0
//   out = alpha * (e^in - 1)    for in <  0
// The generic template is declared only; each precision is specialized.
template <typename T>
__global__ void elu_kernel(const T* input, T* output, int size, T alpha);

// FP16 specialization: processes two elements per thread via __half2.
// __hlt2 yields a per-lane 1.0/0.0 mask used for a branchless blend.
// Requires ceil(size/2) threads overall; thread 0 additionally handles the
// odd trailing element when size is odd.
template <>
__global__ void elu_kernel<__half>(const __half* input, __half* output, int size, __half alpha) {
  const __half2* input2 = reinterpret_cast<const __half2*>(input);
  __half2* output2 = reinterpret_cast<__half2*>(output);
  const int size2 = size / 2;
  const __half2 alpha2 = __half2half2(alpha);
  const __half zero = __float2half(0.0f);
  const __half2 zero2 = __half2half2(zero);
  const __half one = __float2half(1.0f);
  const __half2 one2 = __half2half2(one);
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size2) {
    const __half2 in2 = input2[tid];
    // pred = 1.0 where in < 0, else 0.0
    const __half2 pred = __hlt2(in2, zero2);
    // blend: pred * elu-branch + (1 - pred) * identity-branch
    output2[tid] = pred * (alpha2 * (h2exp(in2) - one2)) + (one2 - pred) * in2;
  }
  // odd size: the last element is not covered by the __half2 pairs
  if (tid == 0 && size % 2 > 0) {
    const __half in = input[size - 1];
    output[size - 1] = (in < zero) ? alpha * (hexp(in) - one) : in;
  }
}
// ELU backward, computed in place: on entry d_in holds the forward input x,
// on exit it holds the input gradient.
//   d_in = d_out * alpha * e^x   for x <  0
//   d_in = d_out                 for x >= 0
template <typename T>
__global__ void elu_dgrad_kernel(const T* d_out, T* d_in, int size, T alpha);

// FP16 specialization: two elements per thread via __half2; __hlt2 produces
// a 1.0/0.0 mask for a branchless blend. Thread 0 handles the odd tail.
template <>
__global__ void elu_dgrad_kernel<__half>(const __half* d_out, __half* d_in, int size,
                                         __half alpha) {
  const __half2* d_out2 = reinterpret_cast<const __half2*>(d_out);
  __half2* d_in2 = reinterpret_cast<__half2*>(d_in);
  const int size2 = size / 2;
  const __half2 alpha2 = __half2half2(alpha);
  const __half zero = __float2half(0.0f);
  const __half2 zero2 = __half2half2(zero);
  const __half2 one2 = __float2half2_rn(1.0f);
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < size2) {
    const __half2 in2 = d_in2[tid];   // forward input x
    const __half2 out2 = d_out2[tid]; // upstream gradient
    // pred = 1.0 where x < 0, else 0.0
    const __half2 pred = __hlt2(in2, zero2);
    d_in2[tid] = pred * (alpha2 * h2exp(in2) * out2) + (one2 - pred) * out2;
  }
  // odd size: the last element is not covered by the __half2 pairs
  if (tid == 0 && size % 2 > 0) {
    const __half in = d_in[size - 1];
    const __half out = d_out[size - 1];
    d_in[size - 1] = (in < zero) ? alpha * hexp(in) * out : out;
  }
}
} // namespace
// Construct an ELU layer over a single input/output tensor pair.
// Both tensors must hold the same number of elements; alpha is the
// negative-branch scale in f(x) = alpha * (e^x - 1).
template <typename T>
EluLayer<T>::EluLayer(const Tensor2<T>& in_tensor, const Tensor2<T>& out_tensor, T alpha,
                      const std::shared_ptr<GPUResource>& gpu_resource)
    : Layer(gpu_resource), alpha_(alpha) {
  assert(in_tensor.get_num_elements() == out_tensor.get_num_elements());
  in_tensors_.push_back(in_tensor);
  out_tensors_.push_back(out_tensor);
}
// Generic (float) forward pass: elementwise ELU applied via
// MLCommon::LinAlg::unaryOp on this layer's stream. is_train is unused --
// ELU behaves identically in training and inference.
template <typename T>
void EluLayer<T>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());

  const Tensor2<T>& in_tensor = in_tensors_[0];
  Tensor2<T>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  T alpha = alpha_;

  // f(x) = x for x >= 0, alpha * (e^x - 1) otherwise
  auto fop = [alpha] __device__(T in) { return (in < 0) ? alpha * (expf(in) - 1) : in; };

  MLCommon::LinAlg::unaryOp(out_tensor.get_ptr(), in_tensor.get_ptr(), len, fop,
                            get_gpu().get_stream());
}
// FP16 forward pass: uses the vectorized elu_kernel (two elements per thread
// via __half2), so the grid covers len/2 threads; the odd trailing element is
// handled inside the kernel by thread 0.
template <>
void EluLayer<__half>::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());

  const Tensor2<__half>& in_tensor = in_tensors_[0];
  Tensor2<__half>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  __half alpha = alpha_;

  dim3 block_size(256, 1, 1);
  // BUGFIX: for len <= 1 the old formula (len/2 + bs - 1)/bs produced a
  // zero-sized grid -- an invalid launch configuration that also skipped the
  // odd tail element. Always launch at least one block.
  unsigned int num_blocks = (len / 2 + block_size.x - 1) / block_size.x;
  if (num_blocks == 0) num_blocks = 1;
  dim3 grid_size(num_blocks, 1, 1);
  elu_kernel<<<grid_size, block_size, 0, get_gpu().get_stream()>>>(
      in_tensor.get_ptr(), out_tensor.get_ptr(), len, alpha);
}
// Generic (float) backward pass, in place: in_tensor holds the forward input
// x on entry and is overwritten with the input gradient
// d_out * (x < 0 ? alpha * e^x : 1) via MLCommon::LinAlg::binaryOp.
template <typename T>
void EluLayer<T>::bprop() {
  CudaDeviceContext context(get_device_id());

  Tensor2<T>& in_tensor = in_tensors_[0];
  const Tensor2<T>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  T alpha = alpha_;

  // d_out = upstream gradient, d_in = forward input x
  auto bop = [alpha] __device__(T d_out, T d_in) {
    return (d_in < 0) ? alpha * expf(d_in) * d_out : d_out;
  };

  MLCommon::LinAlg::binaryOp(in_tensor.get_ptr(), out_tensor.get_ptr(), in_tensor.get_ptr(), len,
                             bop, get_gpu().get_stream());
}
// FP16 backward pass: vectorized elu_dgrad_kernel overwrites in_tensor
// (which holds the forward input) with the input gradient, two elements per
// thread via __half2; the odd tail is handled by thread 0 in the kernel.
template <>
void EluLayer<__half>::bprop() {
  CudaDeviceContext context(get_device_id());

  Tensor2<__half>& in_tensor = in_tensors_[0];
  const Tensor2<__half>& out_tensor = out_tensors_[0];

  const int len = in_tensor.get_num_elements();

  __half alpha = alpha_;

  dim3 block_size(256, 1, 1);
  // BUGFIX: for len <= 1 the old formula (len/2 + bs - 1)/bs produced a
  // zero-sized grid -- an invalid launch configuration that also skipped the
  // odd tail element. Always launch at least one block.
  unsigned int num_blocks = (len / 2 + block_size.x - 1) / block_size.x;
  if (num_blocks == 0) num_blocks = 1;
  dim3 grid_size(num_blocks, 1, 1);
  elu_dgrad_kernel<<<grid_size, block_size, 0, get_gpu().get_stream()>>>(
      out_tensor.get_ptr(), in_tensor.get_ptr(), len, alpha);
}
template class EluLayer<float>;
template class EluLayer<__half>;
} // namespace HugeCTR
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.