hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
3749a5f2611cb861debf883a072ccf147e8ba177.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
__global__ void sobelShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto i = blockIdx.x * (blockDim.x-2) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y-2) + threadIdx.y;
auto li = threadIdx.x;
auto lj = threadIdx.y;
extern __shared__ unsigned char sh[];
if( i < w && j < h ) {
// on s'occupe du rouge
sh[3 * (lj * blockDim.x + li) ] = data[ 3 * ( j * w + i ) ];
sh[3 * (lj * blockDim.x + li) + 1 ] = data[ 3 * ( j * w + i ) + 1];
sh[3 * (lj * blockDim.x + li) + 2 ] = data[ 3 * ( j * w + i ) + 2 ];
__syncthreads();
auto ww = blockDim.x;
if( li > 0 && li < (blockDim.x - 1) && lj > 0 && lj < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto hh = sh[ ((lj-1)*ww + li - 1)* 3 + c ] - sh[ ((lj-1)*ww + li + 1) * 3 + c ]
+ 2 * sh[ (lj*ww + li - 1) * 3 + c ] - 2* sh[ (lj*ww+li+1) * 3 + c]
+ sh[ ((lj+1)*ww + li -1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto vv = sh[ ((lj-1)*ww + li - 1) * 3 + c ] - sh[ ((lj+1)*ww + li - 1) * 3 + c ]
+ 2 * sh[ ((lj-1)*ww + li) * 3 + c ] - 2* sh[ ((lj+1)*ww+li) * 3 + c ]
+ sh[ ((lj-1)*ww + li +1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto res = hh * hh + vv * vv;
res = res > 255*255 ? res = 255*255 : res;
out[ (j * w + i) * 3 + c ] = sqrt( (float)res );
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
hipMalloc( &rgb_d, 3 * rows * cols);
hipMalloc( &out, 3 * rows * cols );
hipMemcpy( rgb_d, rgb, 3 * rows * cols, hipMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 bu( 3 * (( cols - 1) / (t.x-2) + 1) , ( rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 16, 16 );
// dim3 bu( 3 * 2 * (( cols - 1) / (t.x-2) + 1) , (2 * rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 4, 4 );
// dim3 bu( 3 * 8 *(( cols - 1) / (t.x-2) + 1) , (8 * rows - 1 ) / (t.y-2) + 1 );
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start );
hipLaunchKernelGGL(( sobelShared), dim3(bu), dim3(t), 3*t.x*t.y , 0, rgb_d, out, cols, rows );
hipMemcpy(g.data(), out, 3 * rows * cols, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
auto hipError_t = hipGetLastError();
// Si pas d'erreur dtecte dans le bordel ben on aura hipSuccess
if (hipError_t != hipSuccess){
std::cout << hipGetErrorName(hipError_t) << std::endl;
std::cout << hipGetErrorString(hipError_t) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
hipEventRecord( stop );
hipEventSynchronize( stop );
float duration = 0.0f;
hipEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outSobelShared.jpg", m_out );
hipFree( rgb_d);
hipFree ( out);
return 0;
}
| 3749a5f2611cb861debf883a072ccf147e8ba177.cu | #include <opencv2/opencv.hpp>
#include <vector>
__global__ void sobelShared ( unsigned char * data, unsigned char * out, std::size_t w, std::size_t h) {
auto i = blockIdx.x * (blockDim.x-2) + threadIdx.x;
auto j = blockIdx.y * (blockDim.y-2) + threadIdx.y;
auto li = threadIdx.x;
auto lj = threadIdx.y;
extern __shared__ unsigned char sh[];
if( i < w && j < h ) {
// on s'occupe du rouge
sh[3 * (lj * blockDim.x + li) ] = data[ 3 * ( j * w + i ) ];
sh[3 * (lj * blockDim.x + li) + 1 ] = data[ 3 * ( j * w + i ) + 1];
sh[3 * (lj * blockDim.x + li) + 2 ] = data[ 3 * ( j * w + i ) + 2 ];
__syncthreads();
auto ww = blockDim.x;
if( li > 0 && li < (blockDim.x - 1) && lj > 0 && lj < (blockDim.y - 1) )
{
for (auto c = 0; c < 3; ++c){
auto hh = sh[ ((lj-1)*ww + li - 1)* 3 + c ] - sh[ ((lj-1)*ww + li + 1) * 3 + c ]
+ 2 * sh[ (lj*ww + li - 1) * 3 + c ] - 2* sh[ (lj*ww+li+1) * 3 + c]
+ sh[ ((lj+1)*ww + li -1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto vv = sh[ ((lj-1)*ww + li - 1) * 3 + c ] - sh[ ((lj+1)*ww + li - 1) * 3 + c ]
+ 2 * sh[ ((lj-1)*ww + li) * 3 + c ] - 2* sh[ ((lj+1)*ww+li) * 3 + c ]
+ sh[ ((lj-1)*ww + li +1) * 3 + c] - sh[ ((lj+1)*ww +li + 1) * 3 + c];
auto res = hh * hh + vv * vv;
res = res > 255*255 ? res = 255*255 : res;
out[ (j * w + i) * 3 + c ] = sqrt( (float)res );
}
}
}
}
int main()
{
cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
std::vector< unsigned char > g( 3 * rows * cols );
cv::Mat m_out( rows, cols, CV_8UC3, g.data() );
unsigned char * rgb_d;
unsigned char * out;
cudaMalloc( &rgb_d, 3 * rows * cols);
cudaMalloc( &out, 3 * rows * cols );
cudaMemcpy( rgb_d, rgb, 3 * rows * cols, cudaMemcpyHostToDevice );
dim3 t( 32, 32 );
dim3 bu( 3 * (( cols - 1) / (t.x-2) + 1) , ( rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 16, 16 );
// dim3 bu( 3 * 2 * (( cols - 1) / (t.x-2) + 1) , (2 * rows - 1 ) / (t.y-2) + 1 );
// dim3 t( 4, 4 );
// dim3 bu( 3 * 8 *(( cols - 1) / (t.x-2) + 1) , (8 * rows - 1 ) / (t.y-2) + 1 );
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start );
sobelShared<<< bu, t, 3*t.x*t.y >>>( rgb_d, out, cols, rows );
cudaMemcpy(g.data(), out, 3 * rows * cols, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
auto cudaError = cudaGetLastError();
// Si pas d'erreur détectée dans le bordel ben on aura cudaSuccess
if (cudaError != cudaSuccess){
std::cout << cudaGetErrorName(cudaError) << std::endl;
std::cout << cudaGetErrorString(cudaError) << std::endl;
}
else {
std::cout << "Aucune erreur" << std::endl;
}
cudaEventRecord( stop );
cudaEventSynchronize( stop );
float duration = 0.0f;
cudaEventElapsedTime( &duration, start, stop );
std::cout << "Total: " << duration << "ms\n";
cv::imwrite( "outSobelShared.jpg", m_out );
cudaFree( rgb_d);
cudaFree ( out);
return 0;
}
|
34d9f46e51a700a19948ca6f23174b904b85ab68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//****************************
//File:MetricCenterGpu.cu
//author:Salil Rajadhyaksha
//version:1-Nov-2015
//***************************
//number of thread per block
#define NT 1024
//Structue for 2-D points
typedef struct
{
double x;
double y;
}vector_t;
//Structure for result to store radius and index.
typedef struct
{
double radius;
int index;
}ResultTuple;
//per thread variable in shared memory to store the max dist found by each thread
__shared__ ResultTuple points[NT];
/**
* Calculate the euclidean distance between two points.
*
* @param a :Index of first point.
* @param b :Index of second point.
* @param xy :array that holds the points.
* @return the euclidean distance.
*/
__device__ double calculateEuclideanDistance(int a,int b,vector_t *xy)
{
vector_t *pointA=&xy[a];
vector_t *pointB=&xy[b];
double dx=pointA->x - pointB->x;
double dy=pointA->y - pointB->y;
return sqrt(dx*dx+dy*dy);
}
/*
find maximum of the two points by comparing their radius
@param a: first point;
@param b: secon point;
@return a:The max of a and b stored in a;
*/
__device__ ResultTuple *findMaxDistance( ResultTuple *a ,ResultTuple *b)
{
if(a->radius < b->radius)
a->radius=b->radius;
return a;
}
/*
Return the minimum of the two points.
@param :a first point;
@param :b second point;
@returns :a:the minimum stored in a;
*/
__device__ ResultTuple *reduce(ResultTuple *a, ResultTuple *b)
{
if(a->radius>b->radius||a->radius==-1)
{
a->radius=b->radius;
a->index=b->index;
}
return a;
}
/**
return maximum between two doubles
@param x: pointer to first double value;
@param y: second value
@return max stored in x;
*/
__device__ double *returnMax(double *x,double y)
{
if(*x<y)
*x=y;
return x;
}
/**
Device Kernel to find the smallest distance between a set of points per block.
Called with a one dimensional grid.
Blocks= number of multiprocessors.
thread=1024 per block.
Each block calculates max distance for one point at a time in a for scheduled in leapfrog.
The threads calculate the max distance for that one point in the inner for schdeuled again in leapfrog.
@param :xy-List of points
@param :N- total number of points.
@param :result- the array to store the semi-final result per block;
*/
extern "C" __global__ void calculateRadius(vector_t *xy,int N,ResultTuple* result)
{
int thr,bID,numberOfBlocks;
double max;
thr=threadIdx.x;
bID=blockIdx.x;
numberOfBlocks=gridDim.x;
for(unsigned long long int i=bID;i<=N;i+=numberOfBlocks)//schedule points to blocks in leapfrog pattern.
{
max=0.00;
for(unsigned long long int j=thr;j<N;j+=NT)//calculate distance of for current point with respect to all points in leapfrog.
{
if(j==i)
continue;
returnMax(&max,calculateEuclideanDistance(i,j,xy)); //call to find the max passing the indices of points and current max.
}
points[thr]=(ResultTuple){max,i};//storing the max in the shared memory.
__syncthreads();
//calculate the maximum for that point via shared memory parallel reduction.
for (int k =NT/2;k>0;k>>=1)
{
if (thr<k)
{
findMaxDistance(&points[thr],&points[thr+k]);
}
__syncthreads();
}
//single threaded section.
if(thr==0)
{
reduce(&result[bID],&points[thr]);//reduce to store if current point is less than the min so far for this block.
}
}
} | 34d9f46e51a700a19948ca6f23174b904b85ab68.cu | //****************************
//File:MetricCenterGpu.cu
//author:Salil Rajadhyaksha
//version:1-Nov-2015
//***************************
//number of thread per block
#define NT 1024
//Structue for 2-D points
typedef struct
{
double x;
double y;
}vector_t;
//Structure for result to store radius and index.
typedef struct
{
double radius;
int index;
}ResultTuple;
//per thread variable in shared memory to store the max dist found by each thread
__shared__ ResultTuple points[NT];
/**
* Calculate the euclidean distance between two points.
*
* @param a :Index of first point.
* @param b :Index of second point.
* @param xy :array that holds the points.
* @return the euclidean distance.
*/
__device__ double calculateEuclideanDistance(int a,int b,vector_t *xy)
{
vector_t *pointA=&xy[a];
vector_t *pointB=&xy[b];
double dx=pointA->x - pointB->x;
double dy=pointA->y - pointB->y;
return sqrt(dx*dx+dy*dy);
}
/*
find maximum of the two points by comparing their radius
@param a: first point;
@param b: secon point;
@return a:The max of a and b stored in a;
*/
__device__ ResultTuple *findMaxDistance( ResultTuple *a ,ResultTuple *b)
{
if(a->radius < b->radius)
a->radius=b->radius;
return a;
}
/*
Return the minimum of the two points.
@param :a first point;
@param :b second point;
@returns :a:the minimum stored in a;
*/
__device__ ResultTuple *reduce(ResultTuple *a, ResultTuple *b)
{
if(a->radius>b->radius||a->radius==-1)
{
a->radius=b->radius;
a->index=b->index;
}
return a;
}
/**
return maximum between two doubles
@param x: pointer to first double value;
@param y: second value
@return max stored in x;
*/
__device__ double *returnMax(double *x,double y)
{
if(*x<y)
*x=y;
return x;
}
/**
Device Kernel to find the smallest distance between a set of points per block.
Called with a one dimensional grid.
Blocks= number of multiprocessors.
thread=1024 per block.
Each block calculates max distance for one point at a time in a for scheduled in leapfrog.
The threads calculate the max distance for that one point in the inner for schdeuled again in leapfrog.
@param :xy-List of points
@param :N- total number of points.
@param :result- the array to store the semi-final result per block;
*/
extern "C" __global__ void calculateRadius(vector_t *xy,int N,ResultTuple* result)
{
int thr,bID,numberOfBlocks;
double max;
thr=threadIdx.x;
bID=blockIdx.x;
numberOfBlocks=gridDim.x;
for(unsigned long long int i=bID;i<=N;i+=numberOfBlocks)//schedule points to blocks in leapfrog pattern.
{
max=0.00;
for(unsigned long long int j=thr;j<N;j+=NT)//calculate distance of for current point with respect to all points in leapfrog.
{
if(j==i)
continue;
returnMax(&max,calculateEuclideanDistance(i,j,xy)); //call to find the max passing the indices of points and current max.
}
points[thr]=(ResultTuple){max,i};//storing the max in the shared memory.
__syncthreads();
//calculate the maximum for that point via shared memory parallel reduction.
for (int k =NT/2;k>0;k>>=1)
{
if (thr<k)
{
findMaxDistance(&points[thr],&points[thr+k]);
}
__syncthreads();
}
//single threaded section.
if(thr==0)
{
reduce(&result[bID],&points[thr]);//reduce to store if current point is less than the min so far for this block.
}
}
} |
392eabf6a113b3cf177407baf182540906001b4e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/padding.hpp>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>
namespace cudf
{
namespace strings
{
namespace detail
{
namespace
{
struct compute_pad_output_length_fn
{
column_device_view d_strings;
size_type width;
size_type fill_char_size;
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
string_view d_str = d_strings.element<string_view>(idx);
size_type bytes = d_str.size_bytes();
size_type length = d_str.length();
if( width > length ) // no truncating
bytes += fill_char_size * (width - length); // add padding
return bytes;
}
};
}
//
std::unique_ptr<column> pad( strings_column_view const& strings,
size_type width, pad_side side = pad_side::RIGHT,
std::string const& fill_char = " ",
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( !fill_char.empty(), "fill_char parameter must not be empty" );
char_utf8 d_fill_char = 0;
size_type fill_char_size = to_char_utf8(fill_char.c_str(), d_fill_char );
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create null_mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr );
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
compute_pad_output_length_fn{d_strings,width,fill_char_size} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
if( side==pad_side::LEFT)
{
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_fill_char, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* ptr = d_chars + d_offsets[idx];
while( length++ < width )
ptr += from_char_utf8(d_fill_char,ptr);
copy_string( ptr, d_str );
});
}
else if( side==pad_side::RIGHT )
{
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_fill_char, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* ptr = d_chars + d_offsets[idx];
ptr = copy_string(ptr, d_str);
while( length++ < width )
ptr += from_char_utf8(d_fill_char,ptr);
});
}
else if( side==pad_side::BOTH )
{
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_fill_char, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
char* ptr = d_chars + d_offsets[idx];
int32_t pad = static_cast<int32_t>(width - d_str.length());
auto right_pad = pad/2;
auto left_pad = pad - right_pad;
while( left_pad-- > 0 )
ptr += from_char_utf8(d_fill_char,ptr);
ptr = copy_string(ptr, d_str);
while( right_pad-- > 0 )
ptr += from_char_utf8(d_fill_char,ptr);
});
}
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
//
// Although zfill is identical to pad(width,'left','0') this implementation is a little
// more optimized since it does not need to calculate the size of the fillchar and can
// directly write it to the output buffer without extra logic for multi-byte UTF-8 chars.
//
std::unique_ptr<column> zfill( strings_column_view const& strings,
size_type width,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy bitmask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr );
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
compute_pad_output_length_fn{d_strings,width,1} ); // fillchar is 1 byte
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* out_ptr = d_chars + d_offsets[idx];
while( length++ < width )
*out_ptr++ = '0'; // prepend zero char
copy_string(out_ptr,d_str);
});
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
} // namespace detail
// external APIs
std::unique_ptr<column> pad( strings_column_view const& strings,
size_type width, pad_side side,
std::string const& fill_char,
rmm::mr::device_memory_resource* mr )
{
return detail::pad(strings,width,side,fill_char,mr);
}
std::unique_ptr<column> zfill( strings_column_view const& strings,
size_type width,
rmm::mr::device_memory_resource* mr )
{
return detail::zfill(strings,width,mr);
}
} // namespace strings
} // namespace cudf
| 392eabf6a113b3cf177407baf182540906001b4e.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/padding.hpp>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>
namespace cudf
{
namespace strings
{
namespace detail
{
namespace
{
struct compute_pad_output_length_fn
{
column_device_view d_strings;
size_type width;
size_type fill_char_size;
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
string_view d_str = d_strings.element<string_view>(idx);
size_type bytes = d_str.size_bytes();
size_type length = d_str.length();
if( width > length ) // no truncating
bytes += fill_char_size * (width - length); // add padding
return bytes;
}
};
}
//
std::unique_ptr<column> pad( strings_column_view const& strings,
size_type width, pad_side side = pad_side::RIGHT,
std::string const& fill_char = " ",
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( !fill_char.empty(), "fill_char parameter must not be empty" );
char_utf8 d_fill_char = 0;
size_type fill_char_size = to_char_utf8(fill_char.c_str(), d_fill_char );
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create null_mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr );
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
compute_pad_output_length_fn{d_strings,width,fill_char_size} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
if( side==pad_side::LEFT)
{
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_fill_char, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* ptr = d_chars + d_offsets[idx];
while( length++ < width )
ptr += from_char_utf8(d_fill_char,ptr);
copy_string( ptr, d_str );
});
}
else if( side==pad_side::RIGHT )
{
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_fill_char, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* ptr = d_chars + d_offsets[idx];
ptr = copy_string(ptr, d_str);
while( length++ < width )
ptr += from_char_utf8(d_fill_char,ptr);
});
}
else if( side==pad_side::BOTH )
{
thrust::for_each_n(execpol->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_fill_char, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
char* ptr = d_chars + d_offsets[idx];
int32_t pad = static_cast<int32_t>(width - d_str.length());
auto right_pad = pad/2;
auto left_pad = pad - right_pad;
while( left_pad-- > 0 )
ptr += from_char_utf8(d_fill_char,ptr);
ptr = copy_string(ptr, d_str);
while( right_pad-- > 0 )
ptr += from_char_utf8(d_fill_char,ptr);
});
}
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
//
// Although zfill is identical to pad(width,'left','0') this implementation is a little
// more optimized since it does not need to calculate the size of the fillchar and can
// directly write it to the output buffer without extra logic for multi-byte UTF-8 chars.
//
std::unique_ptr<column> zfill( strings_column_view const& strings,
size_type width,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// copy bitmask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr );
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
compute_pad_output_length_fn{d_strings,width,1} ); // fillchar is 1 byte
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<cudf::size_type>(0), strings_count,
[d_strings, width, d_offsets, d_chars] __device__ (size_type idx) {
if( d_strings.is_null(idx) )
return;
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* out_ptr = d_chars + d_offsets[idx];
while( length++ < width )
*out_ptr++ = '0'; // prepend zero char
copy_string(out_ptr,d_str);
});
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
} // namespace detail
// external APIs
std::unique_ptr<column> pad( strings_column_view const& strings,
size_type width, pad_side side,
std::string const& fill_char,
rmm::mr::device_memory_resource* mr )
{
return detail::pad(strings,width,side,fill_char,mr);
}
std::unique_ptr<column> zfill( strings_column_view const& strings,
size_type width,
rmm::mr::device_memory_resource* mr )
{
return detail::zfill(strings,width,mr);
}
} // namespace strings
} // namespace cudf
|
ce84708b8c4933cb8b6692fbdc86c7fc73376252.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]);
float right = min(a[1], b[1]);
float width = max(right - left, 0.f);
float interS = width;
float Sa = (a[1] - a[0]);
float Sb = (b[1] - b[0]);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 2];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 2 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 2 + 0];
block_boxes[threadIdx.x * 2 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 2 + 1];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 2;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 2) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes,const at::Tensor scores, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
//auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
| ce84708b8c4933cb8b6692fbdc86c7fc73376252.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]);
float right = min(a[1], b[1]);
float width = max(right - left, 0.f);
float interS = width;
float Sa = (a[1] - a[0]);
float Sb = (b[1] - b[0]);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 2];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 2 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 2 + 0];
block_boxes[threadIdx.x * 2 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 2 + 1];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 2;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 2) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes,const at::Tensor scores, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
//auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
69ebba9c0e291adbef59f415468b1a1df85b7a8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Main.cuh"
extern string OUTPUTDIR;
extern double ROCHESMOOTHING, THICKNESSSMOOTHING, FLARINGINDEX;
extern double *CellAbscissa, *CellOrdinate, *forcesxi, *forcesyi, *forcesxo, *forcesyo;
extern double *CellAbscissa_d, *CellOrdinate_d;
extern double *fxi_d, *fxo_d, *fyi_d, *fyo_d;
extern double *Rmed, *Rmed_d, *Surf, *Surf_d, *example;
extern double *Dens_d;
extern int RocheSmoothing, size_grid, NRAD, NSEC, SelfGravity;
extern dim3 dimGrid2, dimBlock2;
__host__ void UpdateLog (Force *force, PlanetarySystem *sys, double *Dens, double *Energy, int TimeStep,
double PhysicalTime, int dimfxy)
{
FILE *out;
double x, y, r, m, vx, vy, smoothing, a, rh;
double *globalforce;
char filename[500];
char filename2[500];
int i, nb;
nb=sys->nb;
string input;
input = OUTPUTDIR +"tqwk";
strncpy(filename, input.c_str(), sizeof(filename));
filename[sizeof(filename)-1]=0;
for (i = 0; i < nb; i++){
x = sys->x[i];
y = sys->y[i];
vx = sys->vx[i];
vy = sys->vy[i];
r = sqrt(x*x+y*y);
m = sys->mass[i];
a = sqrt(x*x+y*y);
rh = pow(m/3., 1./3.)*a+1e-15;
if (RocheSmoothing) smoothing = r*pow(m/3.,1./3.)*ROCHESMOOTHING;
else smoothing = Compute_smoothing(r);
ComputeForce (force, Dens, x, y, smoothing, m, dimfxy, a, rh);
globalforce = force->GlobalForce;
sprintf (filename2, "%s%d.dat", filename,i);
out = fopen(filename2, "a");
if (out == NULL){
fprintf(stderr, "Can't open %s\n",filename2 );
fprintf(stderr, "Aborted.\n");
}
fprintf(out, "%d\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\n", TimeStep, \
x*force->fy_inner-y*force->fx_inner, \
x*force->fy_outer-y*force->fx_outer, \
x*force->fy_ex_inner-y*force->fx_ex_inner, \
x*force->fy_ex_outer-y*force->fx_ex_outer, \
vx*force->fx_inner+vy*force->fy_inner , \
vx*force->fx_outer+vy*force->fy_outer , \
vx*force->fx_ex_inner+vy*force->fy_ex_inner , \
vx*force->fx_ex_outer+vy*force->fy_ex_outer , PhysicalTime);
fclose (out);
if (!SelfGravity){
for (int k = 0; k < dimfxy; k++) {
sprintf( filename2, "%s%d_%d.dat", filename, i, k);
out = fopen(filename2, "a");
if (out == NULL){
fprintf(stderr, "Can't open %s\n", filename2);
fprintf(stderr, "Aborted.\n");
}
fprintf(out, "%d\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\n", TimeStep, \
x*globalforce[2*dimfxy+k]-y*globalforce[k], \
x*globalforce[3*dimfxy+k]-y*globalforce[dimfxy+k], \
vx*globalforce[k]+vy*globalforce[2*dimfxy+k], \
vx*globalforce[dimfxy+k]+vy*globalforce[3*dimfxy+k], PhysicalTime);
fclose (out);
}
}
}
}
__host__ Force *AllocateForce (int dimfxy)
{
Force *force;
double *globalforce;
force = (Force *)malloc(sizeof(Force));
globalforce = (double *)malloc(sizeof(double)*4*dimfxy);
for (int i = 0; i < 4*dimfxy; i++)
globalforce[i] = 0.0;
force->GlobalForce = globalforce;
return force;
}
__host__ void ComputeForce (Force *force, double *Dens, double x, double y, double rsmoothing,
double mass, int dimfxy, double a, double rh)
{
double *globalforce;
int k;
globalforce = force->GlobalForce;
for (k = 0; k < dimfxy; k++) {
gpuErrchk(hipMemset(fxi_d, 0, NRAD*NSEC*sizeof(double)));
gpuErrchk(hipMemset(fxo_d, 0, NRAD*NSEC*sizeof(double)));
gpuErrchk(hipMemset(fyi_d, 0, NRAD*NSEC*sizeof(double)));
gpuErrchk(hipMemset(fyo_d, 0, NRAD*NSEC*sizeof(double)));
hipLaunchKernelGGL(( ComputeForceKernel), dim3(dimGrid2), dim3(dimBlock2), 0, 0, CellAbscissa_d, CellOrdinate_d, Surf_d, Dens_d, x, y, rsmoothing,
NSEC, NRAD, a, Rmed_d, dimfxy, rh, fxi_d, fxo_d, fyi_d, fyo_d, k);
gpuErrchk(hipDeviceSynchronize());
globalforce[k] = DeviceReduce(fxi_d, NRAD*NSEC);
globalforce[k + dimfxy] = DeviceReduce(fxo_d, NRAD*NSEC);
globalforce[k + 2*dimfxy] = DeviceReduce(fyi_d, NRAD*NSEC);
globalforce[k + 3*dimfxy] = DeviceReduce(fyo_d, NRAD*NSEC);
}
force->fx_inner = globalforce[0];
force->fx_ex_inner = globalforce[dimfxy-1];
force->fx_outer = globalforce[dimfxy];
force->fx_ex_outer = globalforce[2*dimfxy-1];
force->fy_inner = globalforce[2*dimfxy];
force->fy_ex_inner = globalforce[3*dimfxy-1];
force->fy_outer = globalforce[3*dimfxy];
force->fy_ex_outer = globalforce[4*dimfxy-1];
force->GlobalForce = globalforce;
}
__host__ double Compute_smoothing(double r)
{
double smooth;
smooth = THICKNESSSMOOTHING * AspectRatioHost(r) * pow(r, 1.0+FLARINGINDEX);
return smooth;
}
__host__ void FreeForce (Force *force)
{
free (force->GlobalForce);
}
| 69ebba9c0e291adbef59f415468b1a1df85b7a8e.cu | #include "Main.cuh"
extern string OUTPUTDIR;
extern double ROCHESMOOTHING, THICKNESSSMOOTHING, FLARINGINDEX;
extern double *CellAbscissa, *CellOrdinate, *forcesxi, *forcesyi, *forcesxo, *forcesyo;
extern double *CellAbscissa_d, *CellOrdinate_d;
extern double *fxi_d, *fxo_d, *fyi_d, *fyo_d;
extern double *Rmed, *Rmed_d, *Surf, *Surf_d, *example;
extern double *Dens_d;
extern int RocheSmoothing, size_grid, NRAD, NSEC, SelfGravity;
extern dim3 dimGrid2, dimBlock2;
__host__ void UpdateLog (Force *force, PlanetarySystem *sys, double *Dens, double *Energy, int TimeStep,
double PhysicalTime, int dimfxy)
{
FILE *out;
double x, y, r, m, vx, vy, smoothing, a, rh;
double *globalforce;
char filename[500];
char filename2[500];
int i, nb;
nb=sys->nb;
string input;
input = OUTPUTDIR +"tqwk";
strncpy(filename, input.c_str(), sizeof(filename));
filename[sizeof(filename)-1]=0;
for (i = 0; i < nb; i++){
x = sys->x[i];
y = sys->y[i];
vx = sys->vx[i];
vy = sys->vy[i];
r = sqrt(x*x+y*y);
m = sys->mass[i];
a = sqrt(x*x+y*y);
rh = pow(m/3., 1./3.)*a+1e-15;
if (RocheSmoothing) smoothing = r*pow(m/3.,1./3.)*ROCHESMOOTHING;
else smoothing = Compute_smoothing(r);
ComputeForce (force, Dens, x, y, smoothing, m, dimfxy, a, rh);
globalforce = force->GlobalForce;
sprintf (filename2, "%s%d.dat", filename,i);
out = fopen(filename2, "a");
if (out == NULL){
fprintf(stderr, "Can't open %s\n",filename2 );
fprintf(stderr, "Aborted.\n");
}
fprintf(out, "%d\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\n", TimeStep, \
x*force->fy_inner-y*force->fx_inner, \
x*force->fy_outer-y*force->fx_outer, \
x*force->fy_ex_inner-y*force->fx_ex_inner, \
x*force->fy_ex_outer-y*force->fx_ex_outer, \
vx*force->fx_inner+vy*force->fy_inner , \
vx*force->fx_outer+vy*force->fy_outer , \
vx*force->fx_ex_inner+vy*force->fy_ex_inner , \
vx*force->fx_ex_outer+vy*force->fy_ex_outer , PhysicalTime);
fclose (out);
if (!SelfGravity){
for (int k = 0; k < dimfxy; k++) {
sprintf( filename2, "%s%d_%d.dat", filename, i, k);
out = fopen(filename2, "a");
if (out == NULL){
fprintf(stderr, "Can't open %s\n", filename2);
fprintf(stderr, "Aborted.\n");
}
fprintf(out, "%d\t%.18g\t%.18g\t%.18g\t%.18g\t%.18g\n", TimeStep, \
x*globalforce[2*dimfxy+k]-y*globalforce[k], \
x*globalforce[3*dimfxy+k]-y*globalforce[dimfxy+k], \
vx*globalforce[k]+vy*globalforce[2*dimfxy+k], \
vx*globalforce[dimfxy+k]+vy*globalforce[3*dimfxy+k], PhysicalTime);
fclose (out);
}
}
}
}
__host__ Force *AllocateForce (int dimfxy)
{
Force *force;
double *globalforce;
force = (Force *)malloc(sizeof(Force));
globalforce = (double *)malloc(sizeof(double)*4*dimfxy);
for (int i = 0; i < 4*dimfxy; i++)
globalforce[i] = 0.0;
force->GlobalForce = globalforce;
return force;
}
__host__ void ComputeForce (Force *force, double *Dens, double x, double y, double rsmoothing,
double mass, int dimfxy, double a, double rh)
{
double *globalforce;
int k;
globalforce = force->GlobalForce;
for (k = 0; k < dimfxy; k++) {
gpuErrchk(cudaMemset(fxi_d, 0, NRAD*NSEC*sizeof(double)));
gpuErrchk(cudaMemset(fxo_d, 0, NRAD*NSEC*sizeof(double)));
gpuErrchk(cudaMemset(fyi_d, 0, NRAD*NSEC*sizeof(double)));
gpuErrchk(cudaMemset(fyo_d, 0, NRAD*NSEC*sizeof(double)));
ComputeForceKernel<<<dimGrid2, dimBlock2>>>(CellAbscissa_d, CellOrdinate_d, Surf_d, Dens_d, x, y, rsmoothing,
NSEC, NRAD, a, Rmed_d, dimfxy, rh, fxi_d, fxo_d, fyi_d, fyo_d, k);
gpuErrchk(cudaDeviceSynchronize());
globalforce[k] = DeviceReduce(fxi_d, NRAD*NSEC);
globalforce[k + dimfxy] = DeviceReduce(fxo_d, NRAD*NSEC);
globalforce[k + 2*dimfxy] = DeviceReduce(fyi_d, NRAD*NSEC);
globalforce[k + 3*dimfxy] = DeviceReduce(fyo_d, NRAD*NSEC);
}
force->fx_inner = globalforce[0];
force->fx_ex_inner = globalforce[dimfxy-1];
force->fx_outer = globalforce[dimfxy];
force->fx_ex_outer = globalforce[2*dimfxy-1];
force->fy_inner = globalforce[2*dimfxy];
force->fy_ex_inner = globalforce[3*dimfxy-1];
force->fy_outer = globalforce[3*dimfxy];
force->fy_ex_outer = globalforce[4*dimfxy-1];
force->GlobalForce = globalforce;
}
__host__ double Compute_smoothing(double r)
{
double smooth;
smooth = THICKNESSSMOOTHING * AspectRatioHost(r) * pow(r, 1.0+FLARINGINDEX);
return smooth;
}
__host__ void FreeForce (Force *force)
{
free (force->GlobalForce);
}
|
20d21216c4c35a431aaae73d184bf61d563cec3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgecsrmv.cu normal z -> s, Tue Sep 2 12:38:32 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 256
#else
#define BLOCK_SIZE 256
#endif
// CSR-SpMV kernel
__global__ void
sgecsrmv_kernel( int num_rows, int num_cols,
float alpha,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
float dot = MAGMA_S_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
d_y[ row ] = dot *alpha + beta * d_y[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
sgecsrmv_kernel_shift( int num_rows, int num_cols,
float alpha,
float lambda,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
int offset,
int blocksize,
magma_index_t *add_rows,
float *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
float dot = MAGMA_S_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
if( row<blocksize )
d_y[ row ] = dot * alpha - lambda
* d_x[ offset+row ] + beta * d_y [ row ];
else
d_y[ row ] = dot * alpha - lambda
* d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgecsrmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
float alpha,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( sgecsrmv_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream ,
m, n, alpha, d_val, d_rowptr, d_colind, d_x, beta, d_y);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha float
scalar multiplier
@param
lambda float
scalar multiplier
@param
d_val float*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
offset magma_int_t
in case not the main diagonal is scaled
@param
blocksize magma_int_t
in case of processing multiple vectors
@param
add_rows magma_int_t*
in case the matrixpowerskernel is used
@param
d_y float*
output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgecsrmv_shift( magma_trans_t transA,
magma_int_t m, magma_int_t n,
float alpha,
float lambda,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
int offset,
int blocksize,
magma_index_t *add_rows,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( sgecsrmv_kernel_shift), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream ,
m, n, alpha, lambda, d_val, d_rowptr, d_colind, d_x,
beta, offset, blocksize, add_rows, d_y);
return MAGMA_SUCCESS;
}
| 20d21216c4c35a431aaae73d184bf61d563cec3e.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgecsrmv.cu normal z -> s, Tue Sep 2 12:38:32 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 256
#else
#define BLOCK_SIZE 256
#endif
// CSR-SpMV kernel
__global__ void
sgecsrmv_kernel( int num_rows, int num_cols,
float alpha,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
float dot = MAGMA_S_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
d_y[ row ] = dot *alpha + beta * d_y[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
sgecsrmv_kernel_shift( int num_rows, int num_cols,
float alpha,
float lambda,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
int offset,
int blocksize,
magma_index_t *add_rows,
float *d_y){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
float dot = MAGMA_S_ZERO;
int start = d_rowptr[ row ];
int end = d_rowptr[ row+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d_x[ d_colind[j] ];
if( row<blocksize )
d_y[ row ] = dot * alpha - lambda
* d_x[ offset+row ] + beta * d_y [ row ];
else
d_y[ row ] = dot * alpha - lambda
* d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgecsrmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
float alpha,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
sgecsrmv_kernel<<< grid, BLOCK_SIZE, 0, magma_stream >>>
(m, n, alpha, d_val, d_rowptr, d_colind, d_x, beta, d_y);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
alpha float
scalar multiplier
@param
lambda float
scalar multiplier
@param
d_val float*
array containing values of A in CSR
@param
d_rowptr magma_int_t*
rowpointer of A in CSR
@param
d_colind magma_int_t*
columnindices of A in CSR
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
offset magma_int_t
in case not the main diagonal is scaled
@param
blocksize magma_int_t
in case of processing multiple vectors
@param
add_rows magma_int_t*
in case the matrixpowerskernel is used
@param
d_y float*
output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgecsrmv_shift( magma_trans_t transA,
magma_int_t m, magma_int_t n,
float alpha,
float lambda,
float *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
float *d_x,
float beta,
int offset,
int blocksize,
magma_index_t *add_rows,
float *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
sgecsrmv_kernel_shift<<< grid, BLOCK_SIZE, 0, magma_stream >>>
(m, n, alpha, lambda, d_val, d_rowptr, d_colind, d_x,
beta, offset, blocksize, add_rows, d_y);
return MAGMA_SUCCESS;
}
|
14081df9b1a2475b1cc5667c0746ced996e6c312.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_helper.hpp"
#include <copying/legacy/scatter.hpp>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <bitmask/legacy/bit_mask.cuh>
#include <utilities/column_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include <cudf/legacy/copying.hpp>
#include <thrust/scan.h>
#include <thrust/binary_search.h>
#include <thrust/unique.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <algorithm>
#include <tuple>
#include <numeric>
namespace {
/**
* @brief Compares two `table` rows for equality as if the table were
* ordered according to a specified permutation map.
*
*/
template <bool nullable = true>
struct permuted_row_equality_comparator {
row_equality_comparator<nullable> _comparator;
cudf::size_type const *_map;
/**
* @brief Construct a permuted_row_equality_comparator.
*
* @param t The `table` whose rows will be compared
* @param map The permutation map that specifies the effective ordering of
*`t`. Must be the same size as `t.num_rows()`
*/
permuted_row_equality_comparator(device_table const &t,
cudf::size_type const *map)
: _comparator(t, t, true), _map{map} {}
/**
* @brief Returns true if the two rows at the specified indices in the permuted
* order are equivalent.
*
* For example, comparing rows `i` and `j` is
* equivalent to comparing rows `map[i]` and `map[j]` in the original table.
*
* @param lhs The index of the first row
* @param rhs The index of the second row
* @returns if the two specified rows in the permuted order are equivalent
*/
CUDA_DEVICE_CALLABLE
bool operator()(cudf::size_type lhs, cudf::size_type rhs) {
return _comparator(_map[lhs], _map[rhs]);
}
};
} // namespace anonymous
namespace cudf {
namespace groupby {
namespace sort {
namespace detail {
cudf::size_type helper::num_keys() {
if (_num_keys > -1)
return _num_keys;
if (not _include_nulls and has_nulls(_keys)) {
// The number of rows w/o null values `n` is indicated by number of valid bits
// in the row bitmask. When `include_nulls == false`, then only rows `[0, n)`
// in the sorted order are considered for grouping.
CUDF_TRY(gdf_count_nonzero_mask(
reinterpret_cast<cudf::valid_type*>(keys_row_bitmask().data().get()),
_keys.num_rows(),
&_num_keys));
} else {
_num_keys = _keys.num_rows();
}
return _num_keys;
}
gdf_column const& helper::key_sort_order() {
if (_key_sorted_order)
return *_key_sorted_order;
_key_sorted_order = gdf_col_pointer(
new gdf_column(
allocate_column(gdf_dtype_of<cudf::size_type>(),
_keys.num_rows(),
false,
gdf_dtype_extra_info{},
_stream)),
[](gdf_column* col) { gdf_column_free(col); });
if (_keys_pre_sorted) {
auto d_key_sorted_order = static_cast<cudf::size_type*>(_key_sorted_order->data);
thrust::sequence(rmm::exec_policy(_stream)->on(_stream),
d_key_sorted_order,
d_key_sorted_order + _key_sorted_order->size, 0);
return *_key_sorted_order;
}
gdf_context context{};
context.flag_groupby_include_nulls = _include_nulls;
context.flag_null_sort_behavior = (_null_sort_behavior == null_order::AFTER)
? GDF_NULL_AS_LARGEST
: GDF_NULL_AS_SMALLEST;
if (_include_nulls ||
!cudf::has_nulls(_keys)) { // SQL style
CUDF_TRY(gdf_order_by(_keys.begin(), nullptr,
_keys.num_columns(), _key_sorted_order.get(),
&context));
} else { // Pandas style
// Temporarily replace the first column's bitmask with one that indicates the
// presence of a null value within a row. This allows moving all rows that contain
// a null value to the end of the sorted order.
gdf_column null_row_representative = *(_keys.get_column(0));
null_row_representative.valid =
reinterpret_cast<cudf::valid_type*>(keys_row_bitmask().data().get());
cudf::table keys{_keys};
std::vector<gdf_column*> modified_keys(keys.begin(), keys.end());
modified_keys[0] = &null_row_representative;
cudf::table modified_keys_table(modified_keys.data(),
modified_keys.size());
CUDF_TRY(gdf_order_by(modified_keys_table.begin(), nullptr,
modified_keys_table.num_columns(),
_key_sorted_order.get(), &context));
// All rows with one or more null values are at the end of the resulting sorted order.
}
return *_key_sorted_order;
}
rmm::device_vector<cudf::size_type> const& helper::group_offsets() {
if (_group_offsets)
return *_group_offsets;
_group_offsets = std::make_unique<index_vector>(num_keys());
auto device_input_table = device_table::create(_keys, _stream);
auto sorted_order = static_cast<cudf::size_type*>(key_sort_order().data);
decltype(_group_offsets->begin()) result_end;
auto exec = rmm::exec_policy(_stream)->on(_stream);
if (has_nulls(_keys)) {
result_end = thrust::unique_copy(exec,
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(num_keys()),
_group_offsets->begin(),
permuted_row_equality_comparator<true>(*device_input_table, sorted_order));
} else {
result_end = thrust::unique_copy(exec,
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(num_keys()),
_group_offsets->begin(),
permuted_row_equality_comparator<false>(*device_input_table, sorted_order));
}
cudf::size_type num_groups = thrust::distance(_group_offsets->begin(), result_end);
_group_offsets->resize(num_groups);
return *_group_offsets;
}
rmm::device_vector<cudf::size_type> const& helper::group_labels() {
if (_group_labels)
return *_group_labels;
// Get group labels for future use in segmented sorting
_group_labels = std::make_unique<index_vector>(num_keys());
auto& group_labels = *_group_labels;
auto exec = rmm::exec_policy(_stream)->on(_stream);
thrust::scatter(exec,
thrust::make_constant_iterator(1, decltype(num_groups())(1)),
thrust::make_constant_iterator(1, num_groups()),
group_offsets().begin() + 1,
group_labels.begin());
thrust::inclusive_scan(exec,
group_labels.begin(),
group_labels.end(),
group_labels.begin());
return group_labels;
}
gdf_column const& helper::unsorted_keys_labels() {
if (_unsorted_keys_labels)
return *_unsorted_keys_labels;
_unsorted_keys_labels = gdf_col_pointer(
new gdf_column(
allocate_column(gdf_dtype_of<cudf::size_type>(),
key_sort_order().size,
true,
gdf_dtype_extra_info{},
_stream)),
[](gdf_column* col) { gdf_column_free(col); });
CUDA_TRY(hipMemsetAsync(_unsorted_keys_labels->valid, 0,
gdf_num_bitmask_elements(_unsorted_keys_labels->size),
_stream));
gdf_column group_labels_col{};
gdf_column_view(&group_labels_col,
const_cast<cudf::size_type*>(group_labels().data().get()),
nullptr,
group_labels().size(),
gdf_dtype_of<cudf::size_type>());
cudf::table t_sorted_labels{&group_labels_col};
cudf::table t_unsorted_keys_labels{_unsorted_keys_labels.get()};
cudf::detail::scatter(&t_sorted_labels,
static_cast<cudf::size_type*>(key_sort_order().data),
&t_unsorted_keys_labels);
return *_unsorted_keys_labels;
}
rmm::device_vector<bit_mask::bit_mask_t>&
helper::keys_row_bitmask() {
if (_keys_row_bitmask)
return *_keys_row_bitmask;
_keys_row_bitmask =
bitmask_vec_pointer( new bitmask_vector(row_bitmask(_keys, _stream)));
return *_keys_row_bitmask;
}
std::pair<gdf_column, rmm::device_vector<cudf::size_type> >
helper::sort_values(gdf_column const& values) {
CUDF_EXPECTS(values.size == _keys.num_rows(),
"Size mismatch between keys and values.");
auto values_sort_order = gdf_col_pointer(
new gdf_column(
allocate_column(gdf_dtype_of<cudf::size_type>(),
_keys.num_rows(),
false,
gdf_dtype_extra_info{},
_stream)),
[](gdf_column* col) { gdf_column_free(col); });
// Need to const_cast because there cannot be a table constructor that can
// take const initializer list. Making separate constructors for const objects
// is not supported in C++14 https://stackoverflow.com/a/49151864/3325146
auto unsorted_values = const_cast<gdf_column*> (&values);
auto unsorted_label_col = const_cast<gdf_column*> (&unsorted_keys_labels());
auto unsorted_table = cudf::table{unsorted_label_col, unsorted_values};
gdf_context context{};
context.flag_groupby_include_nulls = _include_nulls;
gdf_order_by(unsorted_table.begin(),
nullptr,
unsorted_table.num_columns(), // always 2
values_sort_order.get(),
&context);
cudf::table unsorted_values_table{unsorted_values};
auto sorted_values = allocate_like(values, num_keys(), RETAIN, _stream);
cudf::table sorted_values_table{&sorted_values};
cudf::gather(&unsorted_values_table,
static_cast<cudf::size_type*>(values_sort_order->data),
&sorted_values_table);
// Get number of valid values in each group
rmm::device_vector<cudf::size_type> val_group_sizes(num_groups());
auto col_valid = reinterpret_cast<bit_mask::bit_mask_t*>(sorted_values.valid);
auto bitmask_iterator = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[col_valid] __device__ (cudf::size_type i) -> int {
return (col_valid) ? bit_mask::is_valid(col_valid, i) : true;
});
thrust::reduce_by_key(rmm::exec_policy(_stream)->on(_stream),
group_labels().begin(),
group_labels().end(),
bitmask_iterator,
thrust::make_discard_iterator(),
val_group_sizes.begin());
return std::make_pair(sorted_values, val_group_sizes);
}
cudf::table helper::unique_keys() {
cudf::table unique_keys = allocate_like(_keys,
(cudf::size_type)num_groups(),
RETAIN,
_stream);
auto idx_data = static_cast<cudf::size_type*>(key_sort_order().data);
auto transformed_group_ids = index_vector(num_groups());
auto exec = rmm::exec_policy(_stream)->on(_stream);
thrust::transform(exec, group_offsets().begin(), group_offsets().end(),
transformed_group_ids.begin(),
[=] __device__ (cudf::size_type i) { return idx_data[i]; } );
cudf::gather(&_keys,
transformed_group_ids.data().get(),
&unique_keys);
return unique_keys;
}
} // namespace detail
} // namespace sort
} // namespace groupby
} // namespace cudf
| 14081df9b1a2475b1cc5667c0746ced996e6c312.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sort_helper.hpp"
#include <copying/legacy/scatter.hpp>
#include <table/legacy/device_table.cuh>
#include <table/legacy/device_table_row_operators.cuh>
#include <bitmask/legacy/bit_mask.cuh>
#include <utilities/column_utils.hpp>
#include <utilities/cuda_utils.hpp>
#include <cudf/legacy/copying.hpp>
#include <thrust/scan.h>
#include <thrust/binary_search.h>
#include <thrust/unique.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <algorithm>
#include <tuple>
#include <numeric>
namespace {
/**
* @brief Compares two `table` rows for equality as if the table were
* ordered according to a specified permutation map.
*
*/
template <bool nullable = true>
struct permuted_row_equality_comparator {
row_equality_comparator<nullable> _comparator;
cudf::size_type const *_map;
/**
* @brief Construct a permuted_row_equality_comparator.
*
* @param t The `table` whose rows will be compared
* @param map The permutation map that specifies the effective ordering of
*`t`. Must be the same size as `t.num_rows()`
*/
permuted_row_equality_comparator(device_table const &t,
cudf::size_type const *map)
: _comparator(t, t, true), _map{map} {}
/**
* @brief Returns true if the two rows at the specified indices in the permuted
* order are equivalent.
*
* For example, comparing rows `i` and `j` is
* equivalent to comparing rows `map[i]` and `map[j]` in the original table.
*
* @param lhs The index of the first row
* @param rhs The index of the second row
* @returns if the two specified rows in the permuted order are equivalent
*/
CUDA_DEVICE_CALLABLE
bool operator()(cudf::size_type lhs, cudf::size_type rhs) {
return _comparator(_map[lhs], _map[rhs]);
}
};
} // namespace anonymous
namespace cudf {
namespace groupby {
namespace sort {
namespace detail {
cudf::size_type helper::num_keys() {
if (_num_keys > -1)
return _num_keys;
if (not _include_nulls and has_nulls(_keys)) {
// The number of rows w/o null values `n` is indicated by number of valid bits
// in the row bitmask. When `include_nulls == false`, then only rows `[0, n)`
// in the sorted order are considered for grouping.
CUDF_TRY(gdf_count_nonzero_mask(
reinterpret_cast<cudf::valid_type*>(keys_row_bitmask().data().get()),
_keys.num_rows(),
&_num_keys));
} else {
_num_keys = _keys.num_rows();
}
return _num_keys;
}
gdf_column const& helper::key_sort_order() {
if (_key_sorted_order)
return *_key_sorted_order;
_key_sorted_order = gdf_col_pointer(
new gdf_column(
allocate_column(gdf_dtype_of<cudf::size_type>(),
_keys.num_rows(),
false,
gdf_dtype_extra_info{},
_stream)),
[](gdf_column* col) { gdf_column_free(col); });
if (_keys_pre_sorted) {
auto d_key_sorted_order = static_cast<cudf::size_type*>(_key_sorted_order->data);
thrust::sequence(rmm::exec_policy(_stream)->on(_stream),
d_key_sorted_order,
d_key_sorted_order + _key_sorted_order->size, 0);
return *_key_sorted_order;
}
gdf_context context{};
context.flag_groupby_include_nulls = _include_nulls;
context.flag_null_sort_behavior = (_null_sort_behavior == null_order::AFTER)
? GDF_NULL_AS_LARGEST
: GDF_NULL_AS_SMALLEST;
if (_include_nulls ||
!cudf::has_nulls(_keys)) { // SQL style
CUDF_TRY(gdf_order_by(_keys.begin(), nullptr,
_keys.num_columns(), _key_sorted_order.get(),
&context));
} else { // Pandas style
// Temporarily replace the first column's bitmask with one that indicates the
// presence of a null value within a row. This allows moving all rows that contain
// a null value to the end of the sorted order.
gdf_column null_row_representative = *(_keys.get_column(0));
null_row_representative.valid =
reinterpret_cast<cudf::valid_type*>(keys_row_bitmask().data().get());
cudf::table keys{_keys};
std::vector<gdf_column*> modified_keys(keys.begin(), keys.end());
modified_keys[0] = &null_row_representative;
cudf::table modified_keys_table(modified_keys.data(),
modified_keys.size());
CUDF_TRY(gdf_order_by(modified_keys_table.begin(), nullptr,
modified_keys_table.num_columns(),
_key_sorted_order.get(), &context));
// All rows with one or more null values are at the end of the resulting sorted order.
}
return *_key_sorted_order;
}
rmm::device_vector<cudf::size_type> const& helper::group_offsets() {
if (_group_offsets)
return *_group_offsets;
_group_offsets = std::make_unique<index_vector>(num_keys());
auto device_input_table = device_table::create(_keys, _stream);
auto sorted_order = static_cast<cudf::size_type*>(key_sort_order().data);
decltype(_group_offsets->begin()) result_end;
auto exec = rmm::exec_policy(_stream)->on(_stream);
if (has_nulls(_keys)) {
result_end = thrust::unique_copy(exec,
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(num_keys()),
_group_offsets->begin(),
permuted_row_equality_comparator<true>(*device_input_table, sorted_order));
} else {
result_end = thrust::unique_copy(exec,
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(num_keys()),
_group_offsets->begin(),
permuted_row_equality_comparator<false>(*device_input_table, sorted_order));
}
cudf::size_type num_groups = thrust::distance(_group_offsets->begin(), result_end);
_group_offsets->resize(num_groups);
return *_group_offsets;
}
rmm::device_vector<cudf::size_type> const& helper::group_labels() {
if (_group_labels)
return *_group_labels;
// Get group labels for future use in segmented sorting
_group_labels = std::make_unique<index_vector>(num_keys());
auto& group_labels = *_group_labels;
auto exec = rmm::exec_policy(_stream)->on(_stream);
thrust::scatter(exec,
thrust::make_constant_iterator(1, decltype(num_groups())(1)),
thrust::make_constant_iterator(1, num_groups()),
group_offsets().begin() + 1,
group_labels.begin());
thrust::inclusive_scan(exec,
group_labels.begin(),
group_labels.end(),
group_labels.begin());
return group_labels;
}
gdf_column const& helper::unsorted_keys_labels() {
if (_unsorted_keys_labels)
return *_unsorted_keys_labels;
_unsorted_keys_labels = gdf_col_pointer(
new gdf_column(
allocate_column(gdf_dtype_of<cudf::size_type>(),
key_sort_order().size,
true,
gdf_dtype_extra_info{},
_stream)),
[](gdf_column* col) { gdf_column_free(col); });
CUDA_TRY(cudaMemsetAsync(_unsorted_keys_labels->valid, 0,
gdf_num_bitmask_elements(_unsorted_keys_labels->size),
_stream));
gdf_column group_labels_col{};
gdf_column_view(&group_labels_col,
const_cast<cudf::size_type*>(group_labels().data().get()),
nullptr,
group_labels().size(),
gdf_dtype_of<cudf::size_type>());
cudf::table t_sorted_labels{&group_labels_col};
cudf::table t_unsorted_keys_labels{_unsorted_keys_labels.get()};
cudf::detail::scatter(&t_sorted_labels,
static_cast<cudf::size_type*>(key_sort_order().data),
&t_unsorted_keys_labels);
return *_unsorted_keys_labels;
}
rmm::device_vector<bit_mask::bit_mask_t>&
helper::keys_row_bitmask() {
if (_keys_row_bitmask)
return *_keys_row_bitmask;
_keys_row_bitmask =
bitmask_vec_pointer( new bitmask_vector(row_bitmask(_keys, _stream)));
return *_keys_row_bitmask;
}
std::pair<gdf_column, rmm::device_vector<cudf::size_type> >
helper::sort_values(gdf_column const& values) {
CUDF_EXPECTS(values.size == _keys.num_rows(),
"Size mismatch between keys and values.");
auto values_sort_order = gdf_col_pointer(
new gdf_column(
allocate_column(gdf_dtype_of<cudf::size_type>(),
_keys.num_rows(),
false,
gdf_dtype_extra_info{},
_stream)),
[](gdf_column* col) { gdf_column_free(col); });
// Need to const_cast because there cannot be a table constructor that can
// take const initializer list. Making separate constructors for const objects
// is not supported in C++14 https://stackoverflow.com/a/49151864/3325146
auto unsorted_values = const_cast<gdf_column*> (&values);
auto unsorted_label_col = const_cast<gdf_column*> (&unsorted_keys_labels());
auto unsorted_table = cudf::table{unsorted_label_col, unsorted_values};
gdf_context context{};
context.flag_groupby_include_nulls = _include_nulls;
gdf_order_by(unsorted_table.begin(),
nullptr,
unsorted_table.num_columns(), // always 2
values_sort_order.get(),
&context);
cudf::table unsorted_values_table{unsorted_values};
auto sorted_values = allocate_like(values, num_keys(), RETAIN, _stream);
cudf::table sorted_values_table{&sorted_values};
cudf::gather(&unsorted_values_table,
static_cast<cudf::size_type*>(values_sort_order->data),
&sorted_values_table);
// Get number of valid values in each group
rmm::device_vector<cudf::size_type> val_group_sizes(num_groups());
auto col_valid = reinterpret_cast<bit_mask::bit_mask_t*>(sorted_values.valid);
auto bitmask_iterator = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[col_valid] __device__ (cudf::size_type i) -> int {
return (col_valid) ? bit_mask::is_valid(col_valid, i) : true;
});
thrust::reduce_by_key(rmm::exec_policy(_stream)->on(_stream),
group_labels().begin(),
group_labels().end(),
bitmask_iterator,
thrust::make_discard_iterator(),
val_group_sizes.begin());
return std::make_pair(sorted_values, val_group_sizes);
}
cudf::table helper::unique_keys() {
cudf::table unique_keys = allocate_like(_keys,
(cudf::size_type)num_groups(),
RETAIN,
_stream);
auto idx_data = static_cast<cudf::size_type*>(key_sort_order().data);
auto transformed_group_ids = index_vector(num_groups());
auto exec = rmm::exec_policy(_stream)->on(_stream);
thrust::transform(exec, group_offsets().begin(), group_offsets().end(),
transformed_group_ids.begin(),
[=] __device__ (cudf::size_type i) { return idx_data[i]; } );
cudf::gather(&_keys,
transformed_group_ids.data().get(),
&unique_keys);
return unique_keys;
}
} // namespace detail
} // namespace sort
} // namespace groupby
} // namespace cudf
|
47fa22eeb357fe691d071abb9b0d58c68c896fac.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ComputeL2Distance.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *corrData = NULL;
hipMalloc(&corrData, XSIZE*YSIZE);
int numPts1 = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
ComputeL2Distance), dim3(gridBlock),dim3(threadBlock), 0, 0, corrData,numPts1);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
ComputeL2Distance), dim3(gridBlock),dim3(threadBlock), 0, 0, corrData,numPts1);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
ComputeL2Distance), dim3(gridBlock),dim3(threadBlock), 0, 0, corrData,numPts1);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 47fa22eeb357fe691d071abb9b0d58c68c896fac.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ComputeL2Distance.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *corrData = NULL;
cudaMalloc(&corrData, XSIZE*YSIZE);
int numPts1 = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ComputeL2Distance<<<gridBlock,threadBlock>>>(corrData,numPts1);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ComputeL2Distance<<<gridBlock,threadBlock>>>(corrData,numPts1);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ComputeL2Distance<<<gridBlock,threadBlock>>>(corrData,numPts1);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ffdad5961048ebbdd6f7484fe565c70a9b0a181b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2015, 2016, 2017 Ingo Steinwart
//
// This file is part of liquidSVM.
//
// liquidSVM is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// liquidSVM is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with liquidSVM. If not, see <http://www.gnu.org/licenses/>.
#if !defined (CUDA_SIMPLE_VECTOR_OPERATIONS_CU)
#define CUDA_SIMPLE_VECTOR_OPERATIONS_CU
#include "sources/shared/system_support/cuda_simple_vector_operations.h"
//**********************************************************************************************************************************
const int threads_per_block_simple_operations = 32;
__global__ void init_vector (double* vector_GPU, unsigned size, double value = 0.0);
__global__ void mult_vector (double coefficient, double* vector_GPU, unsigned size);
//**********************************************************************************************************************************
void init_vector_on_GPU(double* vector_GPU, unsigned size, double value)
{
unsigned grid_size_y;
dim3 grid_size;
dim3 block_size;
grid_size_y = (size - 1)/threads_per_block_simple_operations + 1;
grid_size = dim3(1, grid_size_y, 1);
block_size = dim3(1, threads_per_block_simple_operations, 1);
if (size > 0)
hipLaunchKernelGGL(( init_vector) , dim3(grid_size), dim3(block_size) , 0, 0, vector_GPU, size, value);
}
//**********************************************************************************************************************************
void mult_vector_on_GPU(double coefficient, double* vector_GPU, unsigned size)
{
unsigned grid_size_y;
dim3 grid_size;
dim3 block_size;
grid_size_y = (size - 1)/threads_per_block_simple_operations + 1;
grid_size = dim3(1, grid_size_y, 1);
block_size = dim3(1, threads_per_block_simple_operations, 1);
if (size > 0)
hipLaunchKernelGGL(( mult_vector) , dim3(grid_size), dim3(block_size) , 0, 0, coefficient, vector_GPU, size);
}
//**********************************************************************************************************************************
__global__ void init_vector(double* vector_GPU, unsigned size, double value)
{
unsigned j;
j = blockIdx.y*blockDim.y + threadIdx.y;
if (j < size)
vector_GPU[j] = value;
}
//**********************************************************************************************************************************
__global__ void mult_vector(double coefficient, double* vector_GPU, unsigned size)
{
unsigned j;
j = blockIdx.y*blockDim.y + threadIdx.y;
if (j < size)
vector_GPU[j] = coefficient * vector_GPU[j];
}
#endif
| ffdad5961048ebbdd6f7484fe565c70a9b0a181b.cu | // Copyright 2015, 2016, 2017 Ingo Steinwart
//
// This file is part of liquidSVM.
//
// liquidSVM is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// liquidSVM is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with liquidSVM. If not, see <http://www.gnu.org/licenses/>.
#if !defined (CUDA_SIMPLE_VECTOR_OPERATIONS_CU)
#define CUDA_SIMPLE_VECTOR_OPERATIONS_CU
#include "sources/shared/system_support/cuda_simple_vector_operations.h"
//**********************************************************************************************************************************
const int threads_per_block_simple_operations = 32;
__global__ void init_vector (double* vector_GPU, unsigned size, double value = 0.0);
__global__ void mult_vector (double coefficient, double* vector_GPU, unsigned size);
//**********************************************************************************************************************************
void init_vector_on_GPU(double* vector_GPU, unsigned size, double value)
{
unsigned grid_size_y;
dim3 grid_size;
dim3 block_size;
grid_size_y = (size - 1)/threads_per_block_simple_operations + 1;
grid_size = dim3(1, grid_size_y, 1);
block_size = dim3(1, threads_per_block_simple_operations, 1);
if (size > 0)
init_vector <<< grid_size, block_size >>> (vector_GPU, size, value);
}
//**********************************************************************************************************************************
void mult_vector_on_GPU(double coefficient, double* vector_GPU, unsigned size)
{
unsigned grid_size_y;
dim3 grid_size;
dim3 block_size;
grid_size_y = (size - 1)/threads_per_block_simple_operations + 1;
grid_size = dim3(1, grid_size_y, 1);
block_size = dim3(1, threads_per_block_simple_operations, 1);
if (size > 0)
mult_vector <<< grid_size, block_size >>> (coefficient, vector_GPU, size);
}
//**********************************************************************************************************************************
__global__ void init_vector(double* vector_GPU, unsigned size, double value)
{
unsigned j;
j = blockIdx.y*blockDim.y + threadIdx.y;
if (j < size)
vector_GPU[j] = value;
}
//**********************************************************************************************************************************
__global__ void mult_vector(double coefficient, double* vector_GPU, unsigned size)
{
unsigned j;
j = blockIdx.y*blockDim.y + threadIdx.y;
if (j < size)
vector_GPU[j] = coefficient * vector_GPU[j];
}
#endif
|
d222314e7940a35aa5c3ae2f18e70ad808583693.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_7.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells to solve is passed when we are using and adapative mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7278378798970,0.00124683785460400,0.783600532542734,0.783451216299390,0.000170840217018618,0.486751117524943,0.00290707269945428,0.999998399879982,1.88274662357417e-08,1.85125538579548e-05,0.999771968830020,1.00716814076148,0.999996330327535,4.34584098863557e-05,0.262582811563238,10.1909174640447,139.649590217400};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.5712760941292,0.000430386069753157,0.000123954049331097,0.000339134345249555,0.275716122609344,0.118177339481679,0.171110543073973,4.97589596104639,0.0143070702358362,1.84740470131292,1098.07431966868,0.000411463768659304,0.558750994902965,0.00904110237316287,0.00475280604119224,7.62229127897770e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| d222314e7940a35aa5c3ae2f18e70ad808583693.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S2_7.h"
// Allocates the pitched state-vector array on the GPU (NEQ rows, one column
// per volume/cell) and fills it with the model's steady-state initial
// conditions. Publishes the pitch to the device symbol 'pitch' and returns it
// so host code can index 'sv' consistently.
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {

    print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");

    // execution configuration: one thread per volume, ceil-div grid
    const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;

    size_t size = num_volumes*sizeof(real);

    // Pitched allocation so each state-variable row is aligned.
    check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
    check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));

    kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);

    check_cuda_error( cudaPeekAtLastError() );
    // Fix: the synchronization result was previously discarded; asynchronous
    // kernel execution errors surface here, so check it like every other call.
    check_cuda_error( cudaDeviceSynchronize() );

    return pitch_h;
}
// Host entry point: advances every requested cell by 'num_steps' ODE steps.
// Copies the per-cell stimulus currents (and, for adaptive meshes, the list
// of cell indices to solve) to the GPU, launches solve_gpu, then frees the
// temporary device buffers.
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {

    // execution configuration: one thread per cell to solve, ceil-div grid
    const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;

    size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
    size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;

    real *stims_currents_device;
    check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
    check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));

    // the array "cells to solve" is passed only when using an adaptive mesh;
    // NULL means every thread solves its own index directly.
    uint32_t *cells_to_solve_device = NULL;
    if(cells_to_solve != NULL) {
        check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
        check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
    }

    solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);

    check_cuda_error( cudaPeekAtLastError() );

    // cudaFree synchronizes with the default stream, so the kernel has
    // completed (or faulted into check_cuda_error) before these return.
    check_cuda_error(cudaFree(stims_currents_device));
    if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
// Kernel: writes the model's initial conditions into every cell's column of
// the pitched state array. One thread per volume; indexing is
// row-major-by-state: row i (state variable i) at byte offset pitch*i.
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
    // Thread ID
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;

    if(threadID < num_volumes) {

        /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
        *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
        *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
        *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
        *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
        *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
        *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
        *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
        *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
        *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
        *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
        *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
        *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
        *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
        *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
        *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
        *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions (replace the textbook
        // initial values above; ordering matches the state layout in RHS_gpu:
        // V, m, h, j, xr1, xr2, xs, s, r, d, f, fCa, g, Cai, CaSR, Nai, Ki)
        real sv_sst[]={-86.7278378798970,0.00124683785460400,0.783600532542734,0.783451216299390,0.000170840217018618,0.486751117524943,0.00290707269945428,0.999998399879982,1.88274662357417e-08,1.85125538579548e-05,0.999771968830020,1.00716814076148,0.999996330327535,4.34584098863557e-05,0.262582811563238,10.1909174640447,139.649590217400};
        for (uint32_t i = 0; i < NEQ; i++)
            *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
    }
}
// Kernel: advances each selected cell's full state by 'num_steps' explicit
// time steps. cells_to_solve == NULL means thread i solves cell i (fixed
// mesh); otherwise it maps threads to cell indices (adaptive mesh).
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
                          uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
                          int num_steps)
{
    int threadID = blockDim.x * blockIdx.x + threadIdx.x;
    int sv_id;

    // Each thread solves one cell model
    if(threadID < num_cells_to_solve) {
        if(cells_to_solve)
            sv_id = cells_to_solve[threadID];
        else
            sv_id = threadID;

        real rDY[NEQ];

        for (int n = 0; n < num_steps; ++n) {

            RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);

            // NOTE(review): this writes to the same address as row 0 of the
            // pitched array (pitch*0), and the i == 0 iteration of the loop
            // below immediately overwrites it with rDY[0] (which RHS_gpu
            // already returns as the fully updated voltage). The line appears
            // to be dead code -- confirm before removing.
            *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);

            // Commit the new state vector for this cell.
            for(int i = 0; i < NEQ; i++) {
                *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
            }
        }
    }
}
// One explicit time step of the ten Tusscher 2004 (epicardial variant) human
// ventricular ionic model for the cell stored in column 'threadID_' of the
// pitched state array 'sv'.
// Outputs written to rDY_:
//   [0]       membrane voltage after the step (forward Euler on -Itot)
//   [1]..[10] gating variables advanced with the Rush-Larsen exponential scheme
//   [11]..[16] fCa, g, Cai, CaSR, Nai, Ki -- integrated in place in this function
// stim_current is the external stimulus for this cell; dt is the step size.
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {

    // State variables: read column threadID_ from each pitched row.
    real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
    real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
    real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
    real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
    real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
    real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
    real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
    real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
    real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
    real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
    real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
    real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
    real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
    real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
    real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
    real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
    real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics (buffering and SERCA uptake parameters)
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    /// real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    //real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    ///#ifdef EPI
    real Gto=0.294;
    ///#endif
    ///#ifdef ENDO
    /// real Gto=0.073;
    ///#endif
    ///#ifdef MCELL
    /// real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Setting Elnaz's parameters: the fitted conductances below overwrite
    // the default values assigned above.
    real parameters []={14.5712760941292,0.000430386069753157,0.000123954049331097,0.000339134345249555,0.275716122609344,0.118177339481679,0.171110543073973,4.97589596104639,0.0143070702358362,1.84740470131292,1098.07431966868,0.000411463768659304,0.558750994902965,0.00904110237316287,0.00475280604119224,7.62229127897770e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: ionic currents, fluxes, reversal potentials and
    // gate steady states / time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;

    // Precomputed exponentials for the fixed-tau gates fCa and g.
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents: Nernst reversal potentials.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations (analytic buffering solution for Cai and CaSR)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    // arel/crel come from the fitted parameter set (replacing the constants above).
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants for the gating variables
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h gate: different rate formulas above/below -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    // j gate: same voltage split as h
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Transient-outward gate kinetics differ per cell type (compile-time choice).
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen: exact exponential relaxation towards X_INF)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g may only increase when the cell is sufficiently repolarized
    // (voltage-gated latching: revert if they grew while V > -37 mV).
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
c9873f0eb35690cdf47f053432d617233ba485d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*Adapted From "Fast Hough Trasform on GPU's"
*
*/
#include"hough.hpp"
#include"cuda_error_check.hpp"
bool debug_hough = false;
#define THREADS_X_HOUGH 32
#define THREADS_Y_HOUGH 4
#define PIXELS_PER_THREAD 16
__device__ static int g_counter;
__device__ static int g_counter_lines;
extern __shared__ int shmem[];
// Dumps 'size' floats from 'arr' to stdout, tab-separated on one line,
// followed by a newline. Debug helper only.
void print_array(float *arr, int size)
{
    for (int idx = 0; idx < size; ++idx)
        cout << arr[idx] << "\t";
    cout << endl;
}
// Dumps a row-major height x width 8-bit image to stdout as tab-separated
// integers, one image row per output line. Debug helper only.
void print_image(unsigned char *image, int height, int width)
{
    for (int row = 0; row < height; ++row)
    {
        const unsigned char *row_ptr = image + row * width;
        for (int col = 0; col < width; ++col)
            cout << (int)row_ptr[col] << "\t";
        cout << endl;
    }
}
// Kernel: stream-compacts the coordinates of all nonzero pixels of a binary
// edge image (IMG_WIDTH x IMG_HEIGHT) into 'list'. Each entry packs the
// coordinate as (y << 16) | x. Each block row (threadIdx.y) fills its own
// shared-memory queue; thread (0,0) then reserves a contiguous range in the
// global output via an atomic on g_counter and all rows copy their queues out.
// g_counter must be zeroed by the host before launch; its final value is the
// total number of edge points.
__global__ void getNonzeroEdgepoints(unsigned char const* const image, unsigned int* const list)
{
    // Per-row queue: at most PIXELS_PER_THREAD pushes per thread, so the
    // capacity THREADS_X_HOUGH * PIXELS_PER_THREAD cannot overflow.
    __shared__ unsigned int s_queues[THREADS_Y_HOUGH][THREADS_X_HOUGH * PIXELS_PER_THREAD];
    __shared__ int s_qsize[THREADS_Y_HOUGH];
    __shared__ int s_globStart[THREADS_Y_HOUGH];

    // Each thread scans PIXELS_PER_THREAD pixels of image row 'y', strided by
    // blockDim.x so warp accesses stay coalesced.
    const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if(threadIdx.x == 0)
        s_qsize[threadIdx.y] = 0;
    __syncthreads();

    if(y < IMG_HEIGHT)
    {
        const unsigned char* srcRow = image + y*IMG_WIDTH;
        for(int i = 0,xx = x; i<PIXELS_PER_THREAD && xx < IMG_WIDTH;++i,xx +=
            blockDim.x)
        {
            if(srcRow[xx])
            {
                // Pack (y, x) into one 32-bit word; slot claimed atomically.
                const unsigned int val = (y<<16)|xx;
                const int qidx = atomicAdd(&s_qsize[threadIdx.y],1);
                s_queues[threadIdx.y][qidx] = val;
            }
        }
    }
    __syncthreads();

    // Single thread computes the exclusive prefix of the per-row queue sizes
    // and reserves the block's slice of the global list.
    if(threadIdx.x == 0 && threadIdx.y == 0 )
    {
        int totalSize = 0;
        for(int i =0;i<blockDim.y;++i)
        {
            s_globStart[i] = totalSize;
            totalSize += s_qsize[i];
        }
        const int global_Offset = atomicAdd(&g_counter, totalSize);
        for(int i =0 ;i<blockDim.y;++i)
            s_globStart[i] += global_Offset;
    }
    __syncthreads();

    // Cooperative copy of each row's queue into its reserved global range.
    const int qsize = s_qsize[threadIdx.y];
    int gidx = s_globStart[threadIdx.y] + threadIdx.x;
    for(int i = threadIdx.x; i<qsize; i+=blockDim.x, gidx +=blockDim.x)
    {
        list[gidx] = s_queues[threadIdx.y][i];
    }
}
// Kernel: votes the compacted edge points ('list', 'count' entries packed as
// (y<<16)|x) into the Hough accumulator. One block per theta bin
// (n = blockIdx.x); the block accumulates its rho histogram in dynamic shared
// memory (needs at least (numrho + 1) ints; the host passes
// (colhspace - 1) * sizeof(int)) and then copies it into accumulator row
// n + 1 -- the +1 row/column offsets leave a border for the NMS in getLines.
// irho is 1/rho_step; theta is the angular step in radians.
__global__ void fillHoughSpace(unsigned int* const list, const int count, int* hough_space,const float irho, const float theta, const int numrho)
{
    int* smem = (int*)shmem;

    // Zero this block's histogram.
    for(int i =threadIdx.x; i< numrho + 1;i+=blockDim.x)
        smem[i] = 0;
    __syncthreads();

    const int n = blockIdx.x;
    const float ang = n*theta;
    //printf("The angle value of n is %d \n", blockIdx.x);
    //printf("Angle Values : %f \n", ang);
    //printf("Inside Kernel");
    float sinVal;
    float cosVal;
    sincosf(ang, &sinVal, &cosVal);
    // Fold the 1/rho_step scaling into the trig factors.
    sinVal *= irho;
    cosVal *= irho;

    const int shift = (numrho -1)/2;

    // Each thread votes a strided slice of the edge-point list.
    // NOTE(review): assumes |x*cos + y*sin| after shifting lands in
    // [0, numrho - 1]; numrho must be sized for the image diagonal -- confirm
    // against the caller.
    for(int i = threadIdx.x; i<count; i+= blockDim.x)
    {
        const unsigned int val = list[i];

        const int x = (val & 0xFFFF);
        const int y = (val >> 16) & 0xFFFF;

        int r = __float2int_rn(x*cosVal + y*sinVal);
        //printf("The value of x %d and the value of y %d : the value of r %d \n",x,y,r);
        r += shift;
        atomicAdd(&smem[r+1],1);
    }
    __syncthreads();

    // Publish the histogram into the bordered global accumulator row.
    int* hrow = hough_space + (n+1)*(numrho + 2);
    for(int i = threadIdx.x ;i< numrho + 1; i+=blockDim.x)
    {
        //printf("value of shared_memory at %d is %d \n",i,smem[i]);
        hrow[i] = smem[i];
    }
}
/* Non-maximum suppression over the bordered Hough accumulator: a cell at
 * (theta bin n, rho bin r) is kept when its vote count exceeds 'threshold'
 * and every neighbour in a 3 (theta) x 5 (rho) window. Kept peaks are
 * appended to 'lines' (as {rho, theta}) and 'votes' via an atomic on
 * g_counter_lines (host must zero it first); writes beyond maxLines are
 * dropped but still counted.
 *
 * NOTE(review): the window is asymmetric (r-1 .. r+3 instead of r-2 .. r+2),
 * and at n == 0, r == 0 the first comparison reads hough_space[-1] (row n,
 * col r-1 underflows the buffer); r+3 can also run one element past the end
 * of a row into the next. Harmless-looking in practice but out of bounds --
 * should be confirmed and fixed upstream.
 */
__global__ void getLines(const int * hough_space, float2* lines, int* votes, const int
                         maxLines, const float rho, const float theta, const int threshold, const
                         int numrho, const int rhspace)
{
    // r indexes rho bins (x), n indexes theta bins (y); both exclude the
    // accumulator border.
    const int r = blockIdx.x*blockDim.x + threadIdx.x;
    const int n = blockIdx.y*blockDim.y + threadIdx.y;
    if(r >=numrho || n >=rhspace -2)
    {
        return;
    }

    // Centre cell lives at bordered coordinates (n+1, r+1).
    const int curVotes = *(hough_space + (n+1)*(numrho + 2)+ (r+1));

    if(curVotes > *(hough_space + n*(numrho+2) + (r-1)) &&
       curVotes > *(hough_space + n*(numrho + 2) + r) &&
       curVotes > *(hough_space + n*(numrho + 2)+(r+1)) &&
       curVotes > *(hough_space + n*(numrho + 2) + (r+2)) &&
       curVotes > *(hough_space + n*(numrho+2) + (r+3)) &&
       curVotes > *(hough_space + (n+1)*(numrho +2)+ r-1) &&
       curVotes > *(hough_space + (n+1)*(numrho + 2) + r) &&
       curVotes > *(hough_space +(n+1)*(numrho +2) + (r+2)) &&
       curVotes > *(hough_space +(n+1)*(numrho +2) + (r+3)) &&
       curVotes > *(hough_space +(n+2)*(numrho +2) + (r-1)) &&
       curVotes > *(hough_space + (n+2)*(numrho +2) + r) &&
       curVotes > *(hough_space + (n+2)*(numrho +2) + (r+1)) &&
       curVotes > *(hough_space + (n+2)*(numrho +2) + (r+2)) &&
       curVotes > *(hough_space + (n+2)*(numrho +2) + (r+3)) && curVotes > threshold)
    {
        // Convert bin indices back to (rho, theta); rho is centred so that
        // bin (numrho-1)/2 corresponds to rho == 0.
        const float radius = (r - (numrho -1)*0.5f)*rho;
        const float angle = n*theta;

        const int index = atomicAdd(&g_counter_lines,1);
        if(index < maxLines)
        {
            //printf("index Value - %d \n", index);
            //printf("Current Votes - %d \n", curVotes);
            //printf("radius %f and angle %f \n", radius, angle);
            //*(lines + index) = make_float2(radius, angle);
            (lines + index)->x = radius;
            (lines + index)->y = angle;
            //printf("value of radius - %f and value of angle - %f and curVotes - %d \n ", (lines +index)->x,(lines + index)->y, curVotes);
            *(votes + index) = curVotes;
        }
    }
}
// Host pipeline for the GPU Hough transform:
//   1. compact nonzero edge pixels into a coordinate list (getNonzeroEdgepoints),
//   2. vote them into a bordered (numangle+2) x (numrho+2) accumulator
//      (fillHoughSpace, one block per theta bin),
//   3. extract local maxima above a vote threshold (getLines),
//   4. group detected lines by theta, sort each group by votes, and select a
//      subset (either all lines above 'threshold_votes' from the first two
//      theta groups, or the whole first group as a fallback).
// Returns a heap-allocated lines_w_non_zero holding the selected lines, their
// vote counts, the edge-point list and its length; caller owns all of it.
//
// NOTE(review): device buffers gimage, glist, d_hough_space, d_lines and
// d_votes are never hipFree'd, and lines/votes (the full candidate arrays)
// are not freed either -- this leaks per call; confirm and clean up.
lines_w_non_zero* houghTransform(unsigned char const* const edges,const int numangle, const int numrho,float thetaStep, float rStep)
{
    /*	if(debug_hough)
    {
        hipEvent_t start, stop;
        hipEventCreate(&start);
        hipEventCreate(&stop);
        hipEventRecord(start,0);
    }
    */
    /*Replace by maximum function using cuda*/
    const int threshold = 20;   // minimum votes for getLines to accept a peak
    unsigned char* gimage;
    unsigned int* glist;

    // Zero the device-side edge-point counter before the compaction kernel.
    void* counterPtr;
    hipGetSymbolAddress(&counterPtr, g_counter);
    hipMemset(counterPtr,0,sizeof(int));
    CudaCheckError();
    hipFuncSetCacheConfig(getNonzeroEdgepoints, hipFuncCachePreferShared);
    hipMalloc((void**)&gimage, IMG_SIZE*sizeof(unsigned char));
    CudaCheckError();
    // Worst case: every pixel is an edge point.
    hipMalloc((void**) &glist, IMG_SIZE*sizeof(unsigned int));
    CudaCheckError();

    /*Copy Image to GPU */
    hipMemcpy(gimage, edges, IMG_SIZE*sizeof(unsigned char),hipMemcpyHostToDevice);
    CudaCheckError();

    dim3 dimBlock1(THREADS_X_HOUGH, THREADS_Y_HOUGH);
    //dim3 dimGrid1(1, 56);
    dim3 dimGrid1((IMG_WIDTH + THREADS_X_HOUGH*PIXELS_PER_THREAD
                   -1)/(THREADS_X_HOUGH*PIXELS_PER_THREAD), (IMG_HEIGHT +
                   THREADS_Y_HOUGH -1)/(THREADS_Y_HOUGH));

    hipLaunchKernelGGL(( getNonzeroEdgepoints), dim3(dimGrid1),dim3(dimBlock1), 0, 0, gimage, glist);
    CudaCheckError();
    hipDeviceSynchronize();

    // Read back how many edge points were compacted.
    int totalCount ;
    hipMemcpy(&totalCount, counterPtr, sizeof(int),hipMemcpyDeviceToHost);
    //cout<<"Total Count :"<<totalCount<<endl;
    unsigned int* clist = (unsigned int*)malloc(totalCount*sizeof(unsigned int));
    hipMemcpy(clist, glist, totalCount*sizeof(unsigned int),hipMemcpyDeviceToHost);
    CudaCheckError();

    if(debug_hough)
    {
        // NOTE(review): this shadows (and re-copies) the outer 'clist';
        // redundant work and a small host leak when debugging.
        unsigned int* clist = (unsigned int*)malloc(totalCount*sizeof(unsigned int));
        hipMemcpy(clist, glist, totalCount*sizeof(unsigned int),hipMemcpyDeviceToHost);
        CudaCheckError();
        for(int i = 0; i< totalCount; i++)
        {
            unsigned int const q_value = clist[i];
            cout<<"q_value : "<<q_value<<endl;
            const int x = (q_value & 0xFFFF);
            const int y = (q_value >> 16 ) & 0xFFFF;
            cout<<"coordinate ("<<x<<","<<y<<")"<<endl;
            cout<<"Value at coordinate :"<<(int)*(edges + y*IMG_WIDTH + x)<<endl;
        }
    }

    //Initialize hough_space: bordered accumulator, one extra row/column on
    //each side so getLines can read neighbours without edge checks.
    int hough_size = (numangle + 2)*(numrho + 2);
    int rhspace = numangle + 2;
    int colhspace = numrho + 2;
    //cout<<"rows : "<<rhspace<<endl;
    const dim3 block(1024);
    const dim3 grid(rhspace -2);
    //smemSize should be less than 49152 bytes
    size_t smemSize = (colhspace - 1)*sizeof(int);
    // NOTE(review): leftover debug print on the hot path.
    cout<<smemSize<<endl;
    thetaStep = thetaStep*(CV_PI/180);   // caller passes degrees; kernels use radians

    /*Allocate houghSpace on Gpu*/
    int *d_hough_space;
    hipMalloc((void**)&d_hough_space,hough_size*sizeof(int));
    CudaCheckError();
    hipMemset(d_hough_space, 0, hough_size*sizeof(int));
    CudaCheckError();
    hipLaunchKernelGGL(( fillHoughSpace), dim3(grid),dim3(block), smemSize, 0, glist, totalCount,d_hough_space, 1.0f/rStep, thetaStep, colhspace -2);
    CudaCheckError();
    hipDeviceSynchronize();

    if(debug_hough)
    {
        // Dump the whole accumulator for inspection.
        int* hough_space = (int*)malloc(hough_size*sizeof(int));
        hipMemcpy(hough_space, d_hough_space, hough_size*sizeof(int),hipMemcpyDeviceToHost);
        CudaCheckError();
        for(int i =0;i<rhspace;i++)
        {
            for(int j =0;j<colhspace;j++)
            {
                cout<<*(hough_space + i*colhspace +j)<<"\t";
            }
            cout<<endl;
        }
    }

    // Peak extraction (non-maximum suppression) on the accumulator.
    int maxLines = 75;
    float2* d_lines;
    int* d_votes;
    hipMalloc((void**)&d_lines,maxLines*sizeof(float2));
    CudaCheckError();
    hipMalloc((void**)&d_votes, maxLines*sizeof(int));
    CudaCheckError();
    void *counterPtr_lines;
    hipGetSymbolAddress(&counterPtr_lines, g_counter_lines);
    hipMemset(counterPtr_lines, 0, sizeof(int));
    CudaCheckError();
    const dim3 block_1(32,8);
    const int blocks_x = ((colhspace - 2 + block_1.x - 1)/(block_1.x));
    const int blocks_y = ((rhspace - 2 + block_1.y -1 )/(block_1.y));
    const dim3 grid_1(blocks_x, blocks_y);
    hipFuncSetCacheConfig(getLines, hipFuncCachePreferL1);
    hipLaunchKernelGGL(( getLines), dim3(grid_1), dim3(block_1), 0, 0, d_hough_space, d_lines, d_votes, maxLines,rStep, thetaStep, threshold, colhspace -2, rhspace);
    CudaCheckError();
    hipDeviceSynchronize();

    // The counter may exceed maxLines (extra peaks were dropped); clamp.
    int countlines;
    hipMemcpy(&countlines, counterPtr_lines, sizeof(int),hipMemcpyDeviceToHost);
    CudaCheckError();
    countlines = min(countlines, maxLines);
    float2* lines = (float2*)malloc(countlines*sizeof(float2));
    int* votes = (int*)malloc(countlines*sizeof(int));
    hipMemcpy(lines, d_lines, countlines*sizeof(float2),hipMemcpyDeviceToHost);
    CudaCheckError();
    hipMemcpy(votes, d_votes, countlines*sizeof(int),hipMemcpyDeviceToHost);
    CudaCheckError();

    // Group candidates by theta; each map value is a list of {rho, votes}
    // pairs, later sorted by descending votes.
    map<float, vector<floatwint>> theta_to_votes_map;
    for(int i =0;i<countlines;i++)
    {
        floatwint obj = {(float)(lines + i)->x, *(votes + i) };
        theta_to_votes_map[(lines + i)->y].push_back(obj);
    }
    for(auto it = theta_to_votes_map.begin() ;it!= theta_to_votes_map.end(); ++it)
    {
        sort(it->second.begin() , it->second.end(), [](const floatwint& lhs, const floatwint& rhs) {return lhs.y > rhs.y;});
        //cout<<"Vector Size \t"<<theta_to_votes_map[it->first].size()<<endl;
    }
    /*
    int nSelectedLines = theta_to_votes_map.size();
    float2* selLines = (float2*)malloc(nSelectedLines*sizeof(float2));
    int* selvotes = (int*)malloc(nSelectedLines*sizeof(selvotes));
    int index = 0;
    */
    /*
    for(auto it = theta_to_votes_map.begin(); it!= theta_to_votes_map.end(); ++it)
    {
        (selLines + index)->x = (it->second.begin())->x;
        (selLines + index)->y = it->first;
        *(selvotes + index) = (it->second.begin())->y;
        index++;
    }
    */
    // Only the first two theta groups (smallest angles) are considered below.
    auto it = theta_to_votes_map.begin();
    auto it_1 = next(it);
    int index = 0;
    /*count number of lines above 50 votes*/
    int count_lines = 0;
    int threshold_votes = 38;
    for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
    {
        if(it2->y > threshold_votes)
            count_lines++;
    }
    for(auto it2 = it_1->second.begin(); it2 != it_1->second.end();++it2)
    {
        if(it2->y > threshold_votes)
            count_lines++;
    }
    /*
    for(auto it = next(it_1) ; it != theta_to_votes_map.end();++it)
    {
        if(it->second.begin()->y > threshold_votes)
            count_lines++;
    }
    */
    cout<<"Count of Lines \t"<<count_lines<<endl;
    int nSelectedLines;
    float2* selLines;
    int* selvotes;
    //int nSelectedLines = theta_to_votes_map[it->first].size() + theta_to_votes_map[it_1->first].size();
    if(count_lines > 0)
    {
        // Strong lines exist: keep every line above threshold_votes from the
        // first two theta groups.
        nSelectedLines = count_lines;
        selLines = (float2*)malloc(nSelectedLines*sizeof(float2));
        // NOTE(review): sizeof(selvotes) is the size of an int*, not of int --
        // over-allocates (harmless) but almost certainly meant sizeof(int).
        selvotes = (int*)malloc(nSelectedLines*sizeof(selvotes));
        for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
        {
            if(it2->y > threshold_votes)
            {
                (selLines + index)->x = (it2)->x;
                (selLines + index)->y = it->first;
                *(selvotes + index) = (it2)->y;
                index++;
            }
        }
        for(auto it2 = it_1->second.begin(); it2 != it_1->second.end();++it2)
        {
            if(it2->y > threshold_votes)
            {
                (selLines + index)->x = (it2)->x;
                (selLines + index)->y = it_1->first;
                *(selvotes + index) = (it2)->y;
                index++;
            }
        }
        /*
        for(auto it = next(it_1); it != theta_to_votes_map.end();++it)
        {
            if(it->second.begin()->y > threshold_votes)
            {
                (selLines + index)->x = (it->second.begin())->x;
                (selLines + index)->y = it->first;
                *(selvotes + index) = (it->second.begin())->y;
                index++;
            }
        }
        */
    }
    else
    {
        /*
        if(theta_to_votes_map[it->first].size() > 2)
        {
            nSelectedLines += 2;
        }
        */
        //nSelectedLines = theta_to_votes_map[it->first].size() + theta_to_votes_map[it_1->first].size();
        // Fallback: nothing passed the vote threshold, so keep the entire
        // first theta group as-is.
        if(debug_hough)
        {
            cout<<"Key value \t"<<it->first<<" \t Vector Values \t"<<endl;
            cout<<"Size \t"<<theta_to_votes_map[it->first].size()<<endl;
        }
        nSelectedLines = theta_to_votes_map[it->first].size();
        selLines = (float2*)malloc(nSelectedLines*sizeof(float2));
        // NOTE(review): same sizeof(selvotes) issue as above.
        selvotes = (int*)malloc(nSelectedLines*sizeof(selvotes));
        for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
        {
            (selLines + index)->x = (it2)->x;
            (selLines + index)->y = it->first;
            *(selvotes + index) = (it2)->y;
            index++;
        }
        /*
        for(auto it2 = it_1->second.begin(); it2 != it_1->second.end();++it2)
        {
            (selLines + index)->x = (it2)->x;
            (selLines + index)->y = it_1->first;
            *(selvotes + index) = (it2)->y;
            index++;
        }
        */
    }
    if(debug_hough)
    {
        for(int i = 0 ; i < index;i++)
        {
            cout<<"Theta Value \t"<<(selLines + i)->y<<"\t"<<"Rho Value\t"<<(selLines + i)->x<<endl;
            cout<<"Votes \t"<<*(selvotes + i)<<endl;
        }
    }
    if(debug_hough)
    {
        for(auto it = theta_to_votes_map.begin() ;it!= theta_to_votes_map.end(); ++it)
        {
            cout<<"Key value \t"<<it->first<<" \t Vector Values \t";
            for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
            {
                cout<<it2->x<<"\t"<<it2->y<<"\t";
            }
            cout<<endl;
        }
    }
    if(debug_hough)
    {
        // Visual check: draw each selected line over a reference image.
        Mat gray_image = imread("/home/nvidia/IPM_test_image_10.png", 0);
        for(int i =0;i<nSelectedLines;i++)
        {
            //float theta_line = (lines + i)->y;
            float theta_line = (selLines + i)->y;
            //float rho = (lines + i)->x;
            float rho = (selLines + i)->x;
            //int curr_votes = *(votes + i);
            int curr_votes = *(selvotes + i);
            cout<<"Rho - "<<rho<<" \t theta- "<<theta_line<<endl;
            cout<<"Corresponding Votes \t"<<curr_votes<<endl;
            cv::Point pt1, pt2;
            double a = cos(theta_line);
            double b = sin(theta_line);
            double x0 = a*rho;
            double y0 = b*rho;
            pt1.x = (int)(x0 + 400*(-b));
            pt1.y = (int)(y0 + 400*(a));
            pt2.x = (int)(x0 - 400*(-b));
            // NOTE(review): uses x0 where the symmetric pt1.y formula uses
            // y0 -- almost certainly a typo (should be y0 - 400*a); only
            // affects this debug drawing.
            pt2.y = (int)(x0 - 400*(a));
            line(gray_image, pt1,pt2, (255,0,0),2);
        }
        imwrite("/home/nvidia/inital_guess_hough_transform.png",
                gray_image);
        imshow("Image", gray_image);
        waitKey(0);
    }

    // Package results; caller takes ownership of every heap allocation.
    lines_w_non_zero* values = (lines_w_non_zero*)malloc(sizeof(lines_w_non_zero));
    lin_votes* mem_hough_lines = (lin_votes*)malloc(sizeof(lin_votes));
    values->hough_lines = mem_hough_lines;
    //values->hough_lines->lines = lines;
    values->hough_lines->lines = selLines;
    //values->hough_lines->countlines = countlines;
    values->hough_lines->countlines = nSelectedLines;
    values->clist = clist;
    values->count = totalCount;
    values->votes = selvotes;
    //values->votes = votes;
    /*
    lin_votes* hough_lines = (lin_votes*)malloc(sizeof(lin_votes));
    hough_lines->lines = lines;
    hough_lines->countlines = countlines;
    */
    /*
    if(debug_hough)
    {
        hipEventRecord(stop,0);
        hipEventSynchronize(stop);
        float elapsed = 0;
        hipEventElapsedTime(&elapsed, start, stop);
        cout<<"Elapsed Time"<<elapsed;
    }
    */
    return values;
}
| c9873f0eb35690cdf47f053432d617233ba485d4.cu | /*
*Adapted From "Fast Hough Trasform on GPU's"
*
*/
#include"hough.hpp"
#include"cuda_error_check.hpp"
bool debug_hough = false;
#define THREADS_X_HOUGH 32
#define THREADS_Y_HOUGH 4
#define PIXELS_PER_THREAD 16
__device__ static int g_counter;
__device__ static int g_counter_lines;
extern __shared__ int shmem[];
// Dumps 'size' floats from 'arr' to stdout, tab-separated on one line,
// followed by a newline. Debug helper only.
void print_array(float *arr, int size)
{
    for (int idx = 0; idx < size; ++idx)
        cout << arr[idx] << "\t";
    cout << endl;
}
// Dumps a row-major height x width 8-bit image to stdout as tab-separated
// integers, one image row per output line. Debug helper only.
void print_image(unsigned char *image, int height, int width)
{
    for (int row = 0; row < height; ++row)
    {
        const unsigned char *row_ptr = image + row * width;
        for (int col = 0; col < width; ++col)
            cout << (int)row_ptr[col] << "\t";
        cout << endl;
    }
}
// Kernel: stream-compacts the coordinates of all nonzero pixels of a binary
// edge image (IMG_WIDTH x IMG_HEIGHT) into 'list'. Each entry packs the
// coordinate as (y << 16) | x. Each block row (threadIdx.y) fills its own
// shared-memory queue; thread (0,0) then reserves a contiguous range in the
// global output via an atomic on g_counter and all rows copy their queues out.
// g_counter must be zeroed by the host before launch; its final value is the
// total number of edge points.
__global__ void getNonzeroEdgepoints(unsigned char const* const image, unsigned int* const list)
{
    // Per-row queue: at most PIXELS_PER_THREAD pushes per thread, so the
    // capacity THREADS_X_HOUGH * PIXELS_PER_THREAD cannot overflow.
    __shared__ unsigned int s_queues[THREADS_Y_HOUGH][THREADS_X_HOUGH * PIXELS_PER_THREAD];
    __shared__ int s_qsize[THREADS_Y_HOUGH];
    __shared__ int s_globStart[THREADS_Y_HOUGH];

    // Each thread scans PIXELS_PER_THREAD pixels of image row 'y', strided by
    // blockDim.x so warp accesses stay coalesced.
    const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;

    if(threadIdx.x == 0)
        s_qsize[threadIdx.y] = 0;
    __syncthreads();

    if(y < IMG_HEIGHT)
    {
        const unsigned char* srcRow = image + y*IMG_WIDTH;
        for(int i = 0,xx = x; i<PIXELS_PER_THREAD && xx < IMG_WIDTH;++i,xx +=
            blockDim.x)
        {
            if(srcRow[xx])
            {
                // Pack (y, x) into one 32-bit word; slot claimed atomically.
                const unsigned int val = (y<<16)|xx;
                const int qidx = atomicAdd(&s_qsize[threadIdx.y],1);
                s_queues[threadIdx.y][qidx] = val;
            }
        }
    }
    __syncthreads();

    // Single thread computes the exclusive prefix of the per-row queue sizes
    // and reserves the block's slice of the global list.
    if(threadIdx.x == 0 && threadIdx.y == 0 )
    {
        int totalSize = 0;
        for(int i =0;i<blockDim.y;++i)
        {
            s_globStart[i] = totalSize;
            totalSize += s_qsize[i];
        }
        const int global_Offset = atomicAdd(&g_counter, totalSize);
        for(int i =0 ;i<blockDim.y;++i)
            s_globStart[i] += global_Offset;
    }
    __syncthreads();

    // Cooperative copy of each row's queue into its reserved global range.
    const int qsize = s_qsize[threadIdx.y];
    int gidx = s_globStart[threadIdx.y] + threadIdx.x;
    for(int i = threadIdx.x; i<qsize; i+=blockDim.x, gidx +=blockDim.x)
    {
        list[gidx] = s_queues[threadIdx.y][i];
    }
}
// Kernel: votes the compacted edge points ('list', 'count' entries packed as
// (y<<16)|x) into the Hough accumulator. One block per theta bin
// (n = blockIdx.x); the block accumulates its rho histogram in dynamic shared
// memory (needs at least (numrho + 1) ints) and then copies it into
// accumulator row n + 1 -- the +1 row/column offsets leave a border for the
// NMS in getLines. irho is 1/rho_step; theta is the angular step in radians.
__global__ void fillHoughSpace(unsigned int* const list, const int count, int* hough_space,const float irho, const float theta, const int numrho)
{
    int* smem = (int*)shmem;

    // Zero this block's histogram.
    for(int i =threadIdx.x; i< numrho + 1;i+=blockDim.x)
        smem[i] = 0;
    __syncthreads();

    const int n = blockIdx.x;
    const float ang = n*theta;
    //printf("The angle value of n is %d \n", blockIdx.x);
    //printf("Angle Values : %f \n", ang);
    //printf("Inside Kernel");
    float sinVal;
    float cosVal;
    sincosf(ang, &sinVal, &cosVal);
    // Fold the 1/rho_step scaling into the trig factors.
    sinVal *= irho;
    cosVal *= irho;

    const int shift = (numrho -1)/2;

    // Each thread votes a strided slice of the edge-point list.
    // NOTE(review): assumes |x*cos + y*sin| after shifting lands in
    // [0, numrho - 1]; numrho must be sized for the image diagonal -- confirm
    // against the caller.
    for(int i = threadIdx.x; i<count; i+= blockDim.x)
    {
        const unsigned int val = list[i];

        const int x = (val & 0xFFFF);
        const int y = (val >> 16) & 0xFFFF;

        int r = __float2int_rn(x*cosVal + y*sinVal);
        //printf("The value of x %d and the value of y %d : the value of r %d \n",x,y,r);
        r += shift;
        atomicAdd(&smem[r+1],1);
    }
    __syncthreads();

    // Publish the histogram into the bordered global accumulator row.
    int* hrow = hough_space + (n+1)*(numrho + 2);
    for(int i = threadIdx.x ;i< numrho + 1; i+=blockDim.x)
    {
        //printf("value of shared_memory at %d is %d \n",i,smem[i]);
        hrow[i] = smem[i];
    }
}
/*Non Maximum Suppression to get Valid Values*/
/*
 * Reads the padded vote count at (row, col).  Columns outside [0, cols) return
 * -1 so an out-of-range neighbour can never suppress a candidate.  The previous
 * revision indexed the accumulator directly and read out of bounds: at n==0,
 * r==0 it read hough_space[-1], and at n==rhspace-3, r==numrho-1 it read one
 * element past the end of the allocation (the 3x5 window exceeds the 1-cell
 * padding of the accumulator).
 */
static __device__ int houghVoteAt(const int* hough_space, int row, int col, int cols)
{
if(col < 0 || col >= cols)
return -1;
return *(hough_space + row*cols + col);
}
/*
 * Extracts line peaks from the padded (rhspace x numrho+2) accumulator.
 * A cell is accepted when its votes exceed `threshold` and strictly exceed
 * every in-range neighbour of the 3x5 window centred on padded cell
 * (n+1, r+1).  Winners are appended to lines/votes via the device-global
 * counter g_counter_lines (host must zero it before launch); writes beyond
 * maxLines slots are dropped, so the host must clamp the final count.
 */
__global__ void getLines(const int * hough_space, float2* lines, int* votes, const int
maxLines, const float rho, const float theta, const int threshold, const
int numrho, const int rhspace)
{
const int r = blockIdx.x*blockDim.x + threadIdx.x;
const int n = blockIdx.y*blockDim.y + threadIdx.y;
if(r >=numrho || n >=rhspace -2)
{
return;
}
const int cols = numrho + 2;
/* Candidate vote count at padded cell (n+1, r+1). */
const int curVotes = *(hough_space + (n+1)*cols + (r+1));
if(curVotes <= threshold)
return;
/* 3x5 neighbourhood: rows n..n+2 (always in range since n <= rhspace-3),
 * columns r-1..r+3 around the centre, centre cell excluded. */
for(int row = n; row <= n+2; ++row)
{
for(int col = r-1; col <= r+3; ++col)
{
if(row == n+1 && col == r+1)
continue;
if(curVotes <= houghVoteAt(hough_space, row, col, cols))
return;   /* not a strict local maximum */
}
}
const float radius = (r - (numrho -1)*0.5f)*rho;
const float angle = n*theta;
/* Reserve an output slot; may exceed maxLines, so guard the write. */
const int index = atomicAdd(&g_counter_lines,1);
if(index < maxLines)
{
(lines + index)->x = radius;
(lines + index)->y = angle;
*(votes + index) = curVotes;
}
}
/*
 * Full GPU Hough-transform pipeline over a binary edge image:
 *   1. compact nonzero edge pixels into a packed (y<<16|x) point list,
 *   2. vote into a padded (numangle+2) x (numrho+2) accumulator,
 *   3. non-maximum-suppress to extract up to maxLines peaks,
 *   4. keep lines from the two smallest theta bins, preferring those above a
 *      fixed vote threshold (falls back to every line of the first bin).
 *
 * Parameters:
 *   edges     - host pointer to IMG_SIZE bytes (IMG_WIDTH x IMG_HEIGHT); nonzero = edge
 *   numangle  - number of theta bins;  numrho - number of rho bins
 *   thetaStep - theta step in degrees (converted to radians internally)
 *   rStep     - rho step in pixels
 *
 * Returns a heap-allocated lines_w_non_zero; the caller owns `values`, its
 * hough_lines struct, selLines, selvotes, and the clist edge-point copy.
 *
 * Fixes vs. earlier revision: no duplicate (leaked) debug clist; selvotes
 * allocated with sizeof(int) (was sizeof(int*)); empty/one-entry theta map no
 * longer dereferences end(); debug endpoints use y0 (pt2.y previously used x0)
 * with a proper cv::Scalar colour; all temporary host/device buffers freed.
 */
lines_w_non_zero* houghTransform(unsigned char const* const edges,const int numangle, const int numrho,float thetaStep, float rStep)
{
/* Vote threshold for accepting an accumulator peak as a line. */
const int threshold = 20;
unsigned char* gimage;
unsigned int* glist;
/* Zero the device-global edge-point counter before the compaction kernel. */
void* counterPtr;
cudaGetSymbolAddress(&counterPtr, g_counter);
cudaMemset(counterPtr,0,sizeof(int));
CudaCheckError();
cudaFuncSetCacheConfig(getNonzeroEdgepoints, cudaFuncCachePreferShared);
cudaMalloc((void**)&gimage, IMG_SIZE*sizeof(unsigned char));
CudaCheckError();
cudaMalloc((void**) &glist, IMG_SIZE*sizeof(unsigned int));
CudaCheckError();
/* Copy the edge image to the GPU. */
cudaMemcpy(gimage, edges, IMG_SIZE*sizeof(unsigned char),cudaMemcpyHostToDevice);
CudaCheckError();
dim3 dimBlock1(THREADS_X_HOUGH, THREADS_Y_HOUGH);
dim3 dimGrid1((IMG_WIDTH + THREADS_X_HOUGH*PIXELS_PER_THREAD
-1)/(THREADS_X_HOUGH*PIXELS_PER_THREAD), (IMG_HEIGHT +
THREADS_Y_HOUGH -1)/(THREADS_Y_HOUGH));
getNonzeroEdgepoints<<<dimGrid1,dimBlock1>>>(gimage, glist);
CudaCheckError();
cudaDeviceSynchronize();
int totalCount = 0;
cudaMemcpy(&totalCount, counterPtr, sizeof(int),cudaMemcpyDeviceToHost);
/* Host copy of the packed edge-point list; ownership passes to the caller. */
unsigned int* clist = (unsigned int*)malloc(totalCount*sizeof(unsigned int));
cudaMemcpy(clist, glist, totalCount*sizeof(unsigned int),cudaMemcpyDeviceToHost);
CudaCheckError();
if(debug_hough)
{
/* Reuse the host copy above (a second, leaked copy was made here before). */
for(int i = 0; i< totalCount; i++)
{
unsigned int const q_value = clist[i];
cout<<"q_value : "<<q_value<<endl;
const int x = (q_value & 0xFFFF);
const int y = (q_value >> 16 ) & 0xFFFF;
cout<<"coordinate ("<<x<<","<<y<<")"<<endl;
cout<<"Value at coordinate :"<<(int)*(edges + y*IMG_WIDTH + x)<<endl;
}
}
/* Padded accumulator: one guard row/column on each side for the NMS kernel. */
int hough_size = (numangle + 2)*(numrho + 2);
int rhspace = numangle + 2;
int colhspace = numrho + 2;
const dim3 block(1024);
const dim3 grid(rhspace -2);   /* one block per angle bin */
/* Dynamic shared memory for one rho histogram; must stay below 49152 bytes. */
size_t smemSize = (colhspace - 1)*sizeof(int);
cout<<smemSize<<endl;
thetaStep = thetaStep*(CV_PI/180);   /* degrees -> radians */
/* Allocate and zero the Hough accumulator on the GPU. */
int *d_hough_space;
cudaMalloc((void**)&d_hough_space,hough_size*sizeof(int));
CudaCheckError();
cudaMemset(d_hough_space, 0, hough_size*sizeof(int));
CudaCheckError();
fillHoughSpace<<<grid,block, smemSize>>>(glist, totalCount,d_hough_space, 1.0f/rStep, thetaStep, colhspace -2);
CudaCheckError();
cudaDeviceSynchronize();
if(debug_hough)
{
int* hough_space = (int*)malloc(hough_size*sizeof(int));
cudaMemcpy(hough_space, d_hough_space, hough_size*sizeof(int),cudaMemcpyDeviceToHost);
CudaCheckError();
for(int i =0;i<rhspace;i++)
{
for(int j =0;j<colhspace;j++)
{
cout<<*(hough_space + i*colhspace +j)<<"\t";
}
cout<<endl;
}
free(hough_space);   /* was leaked */
}
int maxLines = 75;
float2* d_lines;
int* d_votes;
cudaMalloc((void**)&d_lines,maxLines*sizeof(float2));
CudaCheckError();
cudaMalloc((void**)&d_votes, maxLines*sizeof(int));
CudaCheckError();
/* Zero the device-global line counter before the NMS kernel. */
void *counterPtr_lines;
cudaGetSymbolAddress(&counterPtr_lines, g_counter_lines);
cudaMemset(counterPtr_lines, 0, sizeof(int));
CudaCheckError();
const dim3 block_1(32,8);
const int blocks_x = ((colhspace - 2 + block_1.x - 1)/(block_1.x));
const int blocks_y = ((rhspace - 2 + block_1.y -1 )/(block_1.y));
const dim3 grid_1(blocks_x, blocks_y);
cudaFuncSetCacheConfig(getLines, cudaFuncCachePreferL1);
getLines<<<grid_1, block_1>>>(d_hough_space, d_lines, d_votes, maxLines,rStep, thetaStep, threshold, colhspace -2, rhspace);
CudaCheckError();
cudaDeviceSynchronize();
int countlines = 0;
cudaMemcpy(&countlines, counterPtr_lines, sizeof(int),cudaMemcpyDeviceToHost);
CudaCheckError();
/* The kernel's counter may exceed the output capacity; clamp before copying. */
countlines = min(countlines, maxLines);
float2* lines = (float2*)malloc(countlines*sizeof(float2));
int* votes = (int*)malloc(countlines*sizeof(int));
cudaMemcpy(lines, d_lines, countlines*sizeof(float2),cudaMemcpyDeviceToHost);
CudaCheckError();
cudaMemcpy(votes, d_votes, countlines*sizeof(int),cudaMemcpyDeviceToHost);
CudaCheckError();
/* Group detected lines by theta; per theta, sort by votes (descending). */
map<float, vector<floatwint>> theta_to_votes_map;
for(int i =0;i<countlines;i++)
{
floatwint obj = {(float)(lines + i)->x, *(votes + i) };
theta_to_votes_map[(lines + i)->y].push_back(obj);
}
for(auto it = theta_to_votes_map.begin() ;it!= theta_to_votes_map.end(); ++it)
{
sort(it->second.begin() , it->second.end(), [](const floatwint& lhs, const floatwint& rhs) {return lhs.y > rhs.y;});
}
/* Select lines from the two smallest theta bins.  Prefer only lines above
 * threshold_votes; if none qualify, fall back to every line of the first bin. */
int index = 0;
int nSelectedLines = 0;
float2* selLines = NULL;
int* selvotes = NULL;
const int threshold_votes = 38;
auto it = theta_to_votes_map.begin();
if(it != theta_to_votes_map.end())   /* guard: map may be empty (was UB before) */
{
auto it_1 = next(it);   /* may be end() when only one theta bin exists */
int count_lines = 0;
for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
{
if(it2->y > threshold_votes)
count_lines++;
}
if(it_1 != theta_to_votes_map.end())
{
for(auto it2 = it_1->second.begin(); it2 != it_1->second.end();++it2)
{
if(it2->y > threshold_votes)
count_lines++;
}
}
cout<<"Count of Lines \t"<<count_lines<<endl;
if(count_lines > 0)
{
nSelectedLines = count_lines;
selLines = (float2*)malloc(nSelectedLines*sizeof(float2));
selvotes = (int*)malloc(nSelectedLines*sizeof(int));   /* was sizeof(selvotes) == sizeof(int*) */
for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
{
if(it2->y > threshold_votes)
{
(selLines + index)->x = (it2)->x;
(selLines + index)->y = it->first;
*(selvotes + index) = (it2)->y;
index++;
}
}
if(it_1 != theta_to_votes_map.end())
{
for(auto it2 = it_1->second.begin(); it2 != it_1->second.end();++it2)
{
if(it2->y > threshold_votes)
{
(selLines + index)->x = (it2)->x;
(selLines + index)->y = it_1->first;
*(selvotes + index) = (it2)->y;
index++;
}
}
}
}
else
{
/* Fallback: take every line of the first theta bin. */
if(debug_hough)
{
cout<<"Key value \t"<<it->first<<" \t Vector Values \t"<<endl;
cout<<"Size \t"<<theta_to_votes_map[it->first].size()<<endl;
}
nSelectedLines = theta_to_votes_map[it->first].size();
selLines = (float2*)malloc(nSelectedLines*sizeof(float2));
selvotes = (int*)malloc(nSelectedLines*sizeof(int));   /* was sizeof(selvotes) == sizeof(int*) */
for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
{
(selLines + index)->x = (it2)->x;
(selLines + index)->y = it->first;
*(selvotes + index) = (it2)->y;
index++;
}
}
}
if(debug_hough)
{
for(int i = 0 ; i < index;i++)
{
cout<<"Theta Value \t"<<(selLines + i)->y<<"\t"<<"Rho Value\t"<<(selLines + i)->x<<endl;
cout<<"Votes \t"<<*(selvotes + i)<<endl;
}
}
if(debug_hough)
{
for(auto it = theta_to_votes_map.begin() ;it!= theta_to_votes_map.end(); ++it)
{
cout<<"Key value \t"<<it->first<<" \t Vector Values \t";
for(auto it2 = it->second.begin(); it2 != it->second.end(); ++it2)
{
cout<<it2->x<<"\t"<<it2->y<<"\t";
}
cout<<endl;
}
}
if(debug_hough)
{
Mat gray_image = imread("/home/nvidia/IPM_test_image_10.png", 0);
for(int i =0;i<nSelectedLines;i++)
{
float theta_line = (selLines + i)->y;
float rho = (selLines + i)->x;
int curr_votes = *(selvotes + i);
cout<<"Rho - "<<rho<<" \t theta- "<<theta_line<<endl;
cout<<"Corresponding Votes \t"<<curr_votes<<endl;
cv::Point pt1, pt2;
double a = cos(theta_line);
double b = sin(theta_line);
double x0 = a*rho;
double y0 = b*rho;
pt1.x = (int)(x0 + 400*(-b));
pt1.y = (int)(y0 + 400*(a));
pt2.x = (int)(x0 - 400*(-b));
pt2.y = (int)(y0 - 400*(a));   /* was x0: wrong endpoint */
line(gray_image, pt1,pt2, cv::Scalar(255,0,0),2);   /* was (255,0,0): comma expression == colour 0 */
}
imwrite("/home/nvidia/inital_guess_hough_transform.png",
gray_image);
imshow("Image", gray_image);
waitKey(0);
}
/* Package results; caller owns values, hough_lines, selLines, selvotes, clist. */
lines_w_non_zero* values = (lines_w_non_zero*)malloc(sizeof(lines_w_non_zero));
lin_votes* mem_hough_lines = (lin_votes*)malloc(sizeof(lin_votes));
values->hough_lines = mem_hough_lines;
values->hough_lines->lines = selLines;
values->hough_lines->countlines = nSelectedLines;
values->clist = clist;
values->count = totalCount;
values->votes = selvotes;
/* Release everything the caller does not receive (was leaked). */
free(lines);
free(votes);
cudaFree(gimage);
cudaFree(glist);
cudaFree(d_hough_space);
cudaFree(d_lines);
cudaFree(d_votes);
return values;
}
|
e71a46b8bf2784bea837d1a92f6822fb769517c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define OLAMDA (0)
extern "C" void miniTV_gpu(float *x_recon,float *x0, int width, int height, float lamda,int iterNum);
hipArray* initCudaArrayTexture4Grad(texture<float, 3,hipReadModeElementType> &texArray,int width,int height, int length, float *HostData);
__global__ void kernelGradDesSlice(float * Dx,int size,float mu,int indexZ);
//texture<float, 3,hipReadModeElementType> texPriVolume;
texture<float, 3,hipReadModeElementType> texVolumeART;
texture<float, 3,hipReadModeElementType> texVolumeGrad;
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void UpdateVolume(float *tempDx, float *x_recon, int size, float t)
{
for(int index =0;index<size;index++)
{
x_recon[index] = t*tempDx[index] + x_recon[index];
}
}
__global__ void kernalGetObjFun(float *tmpResutls,float *Dx,int size,float t,int indexZ,float mu)
{
int z= (indexZ);
int x = (threadIdx.x) + (blockIdx.x)* 16;
int y = (threadIdx.y) + (blockIdx.y)* 16;
float sumTV = 0;
float GH = 0;
float GV = 0;
//float GHDIF = 0;
//float GVDIF = 0;
if(x> (size -1) || y> (size -1)) return;
if( ( y<(size -1) ) && ( x<(size -1) ) )
{
//|x|TV
GH = - tex3D(texVolumeGrad,x,y,z) + tex3D(texVolumeGrad,x ,y+1,z);
GV = - tex3D(texVolumeGrad,x,y,z) + tex3D(texVolumeGrad,x+1,y ,z);
if(t!=0)
{
GH += t * (-Dx[x + y * size ] + Dx[ x + (y +1 )* size ]);
GV += t * (-Dx[x + y * size ] + Dx[(x+1) + y * size ]);
}
sumTV += sqrtf(GH*GH + GV*GV);
}
tmpResutls[x + y*size] = sumTV * mu ;
}
float ObjFun(float *Dx,int size,float t,int indexZ,float mu,float *volumeART,float *currentVolume,float * hostDx)
{
float *DtmpResults = NULL;
int numVoxels = size * size;
hipMalloc((void **) &(DtmpResults), numVoxels*sizeof(float)); checkCUDAError("malloc objFun tmpResults");
float finalResults =0;
dim3 grid_dbp(size/16, size/16);
dim3 testgrid(16, 16);
//kernalGetObjFun<<<size,size>>>(DtmpResults,Dx,size,t,indexZ,mu);
hipLaunchKernelGGL(( kernalGetObjFun), dim3(grid_dbp),dim3(testgrid), 0, 0, DtmpResults,Dx,size,t,indexZ,mu);
float *HtmpResults = (float *)malloc(sizeof(float)*size*size);
hipMemcpy(HtmpResults,DtmpResults,sizeof(float)*size*size,hipMemcpyDeviceToHost);
for(int i =0;i<size*size;i++)
{
finalResults += HtmpResults[i];
finalResults += ( volumeART[i+ indexZ * numVoxels] - currentVolume[i+ indexZ * numVoxels] - hostDx[i] )
* ( volumeART[i+ indexZ * numVoxels] - currentVolume[i+ indexZ * numVoxels] - hostDx[i] );
}
free(HtmpResults);
hipFree(DtmpResults);
return finalResults;
}
float norm(float *Dx,int size)
{
float *hostDx = (float *)malloc(size*sizeof(float));
hipMemcpy(hostDx,Dx,sizeof(float)*size,hipMemcpyDeviceToHost);
float normValue = 0.0f;
for(int i=0;i<size;i++)
{
normValue = normValue + hostDx[i];
}
free(hostDx);
return normValue;
}
//
float searchLine(float *Dx,int size,int indexZ,float mu,float *volumeART,float *currentVolume, float * hostDx)
{
float alpha = 0.01f;
float beta = 0.5f;
int backiter = 0;
float t = 1.0f ;
int maxIter = 30;
float f0 = ObjFun(Dx,size,0.0f,indexZ,mu,volumeART,currentVolume,hostDx); //f0
float dtg = norm(Dx,size*size);
float fl = 0.0f;
float fr = 0.0f;
while(backiter<maxIter)
{
backiter++;
fl = ObjFun(Dx,size,t,indexZ,mu,volumeART,currentVolume,hostDx);
fr = f0 + alpha*t*dtg;
if(fr>=fl) break;
//printf("search line iter ====== %d \n",backiter);
t = beta*t;
}
//if(backiter == maxIter) t = 0.0f;
return t;
}
extern "C" void miniTV_gpu(float *x_recon,float *x0, int width, int height, float lamda,int iterNum)
{
int numVoxels = width * height;
int length =1;
memcpy((void *)x_recon,(void*) x0,sizeof(float)*width*height);
hipArray *cudaVolumeART = initCudaArrayTexture4Grad(texVolumeART,width,height,length,x0);
hipArray *cudaVolumeGrad = initCudaArrayTexture4Grad(texVolumeGrad,width,height,length,x0);
//dx
float *Dx;
hipMalloc((void **) &(Dx), numVoxels*sizeof(float)); checkCUDAError("malloc gradient");
hipMemset(Dx, 0, numVoxels*sizeof(float)); checkCUDAError("hipMemset ");
//
dim3 grid_dbp(width/16, height/16);
dim3 testgrid(16, 16);
float t = 1.0f;
for(int tvIter =0;tvIter<iterNum;tvIter++)
{
//get dx
t = 1.0f;
hipMemset(Dx, 0, width*height*sizeof(float)); checkCUDAError("cudaMemset1 ");
hipLaunchKernelGGL(( kernelGradDesSlice), dim3(grid_dbp),dim3(testgrid), 0, 0, Dx,width,lamda,1);checkCUDAError("kernelGradDesSlice");
float *tempDx = (float *) malloc(sizeof(float) * width * height);
hipMemcpy(tempDx,Dx,width*height*sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy tempDx");
t = searchLine(Dx ,width,0,lamda,x0,x_recon,tempDx);
//update volume
UpdateVolume(tempDx, x_recon, width*height, t);
free(tempDx);
//update in texture
hipMemcpy3DParms copyParams = {0};
hipExtent volumeSize = make_hipExtent(width, height, 1);
copyParams.srcPtr = make_hipPitchedPtr((void*)(x_recon), width*sizeof(float),width,width);
copyParams.dstArray = cudaVolumeGrad;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
checkCUDAError("update volumeGrad in texture");
}//end for TV iter
//free(VolumeART);
hipUnbindTexture(texVolumeART);
hipUnbindTexture(texVolumeGrad);
hipFreeArray(cudaVolumeART);
hipFreeArray(cudaVolumeGrad);
hipFree(Dx);
}
__global__ void kernelGradDesSlice(float * Dx,int size,float mu,int indexZ)
{
int z= (indexZ);
int x = (threadIdx.x) + (blockIdx.x)* 16;
int y = (threadIdx.y) + (blockIdx.y)* 16;
Dx[x+y*size] = 0.0f;
if(x> (size -1) || y> (size -1)) return;
float sum=0;
//grad volume_TV
float Dh = tex3D(texVolumeGrad,x+1,y,z) - tex3D(texVolumeGrad,x,y,z);
float Dv = tex3D(texVolumeGrad,x,y+1,z) - tex3D(texVolumeGrad,x,y,z);
float Dyh = 0.0f;
float Dyv = 0.0f;
float Dxh = 0.0f;
float Dxv = 0.0f;
if(y>0&&x>0)
{
Dyh = tex3D(texVolumeGrad,x+1,y-1,z) - tex3D(texVolumeGrad,x,y-1,z);
Dyv = tex3D(texVolumeGrad,x,y,z) - tex3D(texVolumeGrad,x,y-1,z);
Dxh = tex3D(texVolumeGrad,x,y,z) - tex3D(texVolumeGrad,x-1,y,z);
Dxv = tex3D(texVolumeGrad,x-1,y+1,z) - tex3D(texVolumeGrad,x-1,y,z);
}
float smallNum = 0.00000000000000001f;
float s = sqrt(Dh*Dh + Dv*Dv);
if(s<smallNum) s = smallNum;
sum += (1 - OLAMDA)*(Dh + Dv)/s;
//
s = sqrt(Dyh*Dyh + Dyv*Dyv);
if(s<smallNum) s = smallNum;
sum += (Dyv)*(OLAMDA -1.0f )/s;
//
s = sqrt(Dxh*Dxh + Dxv*Dxv);
if(s<smallNum) s = smallNum;
sum += (Dxh)*(OLAMDA -1.0f )/s;
Dx[ x + y*size] += mu * sum;
//texVolumeART
Dx[ x + y*size ] += tex3D(texVolumeART,x,y,z) - tex3D(texVolumeGrad,x,y,z);
}
hipArray* initCudaArrayTexture4Grad(texture<float, 3,hipReadModeElementType> &texArray,int width,int height, int length, float *HostData)
{
hipArray *cudaV;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipExtent extent;
extent.width= width;
extent.height=height;
extent.depth=length;
//alloc cuda array
hipMalloc3DArray(&cudaV,&channelDesc,extent);
checkCUDAError("hipMalloc3DArray cudaPriVolume");
//bind to texture and init
texArray.addressMode[0] = hipAddressModeClamp;
texArray.addressMode[1] = hipAddressModeClamp;
texArray.addressMode[2] = hipAddressModeClamp;
texArray.filterMode = hipFilterModePoint;
texArray.normalized = 0;
hipBindTextureToArray(texArray, cudaV,channelDesc);
checkCUDAError("hipBindTextureToArray texVolume");
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr((void*)(HostData),
width*sizeof(float),
height,
width);
copyParams.dstArray = cudaV;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
checkCUDAError("hipMemcpy3D cudaDynaIterCopyVolumeHostToDeviceArray");
return cudaV;
} | e71a46b8bf2784bea837d1a92f6822fb769517c5.cu | #include <cuda.h>
#include <stdio.h>
#define OLAMDA (0)
extern "C" void miniTV_gpu(float *x_recon,float *x0, int width, int height, float lamda,int iterNum);
cudaArray* initCudaArrayTexture4Grad(texture<float, 3,cudaReadModeElementType> &texArray,int width,int height, int length, float *HostData);
__global__ void kernelGradDesSlice(float * Dx,int size,float mu,int indexZ);
//texture<float, 3,cudaReadModeElementType> texPriVolume;
texture<float, 3,cudaReadModeElementType> texVolumeART;
texture<float, 3,cudaReadModeElementType> texVolumeGrad;
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
void UpdateVolume(float *tempDx, float *x_recon, int size, float t)
{
for(int index =0;index<size;index++)
{
x_recon[index] = t*tempDx[index] + x_recon[index];
}
}
__global__ void kernalGetObjFun(float *tmpResutls,float *Dx,int size,float t,int indexZ,float mu)
{
int z= (indexZ);
int x = (threadIdx.x) + (blockIdx.x)* 16;
int y = (threadIdx.y) + (blockIdx.y)* 16;
float sumTV = 0;
float GH = 0;
float GV = 0;
//float GHDIF = 0;
//float GVDIF = 0;
if(x> (size -1) || y> (size -1)) return;
if( ( y<(size -1) ) && ( x<(size -1) ) )
{
//|x|TV
GH = - tex3D(texVolumeGrad,x,y,z) + tex3D(texVolumeGrad,x ,y+1,z);
GV = - tex3D(texVolumeGrad,x,y,z) + tex3D(texVolumeGrad,x+1,y ,z);
if(t!=0)
{
GH += t * (-Dx[x + y * size ] + Dx[ x + (y +1 )* size ]);
GV += t * (-Dx[x + y * size ] + Dx[(x+1) + y * size ]);
}
sumTV += sqrtf(GH*GH + GV*GV);
}
tmpResutls[x + y*size] = sumTV * mu ;
}
float ObjFun(float *Dx,int size,float t,int indexZ,float mu,float *volumeART,float *currentVolume,float * hostDx)
{
float *DtmpResults = NULL;
int numVoxels = size * size;
cudaMalloc((void **) &(DtmpResults), numVoxels*sizeof(float)); checkCUDAError("malloc objFun tmpResults");
float finalResults =0;
dim3 grid_dbp(size/16, size/16);
dim3 testgrid(16, 16);
//kernalGetObjFun<<<size,size>>>(DtmpResults,Dx,size,t,indexZ,mu);
kernalGetObjFun<<<grid_dbp,testgrid>>>(DtmpResults,Dx,size,t,indexZ,mu);
float *HtmpResults = (float *)malloc(sizeof(float)*size*size);
cudaMemcpy(HtmpResults,DtmpResults,sizeof(float)*size*size,cudaMemcpyDeviceToHost);
for(int i =0;i<size*size;i++)
{
finalResults += HtmpResults[i];
finalResults += ( volumeART[i+ indexZ * numVoxels] - currentVolume[i+ indexZ * numVoxels] - hostDx[i] )
* ( volumeART[i+ indexZ * numVoxels] - currentVolume[i+ indexZ * numVoxels] - hostDx[i] );
}
free(HtmpResults);
cudaFree(DtmpResults);
return finalResults;
}
float norm(float *Dx,int size)
{
float *hostDx = (float *)malloc(size*sizeof(float));
cudaMemcpy(hostDx,Dx,sizeof(float)*size,cudaMemcpyDeviceToHost);
float normValue = 0.0f;
for(int i=0;i<size;i++)
{
normValue = normValue + hostDx[i];
}
free(hostDx);
return normValue;
}
//
float searchLine(float *Dx,int size,int indexZ,float mu,float *volumeART,float *currentVolume, float * hostDx)
{
float alpha = 0.01f;
float beta = 0.5f;
int backiter = 0;
float t = 1.0f ;
int maxIter = 30;
float f0 = ObjFun(Dx,size,0.0f,indexZ,mu,volumeART,currentVolume,hostDx); //f0
float dtg = norm(Dx,size*size);
float fl = 0.0f;
float fr = 0.0f;
while(backiter<maxIter)
{
backiter++;
fl = ObjFun(Dx,size,t,indexZ,mu,volumeART,currentVolume,hostDx);
fr = f0 + alpha*t*dtg;
if(fr>=fl) break;
//printf("search line iter ====== %d \n",backiter);
t = beta*t;
}
//if(backiter == maxIter) t = 0.0f;
return t;
}
extern "C" void miniTV_gpu(float *x_recon,float *x0, int width, int height, float lamda,int iterNum)
{
int numVoxels = width * height;
int length =1;
memcpy((void *)x_recon,(void*) x0,sizeof(float)*width*height);
cudaArray *cudaVolumeART = initCudaArrayTexture4Grad(texVolumeART,width,height,length,x0);
cudaArray *cudaVolumeGrad = initCudaArrayTexture4Grad(texVolumeGrad,width,height,length,x0);
//dx
float *Dx;
cudaMalloc((void **) &(Dx), numVoxels*sizeof(float)); checkCUDAError("malloc gradient");
cudaMemset(Dx, 0, numVoxels*sizeof(float)); checkCUDAError("cudaMemset ");
//
dim3 grid_dbp(width/16, height/16);
dim3 testgrid(16, 16);
float t = 1.0f;
for(int tvIter =0;tvIter<iterNum;tvIter++)
{
//get dx
t = 1.0f;
cudaMemset(Dx, 0, width*height*sizeof(float)); checkCUDAError("cudaMemset1 ");
kernelGradDesSlice<<<grid_dbp,testgrid>>>(Dx,width,lamda,1);checkCUDAError("kernelGradDesSlice");
float *tempDx = (float *) malloc(sizeof(float) * width * height);
cudaMemcpy(tempDx,Dx,width*height*sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy tempDx");
t = searchLine(Dx ,width,0,lamda,x0,x_recon,tempDx);
//update volume
UpdateVolume(tempDx, x_recon, width*height, t);
free(tempDx);
//update in texture
cudaMemcpy3DParms copyParams = {0};
cudaExtent volumeSize = make_cudaExtent(width, height, 1);
copyParams.srcPtr = make_cudaPitchedPtr((void*)(x_recon), width*sizeof(float),width,width);
copyParams.dstArray = cudaVolumeGrad;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
checkCUDAError("update volumeGrad in texture");
}//end for TV iter
//free(VolumeART);
cudaUnbindTexture(texVolumeART);
cudaUnbindTexture(texVolumeGrad);
cudaFreeArray(cudaVolumeART);
cudaFreeArray(cudaVolumeGrad);
cudaFree(Dx);
}
__global__ void kernelGradDesSlice(float * Dx,int size,float mu,int indexZ)
{
int z= (indexZ);
int x = (threadIdx.x) + (blockIdx.x)* 16;
int y = (threadIdx.y) + (blockIdx.y)* 16;
Dx[x+y*size] = 0.0f;
if(x> (size -1) || y> (size -1)) return;
float sum=0;
//grad volume_TV
float Dh = tex3D(texVolumeGrad,x+1,y,z) - tex3D(texVolumeGrad,x,y,z);
float Dv = tex3D(texVolumeGrad,x,y+1,z) - tex3D(texVolumeGrad,x,y,z);
float Dyh = 0.0f;
float Dyv = 0.0f;
float Dxh = 0.0f;
float Dxv = 0.0f;
if(y>0&&x>0)
{
Dyh = tex3D(texVolumeGrad,x+1,y-1,z) - tex3D(texVolumeGrad,x,y-1,z);
Dyv = tex3D(texVolumeGrad,x,y,z) - tex3D(texVolumeGrad,x,y-1,z);
Dxh = tex3D(texVolumeGrad,x,y,z) - tex3D(texVolumeGrad,x-1,y,z);
Dxv = tex3D(texVolumeGrad,x-1,y+1,z) - tex3D(texVolumeGrad,x-1,y,z);
}
float smallNum = 0.00000000000000001f;
float s = sqrt(Dh*Dh + Dv*Dv);
if(s<smallNum) s = smallNum;
sum += (1 - OLAMDA)*(Dh + Dv)/s;
//
s = sqrt(Dyh*Dyh + Dyv*Dyv);
if(s<smallNum) s = smallNum;
sum += (Dyv)*(OLAMDA -1.0f )/s;
//
s = sqrt(Dxh*Dxh + Dxv*Dxv);
if(s<smallNum) s = smallNum;
sum += (Dxh)*(OLAMDA -1.0f )/s;
Dx[ x + y*size] += mu * sum;
//texVolumeART
Dx[ x + y*size ] += tex3D(texVolumeART,x,y,z) - tex3D(texVolumeGrad,x,y,z);
}
cudaArray* initCudaArrayTexture4Grad(texture<float, 3,cudaReadModeElementType> &texArray,int width,int height, int length, float *HostData)
{
cudaArray *cudaV;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaExtent extent;
extent.width= width;
extent.height=height;
extent.depth=length;
//alloc cuda array
cudaMalloc3DArray(&cudaV,&channelDesc,extent);
checkCUDAError("cudaMalloc3DArray cudaPriVolume");
//bind to texture and init
texArray.addressMode[0] = cudaAddressModeClamp;
texArray.addressMode[1] = cudaAddressModeClamp;
texArray.addressMode[2] = cudaAddressModeClamp;
texArray.filterMode = cudaFilterModePoint;
texArray.normalized = 0;
cudaBindTextureToArray(texArray, cudaV,channelDesc);
checkCUDAError("cudaBindTextureToArray texVolume");
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr((void*)(HostData),
width*sizeof(float),
height,
width);
copyParams.dstArray = cudaV;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
checkCUDAError("cudaMemcpy3D cudaDynaIterCopyVolumeHostToDeviceArray");
return cudaV;
} |
8475bd3453ec1371c92d3bdb590725564ac07738.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgedensereimsplit.cu, normal z -> c, Wed Jan 2 14:18:54 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
cgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaFloatComplex * A,
magmaFloatComplex * ReA,
magmaFloatComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_C_MAKE( MAGMA_C_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_C_MAKE( MAGMA_C_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_c_matrix
input matrix A.
@param[out]
ReA magma_c_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_c_matrix*
output matrix contaning complex contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C"
magma_int_t
magma_cgedensereimsplit(
magma_c_matrix A,
magma_c_matrix *ReA,
magma_c_matrix *ImA,
magma_queue_t queue )
{
magma_cmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_cmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
| 8475bd3453ec1371c92d3bdb590725564ac07738.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgedensereimsplit.cu, normal z -> c, Wed Jan 2 14:18:54 2019
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
cgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaFloatComplex * A,
magmaFloatComplex * ReA,
magmaFloatComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_C_MAKE( MAGMA_C_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_C_MAKE( MAGMA_C_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_c_matrix
input matrix A.
@param[out]
ReA magma_c_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_c_matrix*
output matrix contaning complex contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C"
magma_int_t
magma_cgedensereimsplit(
magma_c_matrix A,
magma_c_matrix *ReA,
magma_c_matrix *ImA,
magma_queue_t queue )
{
magma_cmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_cmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
cgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
15e40f3da6eaf5117cca6e76e597793d1eecb959.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe9 {
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
// GPU forward pass: applies (leaky) ReLU elementwise to bottom[0] and
// writes the activations into top[0].
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// negative_slope == 0 gives standard ReLU; nonzero gives leaky ReLU.
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data, negative_slope);
// Surface any launch/execution error right away.
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
// (Leaky) ReLU backward kernel: scales the incoming gradient in_diff by 1
// where the forward input in_data was positive and by negative_slope
// elsewhere, writing the result to out_diff.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    // Branchless local gradient: (in > 0) contributes 1, (in <= 0)
    // contributes negative_slope.
    const Dtype slope = (in_data[index] > 0)
        + (in_data[index] <= 0) * negative_slope;
    out_diff[index] = in_diff[index] * slope;
  }
}
// GPU backward pass: propagates top[0]'s gradient to bottom[0] through the
// (leaky) ReLU derivative. Does nothing when propagation is disabled.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, bottom_diff, negative_slope);
// Surface any launch/execution error right away.
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe9
| 15e40f3da6eaf5117cca6e76e597793d1eecb959.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
namespace caffe9 {
// (Leaky) ReLU forward kernel: out[i] = in[i] when positive, otherwise
// in[i] * negative_slope.
template <typename Dtype>
__global__ void ReLUForward(const int n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
// GPU forward pass: applies (leaky) ReLU elementwise to bottom[0] and
// writes the activations into top[0].
template <typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// negative_slope == 0 gives standard ReLU; nonzero gives leaky ReLU.
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data, negative_slope);
// Surface any launch/execution error right away.
CUDA_POST_KERNEL_CHECK;
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
// (Leaky) ReLU backward kernel: gradient is passed through where the
// forward input was positive and scaled by negative_slope elsewhere.
template <typename Dtype>
__global__ void ReLUBackward(const int n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ((in_data[index] > 0)
+ (in_data[index] <= 0) * negative_slope);
}
}
// GPU backward pass: propagates top[0]'s gradient to bottom[0] through the
// (leaky) ReLU derivative. Does nothing when propagation is disabled.
template <typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe9
|
aae896206eb91af21727b4e98da6590c77ca4bd9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
// --- Global engine state ---------------------------------------------------
// Record counters and timing accumulators.
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
// Runtime option flags (set elsewhere; semantics not visible in this file).
bool verbose;
bool interactive, ssd, delta, star;
// Scratch device buffers shared by the pfor decompression routines.
void* d_v = nullptr;
void* s_v = nullptr;
// Parsed-query work queues: sort/presort column lists, operand types,
// values and numeric literals (with their decimal precision).
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
// Data dictionary: table name -> (column name -> column metadata).
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
// Pinned-host file-buffer cache used in interactive mode, with its
// bookkeeping (sizes, FIFO eviction order, running total).
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
// Alternate destination buffer used when alloced_switch is set.
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
// Per-column bit widths / initial values recorded by the copy phase.
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
// Epsilon-tolerant comparison functors for float_type values, usable on
// both host and device. Two values compare "equal" when their difference
// lies strictly inside (-EPSILON, EPSILON); the strict orderings require
// the difference to exceed EPSILON in the corresponding direction.
struct f_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type d = x - y;
        return d < EPSILON && d > -EPSILON;
    }
};
struct f_less
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        // x < y only when y exceeds x by more than EPSILON.
        return (y - x) > EPSILON;
    }
};
struct f_greater
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        // x > y only when x exceeds y by more than EPSILON.
        return (x - y) > EPSILON;
    }
};
struct f_greater_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        // Strictly greater, or approximately equal.
        const float_type d = x - y;
        return d > EPSILON || (d < EPSILON && d > -EPSILON);
    }
};
struct f_less_equal
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        // Strictly less, or approximately equal.
        const float_type d = x - y;
        return (y - x) > EPSILON || (d < EPSILON && d > -EPSILON);
    }
};
struct f_not_equal_to
{
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        // Difference falls outside the equality band.
        const float_type d = x - y;
        return d > EPSILON || d < -EPSILON;
    }
};
// Unary functor converting an int_type value to float_type (used with
// thrust::transform when widening integer columns).
struct long_to_float_type
{
    __host__ __device__
    float_type operator()(const int_type x)
    {
        return static_cast<float_type>(x);
    }
};
// Multiplies a value by 10^a, scaling decimals to a fixed precision.
// The exponent 'a' is fixed at construction.
//
// Fix: the multiplier is now built with integer arithmetic instead of the
// original (unsigned int)pow((double)10, (double)a) round-trip through
// double, whose truncation depends on libm rounding behavior. For all
// exponents whose power of ten fits an unsigned int (a <= 9) the result is
// identical, and the floating-point dependency is gone.
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
    unsigned int a;   // decimal exponent: multiplier is 10^a
    __host__ __device__
    power_functor(unsigned int a_) { a = a_; }
    __host__ __device__
    T operator()(T x)
    {
        unsigned int p = 1;
        for (unsigned int i = 0; i < a; ++i)
            p = p * 10u;
        return x * p;
    }
};
// Predicate functor: true when an int equals zero (usable with thrust
// stream-compaction algorithms on host or device).
struct is_zero
{
    __host__ __device__
    bool operator()(const int &x)
    {
        return 0 == x;
    }
};
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
// Forward declarations of helpers defined later in this file or in other
// translation units.
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
// Constructor for a set backed by a text (CSV-style) source file.
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
// Constructor for a set backed by a binary/compressed source file;
// 'max' caps the number of records per segment.
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Constructor for an in-memory intermediate result of known dimensions.
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Constructor for a projection result (selected columns with aliases).
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Constructor for a join result combining columns of sets a and b.
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
// Destructor releases all host and device resources via free().
CudaSet::~CudaSet()
{
free();
};
// Ensures the device-side vector backing 'colname' holds RecordCount
// elements. Float columns (type == 1) live in d_columns_float; all other
// column types use d_columns_int.
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
    const bool is_float = (type[colname] == 1);
    if (is_float)
        d_columns_float[colname].resize(RecordCount);
    else
        d_columns_int[colname].resize(RecordCount);
};
// Grows the set by addRecs records (used while materializing join output):
// bumps mRecCount and resizes every host-side column to the new count.
void CudaSet::resize_join(size_t addRecs)
{
    mRecCount = mRecCount + addRecs;
    for (const auto& name : columnNames) {
        if (type[name] == 1)
            h_columns_float[name].resize(mRecCount);
        else
            h_columns_int[name].resize(mRecCount);
    }
};
// Grows the set by addRecs records: bumps mRecCount and resizes every
// host-side column to the new count. (Same effect as resize_join.)
void CudaSet::resize(size_t addRecs)
{
    mRecCount = mRecCount + addRecs;
    for (const auto& name : columnNames) {
        if (type[name] == 1)
            h_columns_float[name].resize(mRecCount);
        else
            h_columns_int[name].resize(mRecCount);
    }
};
// Releases the GPU memory held by one column: resize(0) followed by
// shrink_to_fit actually frees the thrust::device_vector's storage.
// NOTE(review): the integer branch guards with find() so a missing column
// is not inserted, but the float branch uses operator[] and will insert an
// empty entry for an unknown column name -- verify whether that asymmetry
// is intentional.
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
// Allocates device storage of RecordCount rows for every column in the set.
void CudaSet::allocOnDevice(size_t RecordCount)
{
    for (const auto& name : columnNames)
        allocColumnOnDevice(name, RecordCount);
};
// Releases all GPU memory owned by this set: every named column, the
// permutation vector prm_d, and any stray entries remaining in the device
// column maps. If this set is a filtered view, the underlying source set's
// device memory is released as well.
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
// Sweep the maps directly in case they contain columns not listed in
// columnNames (e.g. entries created by map::operator[] lookups).
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
// Resizes the device-side storage of one column to RecCount elements.
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
    const bool is_float = (type[colname] == 1);
    if (is_float)
        d_columns_float[colname].resize(RecCount);
    else
        d_columns_int[colname].resize(RecCount);
};
// Resizes the device-side storage of every column to RecCount elements.
void CudaSet::resizeDevice(size_t RecCount)
{
    for (const auto& name : columnNames)
        resizeDeviceColumn(RecCount, name);
};
// Reports whether column 'colname' currently has data resident on the GPU.
//
// Fix: the original looked the column up with map::operator[], which
// default-constructs and INSERTS an empty device vector for any column that
// was merely queried, silently growing the maps. find() performs the same
// check without mutating them; the returned values are unchanged (a missing
// or empty column still yields false).
bool CudaSet::onDevice(string colname)
{
    if (type[colname] != 1) {
        auto it = d_columns_int.find(colname);
        return it != d_columns_int.end() && it->second.size() > 0;
    }
    auto it = d_columns_float.find(colname);
    return it != d_columns_float.end() && it->second.size() > 0;
}
// Creates a new CudaSet with the same schema and metadata as this one but
// no data: column containers are created empty and mRecCount is reset to 0.
// The caller owns the returned object.
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
// Create empty containers per column type: 0 = integer, 1 = float,
// anything else = char column (raw pointers, initialized to null).
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
// The copy starts empty; callers fill it and set the record count.
a->mRecCount = 0;
return a;
}
// Reads the selected rows (prm_vh holds segment-local indices, assumed
// ascending) of column 'colname' from the compressed segment file on disk
// and stores them into dest's host column starting at 'offset'. Returns the
// segment's stored base value (lower_val). A 4096-byte page is cached and
// reused while consecutive requested rows fall inside it.
//
// Fix: the widest-element branch tested "bits == 84", which can never
// match -- the on-disk element widths are 8/16/32/64 bits (val_l_r is an
// array of 64-bit integers) -- so rows of 64-bit encoded segments were
// silently left unread. The test is now "bits == 64", and the mutually
// exclusive width tests form a proper else-if chain.
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    // One 4096-byte page of elements, viewed at each supported width.
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;      // segment-local index of the cached page's first element
    bool idx_set = 0;      // becomes true once a page has been read
    // Header: element count, base value, then (skipping the payload
    // descriptor) the per-element bit width.
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);
    if(type[colname] == 0) {   // integer column
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                // Requested row is outside the cached page: position the
                // page so it starts exactly at element prm_vh[i], read it,
                // and consume its first element.
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                if(bits == 8) {
                    fread(&val_c_r[0], 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_c_r[0];
                }
                else if(bits == 16) {
                    fread(&val_s_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_s_r[0];
                }
                else if(bits == 32) {
                    fread(&val_i_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_i_r[0];
                }
                else if(bits == 64) {
                    fread(&val_l_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_l_r[0];
                }
            }
            else {
                // Row lies inside the cached page.
                if(bits == 8) {
                    dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
                }
                else if(bits == 16) {
                    dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
                }
                else if(bits == 32) {
                    dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
                }
                else if(bits == 64) {
                    dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
                }
            };
        };
    }
    else if(type[colname] == 1) {   // float/decimal column: raw byte copy
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                fread(val_c_r, 4096, 1, f);
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
            }
            else {
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
            };
        };
    }
    else {
        // no strings in fact tables
    };
    fclose(f);
    return lower_val;
}
// Variant of readSsdSegmentsFromFile that reads the selected rows of a
// compressed segment into a plain host vector of unsigned ints ('dest')
// rather than into a CudaSet column. Returns the segment's stored base
// value (lower_val). The same 4096-byte page cache is used.
//
// Fix: as in readSsdSegmentsFromFile, the widest-element branch tested
// "bits == 84" instead of "bits == 64" (val_l_r holds 64-bit elements), so
// rows of 64-bit encoded segments were silently left unread; and the
// mutually exclusive width tests now form a proper else-if chain.
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    // Header: element count, base value, then (skipping the payload
    // descriptor) the per-element bit width.
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);
    // One 4096-byte page of elements, viewed at each supported width.
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;      // segment-local index of the cached page's first element
    bool idx_set = 0;      // becomes true once a page has been read
    for(unsigned int i = 0; i < prm_vh.size(); i++) {
        if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
            // Page miss: reposition so the page starts at element prm_vh[i].
            fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
            idx = prm_vh[i];
            idx_set = 1;
            if(bits == 8) {
                fread(val_c_r, 4096, 1, f);
                dest[i] = val_c_r[0];
            }
            else if(bits == 16) {
                fread(val_s_r, 4096, 1, f);
                dest[i] = val_s_r[0];
            }
            else if(bits == 32) {
                fread(val_i_r, 4096, 1, f);
                dest[i] = val_i_r[0];
            }
            else if(bits == 64) {
                fread(val_l_r, 4096, 1, f);
                dest[i] = val_l_r[0];
            }
        }
        else {
            // Page hit: serve the row from the cached page.
            if(bits == 8) {
                dest[i] = val_c_r[prm_vh[i]-idx];
            }
            else if(bits == 16) {
                dest[i] = val_s_r[prm_vh[i]-idx];
            }
            else if(bits == 32) {
                dest[i] = val_i_r[prm_vh[i]-idx];
            }
            else if(bits == 64) {
                dest[i] = val_l_r[prm_vh[i]-idx];
            }
        };
    };
    fclose(f);
    return lower_val;
}
// Accumulated wall-clock time spent in disk reads (std::clock ticks).
std::clock_t tot_disk;
// Loads one compressed segment of 'colname' from disk into host memory.
// In interactive mode the raw file is cached in a pinned-host buffer map
// (with FIFO eviction when the cache would exceed system memory) and the
// host column is merely sized to fit; otherwise the file is read directly
// into the host column's storage.
//
// Fix: the original pushed f1 onto buffer_names TWICE when adding a cache
// entry, so the FIFO eviction loop would later process the same name a
// second time -- freeing via a stale map lookup (which re-inserts a null
// entry through operator[]) and corrupting the bookkeeping. Each cached
// buffer is now registered exactly once.
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    if(type[colname] == 2)
        f1 = f1 + ".idx";   // char columns are read via their index file
    std::clock_t start1 = std::clock();
    if(interactive) { // check if data are in buffers
        if(buffers.find(f1) == buffers.end()) { // add data to buffers
            FILE* f = fopen(f1.c_str(), "rb" );
            if(!f) {
                process_error(3, "Error opening " + string(f1) +" file " );
            };
            fseek(f, 0, SEEK_END);
            long fileSize = ftell(f);
            // Evict oldest buffers until the new file fits in memory.
            while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) {
                hipHostFree(buffers[buffer_names.front()]);
                total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
                buffer_sizes.erase(buffer_names.front());
                buffers.erase(buffer_names.front());
                buffer_names.pop();
            };
            fseek(f, 0, SEEK_SET);
            char* buff;
            // Pinned host memory for fast async transfers to the GPU.
            hipHostMalloc((void**) &buff, fileSize,hipHostMallocDefault);
            fread(buff, fileSize, 1, f);
            fclose(f);
            buffers[f1] = buff;
            buffer_sizes[f1] = fileSize;
            buffer_names.push(f1);   // register in the FIFO exactly once
            total_buffer_size = total_buffer_size + fileSize;
            cout << "added buffer " << f1 << " " << fileSize << endl;
        };
        // Data stay in the cached buffer; just size the host column so a
        // later decompress has room. The leading uint is the payload size.
        if(type[colname] != 1) {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            if(cnt > h_columns_int[colname].size()/8 + 10)
                h_columns_int[colname].resize(cnt/8 + 10);
        }
        else {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            if(cnt > h_columns_float[colname].size()/8 + 10)
                h_columns_float[colname].resize(cnt/8 + 10);
        }
    }
    else {
        // Non-interactive: read the compressed payload straight into the
        // host column's storage (size prefix first, then cnt+52 bytes).
        FILE* f = fopen(f1.c_str(), "rb" );
        if(!f) {
            cout << "Error opening " << f1 << " file " << endl;
            exit(0);
        };
        if(type[colname] != 1) {
            if(1 > h_columns_int[colname].size())
                h_columns_int[colname].resize(1);
            fread(h_columns_int[colname].data(), 4, 1, f);
            unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
            if(cnt/8+10 > h_columns_int[colname].size()) {
                h_columns_int[colname].resize(cnt + 10);
            };
            size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        else {
            if(1 > h_columns_float[colname].size())
                h_columns_float[colname].resize(1);
            fread(h_columns_float[colname].data(), 4, 1, f);
            unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
            if(cnt/8+10 > h_columns_float[colname].size())
                h_columns_float[colname].resize(cnt + 10);
            size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        fclose(f);
    };
    tot_disk = tot_disk + (std::clock() - start1);
};
// Copies one segment of 'colname' to the GPU, writing at device offset
// 'offset' (or into the shared alloced_tmp buffer when alloced_switch is
// set). Uncompressed data are copied directly from the host column;
// compressed data are read from disk/cache and pfor-decompressed on the
// device. Updates mRecCount to the number of records handled.
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
// Last segment holds the remainder.
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
// Destination is the shared scratch buffer instead of the column.
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
// Compressed path: bring the segment into host memory (or the buffer
// cache), then decompress on the device.
readSegmentsFromFile(segment,colname);
// Small device scratch areas used by pfor_decompress.
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
// Source is either the cached file buffer (if present) or the host
// column that readSegmentsFromFile just filled.
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
// Decimals are stored as scaled integers; decompress then (unless
// in the copy phase) convert in place to float_type.
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
// Copies ALL segments of 'colname' to the GPU. Uncompressed columns are
// copied in one thrust::copy; compressed columns are decompressed segment
// by segment, appending at a running device offset. On return mRecCount is
// the total number of records copied.
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
// Small device scratch areas used by pfor_decompress.
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
size_t cnt = 0;   // running device offset across segments
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
// Source is the cached file buffer when present, otherwise the
// host column filled by readSegmentsFromFile.
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
// Convert the decompressed scaled integers to float_type in place.
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
// Copies the first RecCount device rows of 'colname' into the host column,
// writing at host position 'offset'.
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
    if (type[colname] == 1) {
        auto& src = d_columns_float[colname];
        thrust::copy(src.begin(), src.begin() + RecCount, h_columns_float[colname].begin() + offset);
    }
    else {
        auto& src = d_columns_int[colname];
        thrust::copy(src.begin(), src.begin() + RecCount, h_columns_int[colname].begin() + offset);
    }
}
// Copies the whole column (mRecCount rows) from device to host.
void CudaSet::CopyColumnToHost(string colname)
{
    CopyColumnToHost(colname, 0, mRecCount);
}
// Copies 'count' rows of every column from device to host at 'offset'.
void CudaSet::CopyToHost(size_t offset, size_t count)
{
    for (const auto& name : columnNames)
        CopyColumnToHost(name, offset, count);
}
// Raw device pointer to a float column's data.
float_type* CudaSet::get_float_type_by_name(string name)
{
    auto& col = d_columns_float[name];
    return thrust::raw_pointer_cast(col.data());
}
// Raw device pointer to an integer column's data.
int_type* CudaSet::get_int_by_name(string name)
{
    auto& col = d_columns_int[name];
    return thrust::raw_pointer_cast(col.data());
}
// Raw host pointer to a float column's data.
float_type* CudaSet::get_host_float_by_name(string name)
{
    auto& col = h_columns_float[name];
    return thrust::raw_pointer_cast(col.data());
}
// Raw host pointer to an integer column's data.
int_type* CudaSet::get_host_int_by_name(string name)
{
    auto& col = h_columns_int[name];
    return thrust::raw_pointer_cast(col.data());
}
// Marks group boundaries over the (already sorted) rows. For each grouping
// column, a row is flagged in 'grp' when its value differs from the
// previous row's; flags are OR-ed across columns, the last row is always a
// boundary, and grp_count ends up as the number of groups. When cpy_bits
// records a compressed width (8/16/32) for a column, adjacent values are
// compared at that element width directly on the packed device data.
void CudaSet::GroupBy(stack<string> columnRef)
{
if(grp.size() < mRecCount)
grp.resize(mRecCount);
thrust::fill(grp.begin(), grp.begin()+mRecCount,0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
// The final row always terminates a group.
d_group[mRecCount-1] = 1;
unsigned int bits;
// NOTE(review): 'i' is never incremented; the loop advances by popping
// columnRef until size() reaches 0 -- works, but reads oddly.
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
if (type[columnRef.top()] != 1) { // int_type
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
};
}
else if(bits == 16) {
if (type[columnRef.top()] != 1) { // int_type
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
};
}
else if(bits == 32) {
if (type[columnRef.top()] != 1) { // int_type
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
};
}
else {
// Full-width comparison; floats use the epsilon-based inequality.
if (type[columnRef.top()] != 1) { // int_type
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
}
else {
thrust::transform(d_columns_float[columnRef.top()].begin(), d_columns_float[columnRef.top()].begin() + mRecCount - 1,
d_columns_float[columnRef.top()].begin()+1, d_group, f_not_equal_to());
};
}
// Accumulate this column's boundaries into the overall flags.
thrust::transform(d_group, d_group+mRecCount, grp.begin(), grp.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp.begin(), grp.begin()+mRecCount, 1);
};
// Adds (or reuses) an integer column named 'colname' and fills it with the
// recCount values pointed to by the device pointer 'col'. The data are
// copied into the device column and then mirrored to the host column.
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
// Adds (or reuses) a float column named 'colname' and fills it with the
// recCount values pointed to by the device pointer 'col'; 'is_decimal'
// records whether the values represent scaled decimals.
// NOTE(review): unlike the int_type overload, this variant does NOT mirror
// the data into the host column -- confirm whether that is intentional.
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
// Computes a row permutation that sorts the set ascending by the columns in
// 'sf' (applied in queue order). Numeric columns are permuted on the GPU
// with an 8-bytes-per-record scratch buffer; char columns fall back to a
// host-side pass, round-tripping the permutation through host memory.
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
// Start from the identity permutation 0..mRecCount-1.
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
// Scratch buffer: 8 bytes per record (large enough for int_type/float_type).
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
// Char column: permute on the host, then copy the result back.
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
hipFree(temp);
}
// Compress mCount records (starting at host offset `offset`) of every column into
// on-disk segment files named "<file_name>.<col>.<segment>".
//   check_type/check_val : when check_type==1 and (fact_file_loaded or check_val==0),
//                          the per-column ".header" files are (re)written.
//   append               : when true and no segments exist yet, the existing header
//                          is read back so counts continue from the stored table.
// Side effects: updates total_count/total_max/total_segments/old_segments, may run
// calc_intervals() if ".interval"/".key" metadata files exist, and may sort the
// segment on op_sort before writing. Allocates a scratch GPU buffer of
// mCount*float_size bytes for gathers/transforms.
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
// scratch device buffer, large enough for int_type/float_type columns of mCount rows
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
// appending to an existing table: recover oldCount/total_segments/maxRecs from the
// first column's header so numbering and totals continue where the table left off
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
// optional interval metadata: "<file>.interval" holds "<dt1>|<dt2>" and
// "<file>.key" holds the index column name; both present -> run calc_intervals()
string s = file_name + ".interval";
ifstream f(s.c_str());
if (f.good()) {
f.seekg (0, f.end);
int length = f.tellg();
f.seekg (0, f.beg);
char* buff = new char[length];
f.read(buff, length);
f.close();
char* p = strtok(buff, "|");
string s1(p);
p = strtok(NULL, "|");
string s2(p);
delete [] buff;
s = file_name + ".key";
ifstream f1(s.c_str());
if (f1.good()) {
f1.seekg (0, f1.end);
length = f1.tellg();
f1.seekg (0, f1.beg);
buff = new char[length+1];
buff[length] = 0;
f1.read(buff, length);
f1.close();
string s3(buff);
delete [] buff;
load_file_name = file_name;
calc_intervals(s1, s2, s3, total_segments, append);
// int_check==1 means int columns were already permuted on device by
// calc_intervals, so below we compress d_columns_int directly
int_check = 1;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
// integer column: PFOR-compress either a permuted device copy (sorted case,
// one file per partition) or the host/device data directly
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
// last partition absorbs the remainder rows
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else if(type[colname] == 1) {
if(decimal[colname]) {
// decimal float column: transform in place to scaled long long, then PFOR-compress
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
// non-decimal float: stored raw with comp_type marker 3
// (record layout: 4-byte count, raw values, 4-byte comp_type)
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
// NOTE(review): writes only 4 bytes of a size_t — assumes little-endian
// and mCount < 2^32; confirm against the reader side
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
// appending to an existing dictionary: rebuild the hash->index map from the
// strings already stored on disk so new rows reuse existing dictionary slots
if(append && total_segments == 1) {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
char* strings = new char[oldCount*char_size[colname]];
binary_file.read(strings, oldCount*char_size[colname]);
binary_file.close();
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < oldCount; z++) {
char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
// permute the host char data through a temporary buffer, then dictionary-compress each partition
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
// each column loop iteration restarts from the same segment number;
// the real post-loop total is fixed up below
total_segments = old_segments;
};
hipFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
// Sort the set by (dt1, index) and compute interval end values for column dt2:
// for consecutive rows sharing the same index key, gpu_interval sets each row's
// dt2 from the next row's dt1 (closing the interval). When `append` is true the
// already-stored segments' dt2 columns are patched (gpu_interval_set) against the
// freshly sorted in-memory data and re-compressed in place.
//   dt1/dt2 : start/end timestamp column names; index : partition key column name.
// Assumes caller set load_file_name; modifies alloced_switch/not_compressed/phase_copy.
void CudaSet::calc_intervals(string dt1, string dt2, string index, unsigned int total_segs, bool append) {
alloced_switch = 1;
not_compressed = 1;
thrust::device_vector<unsigned int> permutation;
thrust::device_vector<int_type> stencil(maxRecs);
thrust::device_vector<int_type> d_dt2(maxRecs);
thrust::device_vector<int_type> d_index(maxRecs);
phase_copy = 0;
// sort on (dt1, index) -- gpu_perm produces the row permutation
queue<string> sf;
sf.push(dt1);
sf.push(index);
gpu_perm(sf, permutation);
// apply the permutation to every column: ints on device, chars via host buffers
for(unsigned int i = 0; i < columnNames.size(); i++) {
if(type[columnNames[i]] == 0)
apply_permutation(d_columns_int[columnNames[i]], thrust::raw_pointer_cast(permutation.data()), mRecCount, (int_type*)thrust::raw_pointer_cast(stencil.data()), 0);
else {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[columnNames[i]]*mRecCount];
apply_permutation_char_host(h_columns_char[columnNames[i]], h_permutation, mRecCount, t, char_size[columnNames[i]]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[columnNames[i]]*mRecCount, h_columns_char[columnNames[i]]);
delete [] t;
};
};
// char index column: materialize a numeric key column from Murmur hashes
if(type[index] == 2) {
d_columns_int[index] = thrust::device_vector<int_type>(mRecCount);
h_columns_int[index] = thrust::host_vector<int_type>(mRecCount);
for(int i = 0; i < mRecCount; i++)
h_columns_int[index][i] = MurmurHash64A(&h_columns_char[index][i*char_size[index]], char_size[index], hash_seed)/2;
d_columns_int[index] = h_columns_int[index];
};
// close intervals within the sorted in-memory data
thrust::counting_iterator<unsigned int> begin(0);
gpu_interval ff(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_columns_int[dt2].data()), thrust::raw_pointer_cast(d_columns_int[index].data()));
thrust::for_each(begin, begin + mRecCount - 1, ff);
auto stack_count = mRecCount;
if(append) {
not_compressed = 0;
// (re)size the shared scratch buffer to fit either an int or a char segment
size_t mysz = 8;
if(char_size[index] > int_size)
mysz = char_size[index];
if(mysz*maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, mysz*maxRecs);
alloced_sz = mysz*maxRecs;
}
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
d_columns_int[dt2].resize(0);
thrust::device_vector<unsigned int> output(stack_count);
for(int i = 0; i < total_segments; i++) {
CopyColumnToGpu(dt2, i, 0);
// only touch segments that still contain open (zero) dt2 values
if(thrust::count(d_col, d_col+mRecCount,0)) {
thrust::copy(d_col, d_col+mRecCount, d_dt2.begin());
if(type[index] == 2) {
// char key: read the stored per-segment hash file instead of re-hashing
string f1 = load_file_name + "." + index + "." + to_string(i) + ".hash";
FILE* f = fopen(f1.c_str(), "rb" );
unsigned int cnt;
fread(&cnt, 4, 1, f);
unsigned long long int* buff = new unsigned long long int[cnt];
fread(buff, cnt*8, 1, f);
fclose(f);
thrust::copy(buff, buff + cnt, d_index.begin());
delete [] buff;
}
else {
CopyColumnToGpu(index, i, 0);
thrust::copy(d_col, d_col+mRecCount, d_index.begin());
};
// locate each stored key in the sorted in-memory keys, patch dt2, re-compress segment
thrust::lower_bound(d_columns_int[index].begin(), d_columns_int[index].begin()+stack_count, d_index.begin(), d_index.begin() + mRecCount, output.begin());
gpu_interval_set f(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_dt2.data()),
thrust::raw_pointer_cast(d_index.data()), thrust::raw_pointer_cast(d_columns_int[index].data()),
thrust::raw_pointer_cast(output.data()));
thrust::for_each(begin, begin + mRecCount, f);
string str = load_file_name + "." + dt2 + "." + to_string(i);;
pfor_compress( thrust::raw_pointer_cast(d_dt2.data()), mRecCount*int_size, str, h_columns_int[dt2], 0);
};
};
}
};
// Write (truncate+rewrite) the per-column header "<file_name>.<colname>.header".
// Layout: total_count (8 bytes), tot_segs (4), total_max (4), cnt_counts entry
// for this column (4). Assumes little-endian layout, matching the readers here.
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
    const string base = file_name + "." + colname;
    const string hdr_name = base + ".header";
    fstream hdr(hdr_name.c_str(), ios::out|ios::binary|ios::trunc);
    hdr.write((char *)&total_count, 8);
    hdr.write((char *)&tot_segs, 4);
    hdr.write((char *)&total_max, 4);
    hdr.write((char *)&cnt_counts[base], 4);
    hdr.close();
};
// Rewrite the per-column header "<file_name>.<colname>.header" with explicit
// counts (used when totals are recomputed rather than taken from member state).
// Layout: newRecs (8 bytes), tot_segs (4), maxRecs1 (4).
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
    const string hdr_name = file_name + "." + colname + ".header";
    fstream hdr(hdr_name.c_str(), ios::out|ios::binary|ios::trunc);
    hdr.write((char *)&newRecs, 8);
    hdr.write((char *)&tot_segs, 4);
    hdr.write((char *)&maxRecs1, 4);
    hdr.close();
};
// Persist the table's sort metadata next to its data files.
// "<file_name>.sort" / "<file_name>.presort" layout: column count (4 bytes),
// then for each column: name length (4 bytes) followed by the raw name bytes.
// When the corresponding queue (op_sort / op_presort) is empty, the metadata
// file is removed instead, so stale sort info never survives.
void CudaSet::writeSortHeader(string file_name)
{
    string str(file_name);
    unsigned int idx;
    if(!op_sort.empty()) {
        str += ".sort";
        fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
        idx = (unsigned int)op_sort.size();
        binary_file.write((char *)&idx, 4);
        queue<string> os(op_sort);  // copy: leave op_sort intact for later use
        while(!os.empty()) {
            if(verbose)
                // fixed: report the sort column's name; previously this printed
                // `idx` (the column count / previous name length), which was
                // meaningless in the log
                cout << "sorted on " << os.front() << endl;
            idx = os.front().size();
            binary_file.write((char *)&idx, 4);
            binary_file.write(os.front().data(), idx);
            os.pop();
        };
        binary_file.close();
    }
    else {
        str += ".sort";
        remove(str.c_str());
    };
    str = file_name;
    if(!op_presort.empty()) {
        str += ".presort";
        fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
        idx = (unsigned int)op_presort.size();
        binary_file.write((char *)&idx, 4);
        queue<string> os(op_presort);
        while(!os.empty()) {
            idx = os.front().size();
            binary_file.write((char *)&idx, 4);
            binary_file.write(os.front().data(), idx);
            os.pop();
        };
        binary_file.close();
    }
    else {
        str += ".presort";
        remove(str.c_str());
    };
}
using namespace mgpu;
// Stream up to `limit` records (0 = all) to the registered row callback row_cb,
// one record at a time, each column rendered into a fixed-size text field.
// String columns are resolved by seeking into their dictionary files (string_map).
//   binary/term are accepted for signature parity; only `limit` affects output here.
// Two paths: a fast path when data is uncompressed and unfiltered (prm_d empty),
// and a segment-by-segment path that decompresses via copyColumns/CopyToHost.
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers of malloced sized structures--
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
// per-string-column open dictionary files and their fixed record lengths
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
// NOTE(review): len+1 bytes are written into a MAXFIELDSIZE field below;
// assumes col_length < MAXFIELDSIZE -- confirm dictionary limits
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
// fast path: host arrays already hold plain values
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
// decimal stored as scaled integer: re-insert the decimal point
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
// timestamp column: value is epoch milliseconds
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
// NOTE(review): rem is a long long printed with %d -- works only
// where varargs promotion happens to line up; consider %lld
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
// string column: h_columns_int holds the dictionary index; seek and read the text
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
// segment path: decompress each segment to host, then emit rows
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
// close the dictionary files opened above
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
// Store up to `limit` records (0 = all) either as delimited text (binary==0,
// written to file_name or stdout when term==1) or as the engine's binary
// column-segment format (binary==1, via compress()).
// Special case: mRecCount==0 with binary output and !term only rewrites the
// per-column headers ("tails") and returns.
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
// ---- text output path ----
// open the dictionary file of every string column once, up front
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
// NOTE(review): bf is used as a raw char buffer via &bf[0] with only
// reserve() (not resize()) -- relies on implementation behavior; confirm
bf.reserve(max_len);
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
// fast path: host arrays already hold plain values
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
// decimal stored as scaled integer: re-insert the decimal point
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
// timestamp column: epoch milliseconds -> "Y-m-d H.M.S.mmm"
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
//fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
// string column: resolve dictionary index to text via seek+read
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
// segment path: decompress each segment to host, then print
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
// limit can only be applied once the first segment's size is known
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
cout << "here3 " << endl;
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
// close the dictionary files opened above
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
// ---- binary output path ----
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
// col_length doubles as: decimal scale, UINT_MAX marker for timestamps, or 0
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
// check_val = i - (segCount-1): zero only on the last segment,
// which triggers the header write inside compress()
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
// last segment takes the remainder rows
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
// Dictionary-compress a fixed-width char column segment.
// Produces three artifacts for segment `segment`:
//   "<base>"              -- the shared dictionary file, one fixed-width string per new value
//                            (truncated on segment 0, appended afterwards);
//   "<base>.<seg>.idx"    -- PFOR-compressed per-row dictionary indices;
//   "<base>.<seg>.hash"   -- row count (4 bytes) + 8-byte Murmur hash per row.
// Relies on the persistent char_hash[ind] map to keep dictionary slots stable
// across segments/appends. Reads mCount rows starting at `offset` in h_columns_char.
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
// NOTE(review): writes 4 bytes of a size_t -- assumes little-endian and
// mCount < 2^32; confirm against the reader in calc_intervals()
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
// make sure the int columns that receive dictionary indices can hold mCount rows
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int i = 0 ; i < mCount; i++) {
// hash each fixed-width string; unseen hashes get the next dictionary slot
// and their raw bytes appended to the dictionary file
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
// PFOR-compress the index column via a device staging vector
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
// Dictionary + bit-pack compress an integer column to `file_name`.
// Builds an ordered dictionary of the distinct values, encodes each row as its
// dictionary rank, and packs fit_count = 64/bits ranks per 64-bit word
// (first rank in the highest bits; a partial final word is left-padded so every
// rank j still sits at shift (fit_count-j)*bits).
// File layout: dict size (4), dict values (int_size each), fit_count (4),
// bits_encoded (4), packed word count (4), real value count (4), packed words.
void CudaSet::compress_int(const string file_name, const string colname, const size_t mCount)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
// collect distinct values (set keeps them sorted)
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_s.insert(f);
};
// assign each distinct value its rank
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
// re-encode every row as its rank
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
// pack ranks MSB-first into 64-bit words
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
// partial last word: pad the remaining shift so positions match a full word
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
// File-scope streaming state for CudaSet::LoadBigFile: initialized on the first
// chunk read and reused across subsequent calls on the same input file.
bool first_time = 1;       // true until the first chunk has been processed
size_t rec_sz = 0;         // records per chunk, fixed from the first chunk's newline count
size_t process_piece;      // bytes read per chunk: process_count, capped at getFreeMem()/4
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(readbuff[rb-1] != '\n') {
rb++;
readbuff[rb-1] = '\n';
};
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
//cout << "curr_cnt " << curr_cnt << " Memory: " << getFreeMem() << endl;
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
hipMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
hipMemset(dest[i],0,max_len*rec_sz);
}
else {
hipMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
hipMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, hipMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
hipFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
// Releases the host-side column buffers of this set, the permutation vector,
// and all device-side allocations (via deAllocOnDevice()).
// Only the vector matching the column's declared type is touched:
// type 0 -> h_columns_int, type 1 -> h_columns_float.
// Char columns (type 2) keep their raw buffers; they are managed separately
// in h_columns_char. The old code sent every non-int column through the
// float branch, default-constructing spurious empty h_columns_float entries
// for char columns - the explicit type guard below avoids that.
void CudaSet::free() {
    for(unsigned int i = 0; i < columnNames.size(); i++ ) {
        if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
            h_columns_int[columnNames[i]].resize(0);
            h_columns_int[columnNames[i]].shrink_to_fit();
        }
        else if(type[columnNames[i]] == 1) {
            h_columns_float[columnNames[i]].resize(0);
            h_columns_float[columnNames[i]].shrink_to_fit();
        };
    };
    if(prm_d.size()) {
        prm_d.resize(0);
        prm_d.shrink_to_fit();
    };
    deAllocOnDevice();
};
// Grows the global device-buffer pool by one buffer of 8*maxRecs bytes
// (8 bytes per record covers int_type and float_type results).
void alloc_pool(unsigned int maxRecs) {
    void* buf = nullptr;
    CUDA_SAFE_CALL(hipMalloc((void **) &buf, 8*maxRecs));
    alloced_mem.push_back(buf);
};
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
// Compares two scalar integers with the operator encoded by op_type
// (2:>  1:<  6:>=  5:<=  4:==  anything else: !=) and materializes the
// single boolean outcome as a device column of mRecCount copies.
// The caller owns the returned buffer (thrust::device_malloc'ed).
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
    bool res;
    switch(op_type) {
    case 2:  res = (d > s);  break;  // >
    case 1:  res = (d < s);  break;  // <
    case 6:  res = (d >= s); break;  // >=
    case 5:  res = (d <= s); break;  // <=
    case 4:  res = (d == s); break;  // =
    default: res = (d != s); break;  // !=
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    // thrust::fill is the idiomatic constant broadcast (the old code used
    // thrust::sequence with a zero step to the same effect).
    thrust::fill(p, p + mRecCount, res);
    return thrust::raw_pointer_cast(p);
};
// Compares two scalar floats with the operator encoded by op_type
// (2:>  1:<  6:>=  5:<=  4:==  anything else: !=) using EPSILON-banded
// comparisons, and materializes the single boolean outcome as a device
// column of mRecCount copies. The caller owns the returned buffer.
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
    // Epsilon equality, written exactly as the individual branches had it:
    // -EPSILON < (d - s) < EPSILON.
    bool eq = ((d-s) < EPSILON) && ((d-s) > -EPSILON);
    bool res;
    switch(op_type) {
    case 2:  res = ((d-s) > EPSILON);       break;  // >
    case 1:  res = ((s-d) > EPSILON);       break;  // <
    case 6:  res = ((d-s) > EPSILON) || eq; break;  // >=
    case 5:  res = ((s-d) > EPSILON) || eq; break;  // <=
    case 4:  res = eq;                      break;  // =
    default: res = !eq;                     break;  // !=
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    // thrust::fill is the idiomatic constant broadcast (the old code used
    // thrust::sequence with a zero step to the same effect).
    thrust::fill(p, p + mRecCount, res);
    return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
    // One boolean per record: column1[i] <op> d, evaluated with the project's
    // float comparison functors (f_greater, f_less, ...).
    // op_type: 2:>  1:<  6:>=  5:<=  4:==  anything else: !=.
    // Caller owns the returned device_malloc'ed buffer.
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<float_type> col(column1);
    switch(op_type) {
    case 2: // >
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_greater());
        break;
    case 1: // <
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_less());
        break;
    case 6: // >=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_greater_equal_to());
        break;
    case 5: // <=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_less_equal());
        break;
    case 4: // =
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_equal_to());
        break;
    default: // !=
        thrust::transform(col, col + mRecCount, thrust::make_constant_iterator(d), out, f_not_equal_to());
        break;
    };
    return thrust::raw_pointer_cast(out);
}
// Compares a device integer column (column1) against the integer constant d.
// op_type selects the operator: 2:>  1:<  6:>=  5:<=  4:==  anything else: !=.
// p1/p2 look like decimal-scale exponents: the column is multiplied by 10^p1
// on the fly (power_functor via a transform iterator) and the constant by
// 10^p2 -- NOTE(review): presumably to align fixed-point decimal scales;
// confirm against callers.
// Returns a device_malloc'ed boolean column of mRecCount entries; caller owns it.
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
// Scale the constant once, up front, so every branch below can use d as-is.
if(p2)
d = d*(unsigned int)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
// Element-wise comparison of two device integer columns.
// op_type encodes the operator: 2:>  1:<  6:>=  5:<=  4:==  anything else: !=.
// p1/p2 are per-column decimal-scale exponents: a non-zero value means that
// column is multiplied by 10^p on the fly (power_functor through a transform
// iterator) before comparing.
// Returns a device_malloc'ed boolean column of mRecCount entries; caller owns it.
//
// BUG FIX: whenever column2 needed scaling, the old code started the second
// input range at dev_ptr2+mRecCount (one full column PAST its start) instead
// of dev_ptr2, so those paths compared against out-of-bounds memory. The
// sibling CudaSet::op(int_type*, int_type*, ...) builds the same iterators
// from dev_ptr2 correctly; this now matches it.
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
    thrust::device_ptr<int_type> dev_ptr1(column1);
    thrust::device_ptr<int_type> dev_ptr2(column2);
    thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
    if (op_type == 2) // >
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
    else if (op_type == 1) // <
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
    else if (op_type == 6) // >=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
    else if (op_type == 5) // <=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
    else if (op_type == 4)// =
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
    else // !=
        if(!p1 && !p2)
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
        else if(p1 && p2)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
        else if(p1)
            thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
        else
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
    return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
    // One boolean per record: column1[i] <op> column2[i], evaluated with the
    // project's float comparison functors.
    // op_type: 2:>  1:<  6:>=  5:<=  4:==  anything else: !=.
    // Caller owns the returned device_malloc'ed buffer.
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<float_type> rhs(column2);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    switch(op_type) {
    case 2: // >
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater());
        break;
    case 1: // <
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less());
        break;
    case 6: // >=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_greater_equal_to());
        break;
    case 5: // <=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_less_equal());
        break;
    case 4: // =
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_equal_to());
        break;
    default: // !=
        thrust::transform(lhs, lhs + mRecCount, rhs, out, f_not_equal_to());
        break;
    };
    return thrust::raw_pointer_cast(out);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
    // Mixed-type comparison: the integer column is promoted to float into a
    // scratch buffer first, then compared element-wise against column1.
    // op_type: 2:>  1:<  6:>=  5:<=  4:==  anything else: !=.
    // The scratch buffer is freed here; caller owns the returned boolean column.
    thrust::device_ptr<float_type> fcol(column1);
    thrust::device_ptr<int_type> icol(column2);
    thrust::device_ptr<float_type> conv = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::transform(icol, icol + mRecCount, conv, long_to_float_type());
    switch(op_type) {
    case 2: // >
        thrust::transform(fcol, fcol + mRecCount, conv, out, f_greater());
        break;
    case 1: // <
        thrust::transform(fcol, fcol + mRecCount, conv, out, f_less());
        break;
    case 6: // >=
        thrust::transform(fcol, fcol + mRecCount, conv, out, f_greater_equal_to());
        break;
    case 5: // <=
        thrust::transform(fcol, fcol + mRecCount, conv, out, f_less_equal());
        break;
    case 4: // =
        thrust::transform(fcol, fcol + mRecCount, conv, out, f_equal_to());
        break;
    default: // !=
        thrust::transform(fcol, fcol + mRecCount, conv, out, f_not_equal_to());
        break;
    };
    thrust::device_free(conv);
    return thrust::raw_pointer_cast(out);
}
// Applies "column2 <op> float(column1)" (reverse == 0) or
// "float(column1) <op> column2" (reverse != 0), where column1 is an integer
// column promoted to float first. op_type is one of MUL/ADD/MINUS/DIV
// (any unrecognized string falls through to DIV).
// The result buffer comes from the global allocation pool and is popped from
// it below, so ownership passes to the caller.
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // promote the int column to float into the result buffer; combined in place below
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
// reverse: the promoted integer column becomes the left-hand operand.
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
// Applies "column1 <op> d" (or "d <op> column1" when reverse is set), where d
// is an integer constant and op_type is one of MUL/ADD/MINUS/DIV (any
// unrecognized string falls through to DIV).
// p1/p2 are decimal-scale exponents: p1 scales the column by 10^p1 on the fly
// (power_functor), p2 scales the constant by 10^p2 - done ONCE, at entry.
// The result buffer comes from the global allocation pool and is popped from
// it, so ownership passes to the caller.
//
// BUG FIXES vs. the previous version:
//  - the forward ADD/MINUS/DIV "!p1" branches multiplied the constant by
//    10^p2 a second time even though d had already been scaled at entry
//    (the reverse branch and the p1 branches used the already-scaled d);
//  - the unscaled copy used by MUL was held in an unsigned int, truncating
//    64-bit constants and corrupting negative ones; it is now int_type.
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    //cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
    thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
    thrust::device_ptr<int_type> dev_ptr1(column1);
    int_type d1 = d;                       // unscaled value, used by MUL only
    if(p2)
        d = d*(unsigned int)pow(10, p2);   // scale the constant exactly once
    if(reverse == 0) {
        if (op_type.compare("MUL") == 0) {
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
        }
        else {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
        }
    }
    else {
        // reverse: the constant is the left-hand operand.
        if (op_type.compare("MUL") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
        }
        else {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
        };
    };
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(temp);
}
// Applies "column1 <op> column2" (reverse == 0) or "column2 <op> column1"
// (reverse != 0) on two device integer columns. op_type is one of
// MUL/ADD/MINUS/DIV (any unrecognized string falls through to DIV).
// p1/p2 are decimal-scale exponents: a non-zero value scales the matching
// column by 10^p on the fly via power_functor through a transform iterator.
// MUL ignores p1/p2 entirely -- NOTE(review): presumably because the product
// of two scaled decimals would double the scale; confirm against callers.
// The result buffer comes from the global allocation pool and is popped from
// it, so ownership passes to the caller.
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
// Forward order: column1 is the left-hand operand.
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
// Reverse order: column2 is the left-hand operand.
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
    // Applies "column1 <op> column2" (or the swapped order when reverse is
    // set; the order only matters for MINUS and DIV). op_type is one of
    // MUL/ADD/MINUS/DIV, anything else falls through to DIV.
    // The result buffer comes from the allocation pool and is popped from it,
    // so ownership passes to the caller.
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> out((float_type*)alloced_mem.back());
    thrust::device_ptr<float_type> a(column1);
    thrust::device_ptr<float_type> b(column2);
    // Select the operand order once instead of duplicating the operator chain.
    thrust::device_ptr<float_type> lhs = reverse ? b : a;
    thrust::device_ptr<float_type> rhs = reverse ? a : b;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<float_type>());
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(out);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
    // Applies "float(column1) <op> d" (or "d <op> float(column1)" when
    // reverse is set). The integer column is promoted to float into a scratch
    // buffer; the scalar d is broadcast into the result buffer and combined
    // in place. op_type is one of MUL/ADD/MINUS/DIV (default DIV).
    // The scratch buffer is freed here; the result buffer comes from the
    // allocation pool and ownership passes to the caller.
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> out((float_type*)alloced_mem.back());
    thrust::fill(out, out + mRecCount, d);
    thrust::device_ptr<int_type> int_col(column1);
    thrust::device_ptr<float_type> flt_col = thrust::device_malloc<float_type>(mRecCount);
    thrust::transform(int_col, int_col + mRecCount, flt_col, long_to_float_type());
    // Select the operand order once (matters for MINUS and DIV).
    thrust::device_ptr<float_type> lhs = reverse ? out : flt_col;
    thrust::device_ptr<float_type> rhs = reverse ? flt_col : out;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<float_type>());
    thrust::device_free(flt_col);
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(out);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
    // Applies "column1 <op> d" (or "d <op> column1" when reverse is set).
    // The scalar d is broadcast into the result buffer and combined in place.
    // op_type is one of MUL/ADD/MINUS/DIV (default DIV). The result buffer
    // comes from the allocation pool; ownership passes to the caller.
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> out((float_type*)alloced_mem.back());
    thrust::device_ptr<float_type> col(column1);
    thrust::fill(out, out + mRecCount, d);
    // Select the operand order once (matters for MINUS and DIV).
    thrust::device_ptr<float_type> lhs = reverse ? out : col;
    thrust::device_ptr<float_type> rhs = reverse ? col : out;
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, out, thrust::divides<float_type>());
    alloced_mem.pop_back();
    return (float_type*)thrust::raw_pointer_cast(out);
}
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
hipHostMalloc(&buff, fileSize, hipHostMallocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
res = (index_buffers[f1]+4 +8*sz + (vals_count+2)*int_size)[0];
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
//cout << index_name << " " << d_array[i] << " " << i << endl;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
// Initialize a CudaSet backed by compressed column files on disk (DIM
// tables). Reads optional "<file>.sort"/"<file>.presort" metadata to learn
// which fields the segments are (pre)sorted on, then registers each
// column's name/type/size; for int/decimal columns the compression type is
// read from the column's segment-0 header.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
// optional sort metadata: [u32 count] then count x ([u32 len][len bytes])
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
// same layout for the presort metadata
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
// register every column; each iteration consumes one entry from each queue
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
// skip to the 6th 4-byte header word; the last value read is the
// column's compression type
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
// type codes: 0 = integer (incl. timestamp/decimal), 1 = float, 2 = char
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else if ((typeRef.front()).compare("decimal") == 0) {
// decimals are stored as scaled integers; sizeRef carries the scale
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
// char column: buffers are allocated lazily; remember the string file
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
// Initialize an in-memory CudaSet (no backing files): registers each
// column's name/type/size and creates empty host/device vectors for it.
// Consumes one entry from each metadata queue per column.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
    mColumnCount = (unsigned int)nameRef.size();
    tmp_table = 0;
    filtered = 0;
    mRecCount = 0;
    hostRecCount = Recs;
    segCount = 0;
    for(unsigned int i = 0; i < mColumnCount; i++) {
        const string cname = nameRef.front();
        const string ctype = typeRef.front();
        const int csize = sizeRef.front();
        columnNames.push_back(cname);
        cols[colsRef.front()] = cname;
        ts_cols[cname] = (ctype == "timestamp") ? 1 : 0;
        // type codes: 0 = integer (incl. timestamp/decimal), 1 = float, 2 = char
        if(ctype == "int" || ctype == "timestamp") {
            type[cname] = 0;
            decimal[cname] = 0;
            decimal_zeroes[cname] = 0;
            h_columns_int[cname] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[cname] = thrust::device_vector<int_type>();
        }
        else if(ctype == "float") {
            type[cname] = 1;
            decimal[cname] = 0;
            h_columns_float[cname] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            d_columns_float[cname] = thrust::device_vector<float_type>();
        }
        else if(ctype == "decimal") {
            // decimals are stored as scaled integers; csize carries the scale
            type[cname] = 0;
            decimal[cname] = 1;
            decimal_zeroes[cname] = csize;
            h_columns_int[cname] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[cname] = thrust::device_vector<int_type>();
        }
        else {
            // char column: buffers are allocated lazily
            type[cname] = 2;
            decimal[cname] = 0;
            h_columns_char[cname] = nullptr;
            d_columns_char[cname] = nullptr;
            char_size[cname] = csize;
        };
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    };
};
// Minimal initializer: just record the row and column counts for an
// unfiltered set; no per-column storage is created here.
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
    filtered = 0;
    mRecCount = RecordCount;
    hostRecCount = RecordCount;
    mColumnCount = ColumnCount;
};
// Initialize this set as a projection of columns selected from the globally
// registered variables (varNames). For each selected column, metadata
// (type, decimal info, char size) is copied from the first registered
// CudaSet that contains it, and empty host/device vectors are created.
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
// NOTE(review): `a` is uninitialized here; if a selected column is not
// found in any varNames entry (or varNames is empty) the lookups below
// read through an indeterminate or stale pointer -- worth guarding.
CudaSet *a;
while(!op_sel.empty()) {
// find the first registered set that owns this column
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
// copy column metadata from the owning set
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
// char column: buffers allocated lazily, size copied from the owner
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
// Initialize this set as the result schema of a join between `a` and `b`:
// every selected column that exists in either input is registered once,
// with its metadata copied from the table that owns it (`a` checked first).
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
// first pass: count how many selected columns actually exist in a or b
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
// second pass: register each column once, preferring a's metadata over b's
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
// column comes from the left table
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
// dictionary-encoded string columns keep their string file mapping
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
// column comes from the right table
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
// Mirror a comparison opcode for when the operands are swapped.
// Mapping (labels as in the original comments):
//   2 ('>') -> 5,  1 ('<') -> 6,  6 ('>=') -> 1,  5 ('<=') -> 2;
// any other opcode is returned unchanged.
int_type reverse_op(int_type op_type)
{
    switch(op_type) {
    case 2:
        return 5;
    case 1:
        return 6;
    case 6:
        return 1;
    case 5:
        return 2;
    default:
        return op_type;
    }
}
// Query the device for the currently available global memory, in bytes.
size_t getFreeMem()
{
    size_t available = 0, total = 0;
    hipMemGetInfo(&available, &total);
    return available;
};
// Prepare device-side storage for the given fields of `a`.
// Filtered sets share one scratch buffer (alloced_tmp) sized for the
// largest segment of the source table; unfiltered sets get a per-column
// device allocation for any field not already on the device.
void allocColumns(CudaSet* a, queue<string> fields)
{
    if(a->filtered) {
        // Fixed: the original re-tested a->filtered in a nested if whose
        // else-branch (t = a) was unreachable inside this outer branch.
        CudaSet* t = varNames[a->source_name];
        // grow the shared scratch buffer if the source's largest segment
        // no longer fits
        if(int_size*t->maxRecs > alloced_sz) {
            if(alloced_sz) {
                hipFree(alloced_tmp);
            };
            hipMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
            alloced_sz = int_size*t->maxRecs;
        }
    }
    else {
        while(!fields.empty()) {
            if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
                a->allocColumnOnDevice(fields.front(), a->maxRecs);
            }
            fields.pop();
        };
    };
}
// Move one column of segment data from source table `t` into `a`: either
// gather rows through a's row permutation (prm_index 'R') or copy the whole
// segment through and adopt its row count.
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
    if(!a->onDevice(field))
        a->allocColumnOnDevice(field, a->maxRecs);
    if(a->prm_index == 'R')
        mygather(field, a, t, count, a->mRecCount);
    else {
        mycopy(field, a, t, count, t->mRecCount);
        a->mRecCount = t->mRecCount;
    };
}
// Widen columns that were copied to the GPU in bit-packed form.
// copyColumns() records each packed column's width in cpy_bits and its base
// value in cpy_init_val; here the 8/16/32-bit payloads are expanded to
// 64-bit integers in a scratch buffer, the base value is added back, and
// float columns are then converted back to float via long_to_float().
void copyFinalize(CudaSet* a, queue<string> fields)
{
set<string> uniques;
// scratch must hold mRecCount 8-byte values
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
// process each field once, and only if it was copied in packed form
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end()) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
// packed integer payload lives in the int column's storage
thrust::device_ptr<char> src((char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<char>());
}
else {
// packed float payload is staged in the float column's storage
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
// already 64-bit wide: just stage a copy in the scratch buffer
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
// add the per-column base value back to every element
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
// for floats: add in integer space, then reinterpret back to float
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
// Copy (and, for filtered sets, filter then gather) the listed columns of
// one segment to the GPU. `count` is the running element offset at which
// the segment's rows are placed; rsz grows device columns before appending,
// flt runs the set's filter first. Each field is processed at most once.
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
    set<string> uniques;
    if(a->filtered) { //filter the segment
        if(flt) {
            filter_op(a->fil_s, a->fil_f, segment);
        };
        if(rsz && a->mRecCount) {
            // grow every requested device column so the freshly filtered
            // rows can be appended after the previously loaded ones
            queue<string> fields1(fields);
            while(!fields1.empty()) {
                a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
                fields1.pop();
            };
            a->devRecCount = a->devRecCount + a->mRecCount;
        };
    };
    // reset per-copy packed-width bookkeeping used by copyFinalize()
    cpy_bits.clear();
    cpy_init_val.clear();
    // fixed: removed unused local `auto f(fields);`
    while(!fields.empty()) {
        if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
            if(a->filtered) {
                if(a->mRecCount) {
                    // stage the source segment, then gather the surviving rows
                    CudaSet *t = varNames[a->source_name];
                    alloced_switch = 1;
                    t->CopyColumnToGpu(fields.front(), segment);
                    gatherColumns(a, t, fields.front(), segment, count);
                    alloced_switch = 0;
                };
            }
            else {
                if(a->mRecCount) {
                    a->CopyColumnToGpu(fields.front(), segment, count);
                };
            };
            uniques.insert(fields.front());
        };
        fields.pop();
    };
}
// Gather g_size rows of one column through a's row permutation (prm_d) from
// the shared scratch buffer (alloced_tmp) into a's device column, writing
// from element `offset` on. If cpy_bits records a packed width for the
// column, the gather is performed at that width (copyFinalize() widens it
// later); otherwise the data is gathered at full width.
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
// integer/decimal column: packed payloads are staged in d_columns_int
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
// unpacked data: gather full-width integers
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
// float column: packed payloads are staged in d_columns_float's storage
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
// Copy g_size values of one column from the shared scratch buffer
// (alloced_tmp) into a's device column, starting at element `offset`.
// When cpy_bits records a packed width for the column the payload is still
// bit-packed (8/16/32 bits per value) and is copied at that width;
// copyFinalize() widens it later. 64-bit / absent entries are plain copies.
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
    if(t->type[colname] != 1) {
        // integer/decimal column: packed payloads are staged in d_columns_int
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                // Fixed: the destination offset was applied twice (once in
                // int_type units inside data()+offset, once more in short
                // units), unlike the 8/32/64-bit branches and mygather().
                thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
                thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
            };
        }
        else {
            // unpacked data: plain full-width copy
            thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
        };
    }
    else {
        // float column: packed payloads are staged in d_columns_float's storage
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                // Fixed: same double-offset problem as the integer branch above.
                thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
                thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
            };
        }
        else {
            thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
        };
    };
};
// Load the columns named in c1 (restricted to those present in `right`,
// with join column f2 queued last) for segments [start_segment, end_segment)
// onto the device. Returns the total number of rows loaded; rcount is set
// to right->maxRecs. rsz/flt are forwarded to copyColumns() to control
// device-column resizing and filtering.
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
// keep only columns that exist in `right`; f2 (if present) goes last
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
// pre-size device columns; filtered sets with rsz start from zero rows
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
// copy every segment, accumulating the total row count in cnt_r
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
// Widest character column (or dictionary string length) in the set;
// never less than 8.
size_t max_char(CudaSet* a)
{
    size_t widest = 8;
    for(const auto& cn : a->columnNames) {
        if(a->type[cn] == 2) {
            if(a->char_size[cn] > widest)
                widest = a->char_size[cn];
        }
        else if(a->type[cn] == 0 && a->string_map.find(cn) != a->string_map.end()) {
            // dictionary-encoded string column: look up its declared length
            // in the data dictionary via its "<table>.<column>" mapping
            auto s = a->string_map[cn];
            auto pos = s.find_first_of(".");
            auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
            if(len > widest)
                widest = len;
        };
    };
    return widest;
};
// Widest character column among the queued field names; never less than 8.
size_t max_char(CudaSet* a, queue<string> field_names)
{
    // renamed the local (it shadowed the function name in the original)
    size_t widest = 8;
    while(!field_names.empty()) {
        if(a->type[field_names.front()] == 2 && a->char_size[field_names.front()] > widest)
            widest = a->char_size[field_names.front()];
        field_names.pop();
    };
    return widest;
};
// Decide how many segments (and rows per segment) table `a` needs so the
// listed columns fit comfortably in free GPU memory.
void setSegments(CudaSet* a, queue<string> cols)
{
    size_t mem_available = getFreeMem();
    // bytes needed per row across the listed columns
    size_t row_bytes = 0;
    while(!cols.empty()) {
        row_bytes += (a->type[cols.front()] != 2) ? int_size : a->char_size[cols.front()];
        cols.pop();
    };
    if(a->mRecCount*row_bytes > mem_available/3) { //default is 3
        // NOTE(review): the trigger uses /3 but the segment count uses /5 --
        // presumably extra headroom, but worth confirming.
        a->segCount = (a->mRecCount*row_bytes)/(mem_available/5) + 1;
        a->maxRecs = (a->mRecCount/a->segCount)+1;
    };
};
// Refine the host-side sort permutation by a char key: gather the key
// values through the current permutation into `tmp`, then sort the
// permutation by them (descending when SortType is "DESC").
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
    const bool descending = (SortType.compare("DESC") == 0);
    str_sort_host(tmp, RecCount, permutation, descending ? 1 : 0, len);
}
// Reorder a device-side char column in place according to `permutation`,
// staging the original values through the scratch buffer `tmp`.
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
    // stage the keys, then gather them back into `key` in permuted order
    hipMemcpy((void*)tmp, (void*)key, RecCount*len, hipMemcpyDeviceToDevice);
    str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
// Host-side variant: gather `key` through `permutation` into `res`
// (out-of-place; `key` is left untouched).
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
// Apply the filter registered under name `s` to table `f` for one segment.
// A zone-map pre-check can decide the segment without running the filter:
// result 'R' means the filter kernel must run (producing a row-index
// permutation in b->prm_d), 'A' means every row of the segment passes, and
// any other result leaves zero rows.
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
// empty unfiltered source: produce an empty result set
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
// the filtered set shares the source's string files and sort metadata
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
//cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
//cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
// zone map couldn't decide: copy the filter's columns and run it
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
// collect the indices of the rows whose predicate evaluated true
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
hipFree(res);
}
else {
// segment decided wholesale: 'A' keeps every row, otherwise none
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
// after the last segment the source's device copies are no longer needed
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
// Load the right-hand table of a join onto the device for segments
// [start_seg, end_seg). For uncompressed tables the join key f2 is loaded
// first (with resize+filter), then the remaining requested columns;
// compressed tables are loaded through a single load_queue() call.
// Returns the number of rows loaded.
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
    size_t cnt_r = 0;
    //if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
    // need to allocate all right columns
    if(right->not_compressed) {
        // join key first...
        queue<string> key_only;
        key_only.push(f2);
        cnt_r = load_queue(key_only, right, "", rcount, start_seg, end_seg, 1, 1);
        // ...then every other requested column that exists in `right`
        queue<string> rest;
        while(!op_alt.empty()) {
            if(f2 != op_alt.front() &&
               std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
                rest.push(op_alt.front());
            };
            op_alt.pop();
        };
        if(!rest.empty())
            cnt_r = load_queue(rest, right, "", rcount, start_seg, end_seg, 0, 0);
    }
    else {
        cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
    };
    return cnt_r;
};
// Append the records of table `s` (source) to table `f` (destination).
// Three cases: both tables on disk (segment files are copied and string
// dictionaries merged), both in memory (host columns are appended), and
// in-memory source into on-disk destination (source segments are
// compressed into new destination segments).
void insert_records(const char* f, const char* s) {
    char buf[4096];
    size_t size, maxRecs, cnt = 0;
    string str_s, str_d;
    if(varNames.find(s) == varNames.end()) {
        process_error(3, "couldn't find " + string(s) );
    };
    CudaSet *a;
    a = varNames.find(s)->second;
    a->name = s;
    if(varNames.find(f) == varNames.end()) {
        process_error(3, "couldn't find " + string(f) );
    };
    CudaSet *b;
    b = varNames.find(f)->second;
    b->name = f;
    // if both source and destination are on disk
    cout << "SOURCES " << a->source << ":" << b->source << endl;
    if(a->source && b->source) {
        for(unsigned int i = 0; i < a->segCount; i++) {
            for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                if(a->type[a->columnNames[z]] != 2) {
                    // fixed-width column: byte-copy the source segment file to
                    // a new destination segment
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
                    str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
                    cout << str_s << " " << str_d << endl;
                    FILE* source = fopen(str_s.c_str(), "rb");
                    FILE* dest = fopen(str_d.c_str(), "wb");
                    // fixed: read at most sizeof(buf) bytes -- the original read
                    // BUFSIZ (often 8192) into this 4096-byte buffer, overflowing it
                    while ((size = fread(buf, 1, sizeof(buf), source)) != 0) {
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                }
                else { //merge strings
                    //read b's strings
                    str_s = b->load_file_name + "." + b->columnNames[z];
                    FILE* dest = fopen(str_s.c_str(), "rb");
                    auto len = b->char_size[b->columnNames[z]];
                    map<string, unsigned long long int> map_d;
                    buf[len] = 0;
                    unsigned long long cnt = 0;
                    while (fread(buf, len, 1, dest)) {
                        map_d[buf] = cnt;
                        cnt++;
                    };
                    fclose(dest);
                    unsigned long long int cct = cnt;
                    // copy the source segment's hash file straight across
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
                    str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
                    FILE* source = fopen(str_s.c_str(), "rb");
                    dest = fopen(str_d.c_str(), "wb");
                    while ((size = fread(buf, 1, sizeof(buf), source)) != 0) {  // fixed: sizeof(buf), not BUFSIZ
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                    // read a's dictionary (code position -> string)
                    str_s = a->load_file_name + "." + a->columnNames[z];
                    source = fopen(str_s.c_str(), "rb");
                    map<unsigned long long int, string> map_s;
                    buf[len] = 0;
                    cnt = 0;
                    while (fread(buf, len, 1, source)) {
                        map_s[cnt] = buf;
                        cnt++;
                    };
                    fclose(source);
                    // bring the source segment's codes to the host
                    queue<string> op_vx;
                    op_vx.push(a->columnNames[z]);
                    allocColumns(a, op_vx);
                    a->resize(a->maxRecs);
                    a->CopyColumnToGpu(a->columnNames[z], z, 0);
                    a->CopyColumnToHost(a->columnNames[z]);
                    // remap a's codes into b's dictionary, appending new strings
                    str_d = b->load_file_name + "." + b->columnNames[z];
                    fstream f_file;
                    f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
                    for(auto j = 0; j < a->mRecCount; j++) {
                        auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
                        if(map_d.find(ss) == map_d.end()) { //add
                            f_file.write((char *)ss.c_str(), len);
                            a->h_columns_int[a->columnNames[z]][j] = cct;
                            cct++;
                        }
                        else {
                            a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
                        };
                    };
                    f_file.close();
                    // write the remapped codes as a compressed index segment
                    thrust::device_vector<int_type> d_col(a->mRecCount);
                    thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
                    auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
                    pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
                };
            };
        };
        if(a->maxRecs > b->maxRecs)
            maxRecs = a->maxRecs;
        else
            maxRecs = b->maxRecs;
        // refresh destination headers with the combined segment/record counts
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
        };
    }
    else if(!a->source && !b->source) { //if both source and destination are in memory
        size_t oldCount = b->mRecCount;
        b->resize(a->mRecCount);
        for(unsigned int z = 0; z< b->mColumnCount; z++) {
            if(b->type[a->columnNames[z]] == 0) {
                thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
            }
            else if(b->type[a->columnNames[z]] == 1) {
                thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
            }
            else {
                hipMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, hipMemcpyHostToHost);
            };
        };
    }
    else if(!a->source && b->source) {
        // in-memory source appended to on-disk destination: compress each
        // source segment into new destination segments
        total_segments = b->segCount;
        total_count = b->mRecCount;
        total_max = b->maxRecs;
        queue<string> op_vx;
        for(unsigned int i=0; i < a->columnNames.size(); i++)
            op_vx.push(a->columnNames[i]);
        allocColumns(a, op_vx);
        a->resize(a->maxRecs);
        for(unsigned int i = 0; i < a->segCount; i++) {
            if (a->filtered) {
                copyColumns(a, op_vx, i, cnt);
                a->CopyToHost(0, a->mRecCount);
            };
            a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
        };
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
        };
    };
};
void delete_records(const char* f) {
    // Deletes the records of a disk-based set that match the currently parsed
    // filter (op_type/op_value/op_nums/...), rewriting surviving segments in
    // place and renumbering/removing segment files as needed.
    CudaSet *a;
    a = varNames.find(f)->second;
    a->name = f;
    size_t totalRemoved = 0;
    size_t maxRecs = 0;
    if(!a->keep) { // temporary variable
        process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
    }
    else { // read matching segments, delete, compress and write on a disk replacing the original segments
        string str, str_old;
        queue<string> op_vx;
        size_t cnt;
        // Make sure every column listed in the data dictionary is present on
        // the set (the filter may reference only a subset of them).
        for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
            op_vx.push((*it).first);
            if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
                if ((*it).second.col_type == 0) {
                    a->type[(*it).first] = 0;
                    a->decimal[(*it).first] = 0;
                    a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
                    a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
                }
                else if((*it).second.col_type == 1) {
                    a->type[(*it).first] = 1;
                    a->decimal[(*it).first] = 0;
                    a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
                    a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
                }
                else if ((*it).second.col_type == 3) {  // decimal column stored in the float containers
                    a->type[(*it).first] = 1;
                    a->decimal[(*it).first] = 1;
                    a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
                    a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
                }
                else {
                    a->type[(*it).first] = 2;
                    a->decimal[(*it).first] = 0;
                    a->h_columns_char[(*it).first] = nullptr;
                    a->d_columns_char[(*it).first] = nullptr;
                    a->char_size[(*it).first] = (*it).second.col_length;
                };
                a->columnNames.push_back((*it).first);
            }
        };
        allocColumns(a, op_vx);
        a->resize(a->maxRecs);
        a->prm_d.resize(a->maxRecs);
        size_t cc = a->mRecCount;
        size_t tmp;
        void* d;  // device scratch buffer large enough for one gathered column
        CUDA_SAFE_CALL(hipMalloc((void **) &d, a->maxRecs*float_size));
        unsigned int new_seg_count = 0;
        char map_check;
        for(unsigned int i = 0; i < a->segCount; i++) {
            // Zone maps can prove a segment has no matching rows ('N') so it
            // can be kept without being read and filtered.
            map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
            if(verbose)
                cout << "MAP CHECK segment " << i << " " << map_check << endl;
            if(map_check != 'N') {
                cnt = 0;
                copyColumns(a, op_vx, i, cnt);
                tmp = a->mRecCount;
                if(a->mRecCount) {
                    bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
                    thrust::device_ptr<bool> bp((bool*)res);
                    // Keep the indexes of rows where the filter is FALSE (the survivors).
                    thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
                                    bp, a->prm_d.begin(), thrust::logical_not<bool>());
                    a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
                    hipFree(res);
                    if(a->mRecCount > maxRecs)
                        maxRecs = a->mRecCount;
                    if (a->mRecCount) {
                        totalRemoved = totalRemoved + (tmp - a->mRecCount);
                        if (a->mRecCount == tmp) { //none deleted
                            // Segment survives intact: only renumber its files if needed.
                            if(new_seg_count != i) {
                                for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
                                    auto colname = (*it).first;
                                    str_old = a->load_file_name + "." + colname + "." + to_string(i);
                                    str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
                                    remove(str.c_str());
                                    rename(str_old.c_str(), str.c_str());
                                };
                            };
                            new_seg_count++;
                        }
                        else { //some deleted - gather the survivors and re-compress the segment
                            map<string, col_data> s = data_dict[a->load_file_name];
                            for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
                                string colname = (*it).first;
                                str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
                                if(a->type[colname] == 0) {
                                    thrust::device_ptr<int_type> d_col((int_type*)d);
                                    thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
                                    pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
                                }
                                else if(a->type[colname] == 1) {
                                    thrust::device_ptr<float_type> d_col((float_type*)d);
                                    if(a->decimal[colname]) {
                                        thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
                                        thrust::device_ptr<long long int> d_col_dec((long long int*)d);
                                        thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
                                        pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
                                    }
                                    else {
                                        // Plain floats are written uncompressed (comp_type 3).
                                        thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
                                        thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
                                        fstream binary_file(str.c_str(),ios::out|ios::binary);
                                        binary_file.write((char *)&a->mRecCount, 4);
                                        binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
                                        unsigned int comp_type = 3;
                                        binary_file.write((char *)&comp_type, 4);
                                        binary_file.close();
                                    };
                                }
                                else {
                                    // Char columns: rewrite the dictionary index (.hash) only.
                                    thrust::device_ptr<int_type> d_col((int_type*)d);
                                    thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
                                    pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
                                };
                            };
                            new_seg_count++;
                        };
                    }
                    else {
                        // Every row in the segment was deleted: drop it entirely.
                        totalRemoved = totalRemoved + tmp;
                    };
                }
            }
            else {
                // No matches in this segment: keep it, renumbering files if needed.
                if(new_seg_count != i) {
                    for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                        str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
                        str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
                        remove(str.c_str());
                        rename(str_old.c_str(), str.c_str());
                    };
                };
                new_seg_count++;
                maxRecs = a->maxRecs;
            };
        };
        if (new_seg_count < a->segCount) {
            // Remove the files of segment numbers that no longer exist.
            for(unsigned int i = new_seg_count; i < a->segCount; i++) {
                for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                    str = a->load_file_name + "." + a->columnNames[z];
                    str += "." + to_string(i);
                    remove(str.c_str());
                };
            };
        };
        // Fix: rewrite the header of EVERY column. The original loop iterated
        // over segment numbers (new_seg_count..segCount) and indexed
        // columnNames with them, which skips columns and can read out of bounds.
        for(unsigned int i = 0; i < a->columnNames.size(); i++) {
            a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
        };
        a->mRecCount = cc;
        a->prm_d.resize(0);
        a->segCount = new_seg_count;
        a->deAllocOnDevice();
        hipFree(d);
    };
};
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
    // Persist the two-level data dictionary (table name -> column name ->
    // col_data) to a binary file: [count(8)] then per table
    // [name_len(8)][name][col_count(8)] and per column
    // [name_len(8)][name][col_type(4)][col_length(4)].
    fstream binary_file(file_name.c_str(), ios::out|ios::binary|ios::trunc);
    size_t table_count = data_dict.size();
    binary_file.write((char *)&table_count, 8);
    for (auto& table : data_dict) {
        size_t name_len = table.first.size();
        binary_file.write((char *)&name_len, 8);
        binary_file.write((char *)table.first.data(), name_len);
        size_t col_count = table.second.size();
        binary_file.write((char *)&col_count, 8);
        for (auto& col : table.second) {
            name_len = col.first.size();
            binary_file.write((char *)&name_len, 8);
            binary_file.write((char *)col.first.data(), name_len);
            binary_file.write((char *)&col.second.col_type, 4);
            binary_file.write((char *)&col.second.col_length, 4);
        };
    };
    binary_file.close();
}
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
    // Load the data dictionary written by save_col_data (same on-disk layout).
    // Fix: the original read names into a fixed char buffer[4000] with an
    // unchecked length read from the file — a malformed/corrupt dictionary
    // could overflow the stack buffer. Reading into std::string removes both
    // the overflow and the 4000-byte name limit.
    size_t str_len, recs, len1;
    string str1, str2;
    unsigned int col_type, col_length;
    fstream binary_file;
    binary_file.open(file_name.c_str(),ios::in|ios::binary);
    if(binary_file.is_open()) {
        binary_file.read((char*)&recs, 8);
        for(unsigned int i = 0; i < recs; i++) {
            binary_file.read((char*)&str_len, 8);
            str1.resize(str_len);
            if(str_len)
                binary_file.read(&str1[0], str_len);  // table name
            binary_file.read((char*)&len1, 8);
            for(unsigned int j = 0; j < len1; j++) {
                binary_file.read((char*)&str_len, 8);
                str2.resize(str_len);
                if(str_len)
                    binary_file.read(&str2[0], str_len);  // column name
                binary_file.read((char*)&col_type, 4);
                binary_file.read((char*)&col_length, 4);
                data_dict[str1][str2].col_type = col_type;
                data_dict[str1][str2].col_length = col_length;
            };
        };
        binary_file.close();
    }
    else {
        cout << "Couldn't open data dictionary" << endl;
    };
}
bool var_exists(CudaSet* a, string name) {
    // True when 'name' is one of the set's column names.
    const auto& cols = a->columnNames;
    return std::find(cols.begin(), cols.end(), name) != cols.end();
}
int file_exist (const char *filename)
{
    // Nonzero when 'filename' exists and can be opened for reading.
    std::ifstream probe(filename);
    return probe.good() ? 1 : 0;
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
    // Returns true only when 'right' has filter columns and, for every filter
    // column that belongs to the right table, the join bitmap file
    // "<left>.<right>.<col>.0" exists on disk. Like the original, every file
    // is probed even after a miss.
    queue<string> pending(right->fil_value);
    bool all_present = !pending.empty();
    while(!pending.empty()) {
        const string col = pending.front();
        pending.pop();
        if(std::find(right->columnNames.begin(), right->columnNames.end(), col) != right->columnNames.end()) {
            string fname = left->load_file_name + "." + right->load_file_name + "." + col + ".0";
            if(!file_exist(fname.c_str()))
                all_present = 0;
        };
    };
    return all_present;
}
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
    //check if there are join bitmap indexes
    // Returns 1 when 'right' has no filter at all, or when every filter column
    // of 'right' that belongs to the right table has a join bitmap file
    // "<left>.<right>.<col>.0" on disk.
    // Side effect on success: the whole filter expression of 'right'
    // (fil_nums, fil_nums_f, fil_value, fil_type) is MOVED onto 'left', with
    // right-table column names rewritten to the bitmap file prefix, and joined
    // to any pre-existing filter on 'left' with a trailing AND (the filter
    // queues hold a postfix expression).
    queue<string> cols(right->fil_value);
    bool bitmaps_exist = 1;
    if(cols.size() == 0) {
        bitmaps_exist = 1;
        return 1;
    };
    while(cols.size() ) {
        if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
            string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
            if( !file_exist(fname.c_str())) {
                bitmaps_exist = 0;
            };
        };
        cols.pop();
    };
    if(bitmaps_exist) {
        // Drain the numeric operand queues of 'right' into 'left'.
        while(!right->fil_nums.empty() ) {
            left->fil_nums.push(right->fil_nums.front());
            right->fil_nums.pop();
        };
        while(!right->fil_nums_f.empty() ) {
            left->fil_nums_f.push(right->fil_nums_f.front());
            right->fil_nums_f.pop();
        };
        // Value operands: right-table columns become bitmap file references.
        while(!right->fil_value.empty() ) {
            if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
                string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
                left->fil_value.push(fname);
            }
            else
                left->fil_value.push(right->fil_value.front());
            right->fil_value.pop();
        };
        // Combine with the existing filter of 'left' (if any) via AND.
        bool add_and = 1;
        if(left->fil_type.empty())
            add_and = 0;
        while(!right->fil_type.empty() ) {
            left->fil_type.push(right->fil_type.front());
            right->fil_type.pop();
        };
        if(add_and) {
            left->fil_type.push("AND");
        };
        return 1;
    }
    else {
        return 0;
    };
}
void check_sort(const string str, const char* rtable, const char* rid)
{
    // Append the one-byte sort_check flag of table 'rtable' to file 'str'.
    // NOTE(review): 'rid' is unused here — confirm with callers.
    CudaSet* right = varNames.find(rtable)->second;
    fstream flag_file(str.c_str(), ios::out|ios::binary|ios::app);
    flag_file.write((char *)&right->sort_check, 1);
    flag_file.close();
}
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
    // Extend the sort permutation in raw_ptr by the dictionary-encoded char
    // column 'colname': gather the column's dictionary indexes through the
    // permutation, materialize the actual strings from the dictionary file,
    // and stable-sort the permutation by them (on device or host).
    auto s = a->string_map[colname];
    auto pos = s.find_first_of(".");
    // dictionary entry length comes from the data dictionary: "<table>.<col>"
    auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
    // NOTE(review): any previous h_columns_char[colname] buffer is not freed
    // before being replaced here — possible leak; confirm ownership.
    a->h_columns_char[colname] = new char[a->mRecCount*len];
    memset(a->h_columns_char[colname], 0, a->mRecCount*len);
    thrust::device_ptr<unsigned int> perm(raw_ptr);
    thrust::device_ptr<int_type> temp_int((int_type*)temp);
    // temp_int[i] = dictionary index of the i-th row under the permutation
    thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
    //for(int z = 0 ; z < a->mRecCount; z++) {
    //cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
    //};
    //cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
    hipMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, hipMemcpyDeviceToHost);
    // Fetch each string from the dictionary file by its index.
    // NOTE(review): fopen result is not checked; a missing dictionary file
    // would crash on fseek/fread.
    FILE *f;
    f = fopen(a->string_map[colname].c_str(), "rb");
    for(int z = 0 ; z < a->mRecCount; z++) {
        fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
        fread(a->h_columns_char[colname] + z*len, 1, len, f);
    };
    fclose(f);
    if(!host) {
        // Device-side string sort of the permutation.
        void *d;
        hipMalloc((void **) &d, a->mRecCount*len);
        a->d_columns_char[colname] = (char*)d;
        hipMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, hipMemcpyHostToDevice);
        if (ord.compare("DESC") == 0 )
            str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
        else
            str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
        hipFree(d);
    }
    else {
        // Host-side string sort of the permutation.
        if (ord.compare("DESC") == 0 )
            str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
        else
            str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
    };
}
#ifdef _WIN64
// Total physical RAM in bytes (Windows).
size_t getTotalSystemMemory()
{
    MEMORYSTATUSEX status;
    status.dwLength = sizeof(status);
    GlobalMemoryStatusEx(&status);
    return status.ullTotalPhys;
}
#else
// Total physical RAM in bytes (POSIX). Returns 0 when sysconf fails —
// the original multiplied the raw results, so a -1 error return produced
// a garbage (huge) size.
size_t getTotalSystemMemory()
{
    long pages = sysconf(_SC_PHYS_PAGES);
    long page_size = sysconf(_SC_PAGE_SIZE);
    if(pages < 0 || page_size < 0)
        return 0;
    return (size_t)pages * (size_t)page_size;
}
#endif
| aae896206eb91af21727b4e98da6590c77ca4bd9.cu | /*
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
// --- global counters and timers shared across the engine ---
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
// --- run-mode flags ---
bool verbose;
bool interactive, ssd, delta, star;
// scratch device buffers shared by the pfor decompression routines
void* d_v = nullptr;
void* s_v = nullptr;
// current operation parsed from the query plan: sort keys and operand stacks
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
// data dictionary: table name -> (column name -> type/length metadata)
map<string, map<string, col_data> > data_dict;
map<unsigned int, map<unsigned long long int, size_t> > char_hash;
// in-memory cache of segment files used in interactive mode
// (buffer_names is the FIFO eviction order; see readSegmentsFromFile)
map<string, char*> index_buffers;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
// temporary device buffer used when alloced_switch is set
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
struct f_equal_to
{
    // Approximate float equality: |x - y| < EPSILON.
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type d = x - y;
        return (d < EPSILON) && (d > -EPSILON);
    }
};
struct f_less
{
    // x < y with an EPSILON margin (x is less only if y exceeds it by EPSILON).
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type d = y - x;
        return d > EPSILON;
    }
};
struct f_greater
{
    // x > y with an EPSILON margin.
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type d = x - y;
        return d > EPSILON;
    }
};
struct f_greater_equal_to
{
    // x >= y within EPSILON: strictly greater, or approximately equal.
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type d = x - y;
        return (d > EPSILON) || ((d < EPSILON) && (d > -EPSILON));
    }
};
struct f_less_equal
{
    // x <= y within EPSILON: strictly less, or approximately equal.
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type d = x - y;
        return ((-d) > EPSILON) || ((d < EPSILON) && (d > -EPSILON));
    }
};
struct f_not_equal_to
{
    // Approximate float inequality: |x - y| > EPSILON.
    __host__ __device__
    bool operator()(const float_type x, const float_type y)
    {
        const float_type d = x - y;
        return (d > EPSILON) || (d < -EPSILON);
    }
};
struct long_to_float_type
{
    // Widening conversion from the integer column type to the float type.
    __host__ __device__
    float_type operator()(const int_type x)
    {
        return static_cast<float_type>(x);
    }
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
    // Scales a value by 10^a.
    // Fix: the original computed the multiplier as
    // (unsigned int)pow((double)10,(double)a), which is inexact on libm
    // implementations where pow(10,n) returns slightly below the integer
    // (truncating to e.g. 999), and silently overflows unsigned int for
    // a > 9. An integer multiply loop is exact for any exponent that fits T.
    unsigned int a;
    __host__ __device__
    power_functor(unsigned int a_) { a = a_; }
    __host__ __device__
    T operator()(T x)
    {
        T scale = 1;
        for(unsigned int i = 0; i < a; i++)
            scale = scale * 10;
        return x * scale;
    }
};
struct is_zero
{
    // Predicate: true for zero values.
    __host__ __device__
    bool operator()(const int &x)
    {
        return !x;
    }
};
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
// Forward declarations (implementations appear later in this file).
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
    : mColumnCount(0), mRecCount(0)
{
    // Text-sourced set (source = 1, text_source = 1); column metadata comes
    // from the load statement.
    initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
    text_source = 1;
    source = 1;
    fil_s = nullptr;
    fil_f = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
    : mColumnCount(0), mRecCount(0)
{
    // Binary disk-backed set (source = 1, text_source = 0); 'max' becomes
    // maxRecs, the per-segment record capacity.
    maxRecs = max;
    initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
    text_source = 0;
    source = 1;
    fil_s = nullptr;
    fil_f = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
    // In-memory intermediate result set: not disk- or text-backed, not kept.
    initialize(RecordCount, ColumnCount);
    text_source = 0;
    source = 0;
    keep = false;
    fil_s = nullptr;
    fil_f = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
    // In-memory set built from a select list and its aliases.
    initialize(op_sel, op_sel_as);
    text_source = 0;
    source = 0;
    keep = false;
    fil_s = nullptr;
    fil_f = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
    // In-memory set built from two existing sets plus a select list.
    initialize(a, b, op_sel, op_sel_as);
    text_source = 0;
    source = 0;
    keep = false;
    fil_s = nullptr;
    fil_f = nullptr;
};
CudaSet::~CudaSet()
{
    // All host and device storage is released by free().
    free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
    // Reserve device storage for one column. Non-float columns (int and
    // dictionary-encoded char) live in the int containers.
    if (type[colname] == 1)
        d_columns_float[colname].resize(RecordCount);
    else
        d_columns_int[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
    // Grow every host column by addRecs records.
    mRecCount += addRecs;
    for(auto& colname : columnNames) {
        if(type[colname] == 1)
            h_columns_float[colname].resize(mRecCount);
        else
            h_columns_int[colname].resize(mRecCount);
    };
};
void CudaSet::resize(size_t addRecs)
{
    // Grow every host column by addRecs records.
    mRecCount += addRecs;
    for(auto& colname : columnNames) {
        if(type[colname] == 1)
            h_columns_float[colname].resize(mRecCount);
        else
            h_columns_int[colname].resize(mRecCount);
    };
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
    // Release the device memory of a single column (resize(0) +
    // shrink_to_fit actually frees a thrust::device_vector's storage).
    if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
        auto& col = d_columns_int[colname];
        if(!col.empty()) {
            col.resize(0);
            col.shrink_to_fit();
        };
    }
    else if (type[colname] == 1 && !d_columns_float.empty()) {
        auto& col = d_columns_float[colname];
        if (!col.empty()) {
            col.resize(0);
            col.shrink_to_fit();
        };
    };
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
    // Reserve device storage for every column of the set.
    for(auto& colname : columnNames)
        allocColumnOnDevice(colname, RecordCount);
};
void CudaSet::deAllocOnDevice()
{
    // Free all device-side storage of this set: every named column, the
    // permutation vector, any remaining device vectors in the maps, and —
    // for filtered views — the source set's device storage as well.
    for(unsigned int i=0; i < columnNames.size(); i++) {
        deAllocColumnOnDevice(columnNames[i]);
    };
    if(prm_d.size()) {
        prm_d.resize(0);
        prm_d.shrink_to_fit();
    };
    // Sweep the maps directly in case they hold entries not in columnNames.
    for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
        if(it->second.size() > 0) {
            it->second.resize(0);
            it->second.shrink_to_fit();
        };
    };
    for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
        if(it->second.size() > 0) {
            it->second.resize(0);
            it->second.shrink_to_fit();
        };
    };
    if(filtered) { // dealloc the source
        if(varNames.find(source_name) != varNames.end()) {
            varNames[source_name]->deAllocOnDevice();
        };
    };
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
    // Resize the device storage of one column to exactly RecCount records.
    if (type[colname] == 1)
        d_columns_float[colname].resize(RecCount);
    else
        d_columns_int[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
    // Resize the device storage of every column to RecCount records.
    for(auto& colname : columnNames)
        resizeDeviceColumn(RecCount, colname);
};
bool CudaSet::onDevice(string colname)
{
    // True when the column currently has a non-empty device buffer.
    // NOTE(review): like the original, map operator[] default-inserts an
    // empty vector for a name not yet present.
    if (type[colname] == 1)
        return !d_columns_float.empty() && d_columns_float[colname].size();
    return !d_columns_int.empty() && d_columns_int[colname].size();
}
CudaSet* CudaSet::copyDeviceStruct()
{
    // Create a new, empty set with the same schema as this one: metadata is
    // copied, per-column containers are created empty, and mRecCount starts
    // at 0. The caller owns the returned set.
    CudaSet* a = new CudaSet(mRecCount, mColumnCount);
    a->not_compressed = not_compressed;
    a->segCount = segCount;
    a->maxRecs = maxRecs;
    a->columnNames = columnNames;
    a->ts_cols = ts_cols;
    a->cols = cols;
    a->type = type;
    a->char_size = char_size;
    a->decimal = decimal;
    a->decimal_zeroes = decimal_zeroes;
    for(unsigned int i=0; i < columnNames.size(); i++) {
        if(a->type[columnNames[i]] == 0) {
            a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
            a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
        }
        else if(a->type[columnNames[i]] == 1) {
            a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
            a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
        }
        else {
            // char columns are raw buffers, allocated on demand
            a->h_columns_char[columnNames[i]] = nullptr;
            a->d_columns_char[columnNames[i]] = nullptr;
        };
    };
    a->load_file_name = load_file_name;
    a->mRecCount = 0;
    return a;
}
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
    // Random-access read of selected rows (prm_vh, ascending row numbers) of
    // one compressed segment directly from disk into dest's host column at
    // 'offset'. Reads 4 KB pages and serves consecutive rows from the cached
    // page. Returns the segment's frame-of-reference base value (lower_val).
    // Fix: the 64-bit branch tested 'bits == 84' (a typo) in both code paths,
    // so 64-bit-wide values were never read.
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    // one 4 KB page viewed at each supported bit width
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;
    bool idx_set = 0;
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);  // stored bit width: 8, 16, 32 or 64
    if(type[colname] == 0) {
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                // row falls outside the cached page: seek and reload
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                if(bits == 8) {
                    fread(&val_c_r[0], 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_c_r[0];
                }
                else if(bits == 16) {
                    fread(&val_s_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_s_r[0];
                }
                else if(bits == 32) {
                    fread(&val_i_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_i_r[0];
                }
                else if(bits == 64) {   // was: bits == 84
                    fread(&val_l_r, 4096, 1, f);
                    dest->h_columns_int[colname][i + offset] = val_l_r[0];
                }
            }
            else {
                // row is inside the cached page
                if(bits == 8) {
                    dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
                }
                else if(bits == 16) {
                    dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
                }
                else if(bits == 32) {
                    dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
                }
                else if(bits == 64) {   // was: bits == 84
                    dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
                }
            };
        };
    }
    else if(type[colname] == 1) {
        // float columns: copy bits/8 raw bytes per value out of the page
        for(unsigned int i = 0; i < prm_vh.size(); i++) {
            if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
                fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
                idx = prm_vh[i];
                idx_set = 1;
                fread(val_c_r, 4096, 1, f);
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
            }
            else {
                memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
            };
        };
    }
    else {
        //no strings in fact tables
    };
    fclose(f);
    return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
    // Same page-cached random-access read as readSsdSegmentsFromFile, but the
    // values go into a plain unsigned int vector (truncating wider widths, as
    // before). Returns the segment's frame-of-reference base value.
    // Fix: the 64-bit branch tested 'bits == 84' (a typo) in both code paths.
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    FILE* f = fopen(f1.c_str(), "rb" );
    if(!f) {
        cout << "Error opening " << f1 << " file " << endl;
        exit(0);
    };
    unsigned int cnt, bits;
    int_type lower_val;
    fread(&cnt, 4, 1, f);
    fread(&lower_val, 8, 1, f);
    fseek(f, cnt - (8+4) + 32, SEEK_CUR);
    fread(&bits, 4, 1, f);  // stored bit width: 8, 16, 32 or 64
    unsigned short int val_s_r[4096/2];
    char val_c_r[4096];
    unsigned int val_i_r[4096/4];
    unsigned long long int val_l_r[4096/8];
    unsigned int idx;
    bool idx_set = 0;
    for(unsigned int i = 0; i < prm_vh.size(); i++) {
        if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
            // row falls outside the cached 4 KB page: seek and reload
            fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
            idx = prm_vh[i];
            idx_set = 1;
            if(bits == 8) {
                fread(val_c_r, 4096, 1, f);
                dest[i] = val_c_r[0];
            }
            else if(bits == 16) {
                fread(val_s_r, 4096, 1, f);
                dest[i] = val_s_r[0];
            }
            else if(bits == 32) {
                fread(val_i_r, 4096, 1, f);
                dest[i] = val_i_r[0];
            }
            else if(bits == 64) {   // was: bits == 84
                fread(val_l_r, 4096, 1, f);
                dest[i] = val_l_r[0];
            }
        }
        else {
            // row is inside the cached page
            if(bits == 8) {
                dest[i] = val_c_r[prm_vh[i]-idx];
            }
            else if(bits == 16) {
                dest[i] = val_s_r[prm_vh[i]-idx];
            }
            else if(bits == 32) {
                dest[i] = val_i_r[prm_vh[i]-idx];
            }
            else if(bits == 64) {   // was: bits == 84
                dest[i] = val_l_r[prm_vh[i]-idx];
            }
        };
    };
    fclose(f);
    return lower_val;
}
std::clock_t tot_disk;  // cumulative time spent reading segment files from disk
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
    // Read one compressed segment of a column from disk into the host-side
    // column buffer or, when running interactively, into the global pinned
    // buffer cache. Char columns (type 2) read their .idx dictionary file.
    // Fix: buffer_names.push(f1) was executed twice per cached file, so the
    // FIFO eviction queue held duplicate entries and eviction could pop a
    // name whose buffer had already been freed and erased.
    string f1 = load_file_name + "." + colname + "." + to_string(segNum);
    if(type[colname] == 2)
        f1 = f1 + ".idx";
    std::clock_t start1 = std::clock();
    if(interactive) { //check if data are in buffers
        if(buffers.find(f1) == buffers.end()) { // add data to buffers
            FILE* f = fopen(f1.c_str(), "rb" );
            if(!f) {
                process_error(3, "Error opening " + string(f1) +" file " );
            };
            fseek(f, 0, SEEK_END);
            long fileSize = ftell(f);
            // evict oldest buffers until the new file fits under the cap
            while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
                cudaFreeHost(buffers[buffer_names.front()]);
                total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
                buffer_sizes.erase(buffer_names.front());
                buffers.erase(buffer_names.front());
                buffer_names.pop();
            };
            fseek(f, 0, SEEK_SET);
            char* buff;
            cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);  // pinned for fast H2D copies
            fread(buff, fileSize, 1, f);
            fclose(f);
            buffers[f1] = buff;
            buffer_sizes[f1] = fileSize;
            buffer_names.push(f1);  // fix: was pushed a second time below
            total_buffer_size = total_buffer_size + fileSize;
            cout << "added buffer " << f1 << " " << fileSize << endl;
        };
        // data is served from the cache; just make sure the host vector is
        // large enough to act as scratch for the decompressor
        if(type[colname] != 1) {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            if(cnt > h_columns_int[colname].size()/8 + 10)
                h_columns_int[colname].resize(cnt/8 + 10);
        }
        else {
            unsigned int cnt = ((unsigned int*)buffers[f1])[0];
            if(cnt > h_columns_float[colname].size()/8 + 10)
                h_columns_float[colname].resize(cnt/8 + 10);
        }
    }
    else {
        FILE* f = fopen(f1.c_str(), "rb" );
        if(!f) {
            cout << "Error opening " << f1 << " file " << endl;
            exit(0);
        };
        // layout: [payload byte count (4)] [payload + 52 bytes of metadata]
        if(type[colname] != 1) {
            if(1 > h_columns_int[colname].size())
                h_columns_int[colname].resize(1);
            fread(h_columns_int[colname].data(), 4, 1, f);
            unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
            if(cnt/8+10 > h_columns_int[colname].size()) {
                h_columns_int[colname].resize(cnt + 10);
            };
            size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        else {
            if(1 > h_columns_float[colname].size())
                h_columns_float[colname].resize(1);
            fread(h_columns_float[colname].data(), 4, 1, f);
            unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
            if(cnt/8+10 > h_columns_float[colname].size())
                h_columns_float[colname].resize(cnt + 10);
            size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
            if(rr != cnt+52) {
                char buf[1024];
                sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
                process_error(3, string(buf));
            };
        }
        fclose(f);
    };
    tot_disk = tot_disk + (std::clock() - start1);
};
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
    // Copy one segment of a column to the device, either by plain host->device
    // copy (uncompressed sets) or by reading the compressed segment from disk
    // and decompressing it on the GPU. When alloced_switch is set the data
    // lands in the shared alloced_tmp buffer instead of the column's own
    // device vector. Updates mRecCount to the segment's record count.
    if(not_compressed) {
        // calculate how many records we need to copy
        if(segment < segCount-1) {
            mRecCount = maxRecs;
        }
        else {
            // last segment holds the remainder
            mRecCount = hostRecCount - maxRecs*(segCount-1);
        };
        if(type[colname] != 1) {
            if(!alloced_switch) {
                thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
            }
            else {
                thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
                thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
            };
        }
        else {
            if(!alloced_switch) {
                thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
            }
            else {
                thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
                thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
            };
        }
    }
    else {
        readSegmentsFromFile(segment,colname);
        // lazily allocate the small device scratch buffers the decompressor needs
        if(!d_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
        if(!s_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
        string f1;
        if(type[colname] == 2) {
            // char columns decompress their dictionary index file
            f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
        }
        else {
            f1 = load_file_name + "." + colname + "." + to_string(segment);
        };
        if(type[colname] != 1) {
            if(!alloced_switch) {
                // source is either the in-memory buffer cache or the host column
                if(buffers.find(f1) == buffers.end()) {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
                }
                else {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
                };
            }
            else {
                if(buffers.find(f1) == buffers.end()) {
                    mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
                }
                else {
                    mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
                };
            };
        }
        else {
            if(decimal[colname]) {
                // decimals are stored as scaled integers; decompress then
                // convert in place to float unless we are in the copy phase
                if(!alloced_switch) {
                    if(buffers.find(f1) == buffers.end()) {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
                    }
                    else {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
                    };
                    if(!phase_copy) {
                        thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
                        thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
                    };
                }
                else {
                    if(buffers.find(f1) == buffers.end()) {
                        mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
                    }
                    else {
                        mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
                    };
                    if(!phase_copy) {
                        thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
                        thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
                        thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
                    };
                    //for(int i = 0; i < mRecCount;i++)
                    //cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
                };
            }
            //else // uncompressed float
            // will have to fix it later so uncompressed data will be written by segments too
        }
    };
}
// Bring an entire column onto the GPU.  Uncompressed data is a straight
// host->device copy; compressed data is decoded segment by segment with
// pfor_decompress, accumulating the total record count into mRecCount.
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
    if(not_compressed) {
        // Uncompressed path: copy mRecCount records from the host mirror.
        if(type[colname] != 1)
            thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
        else
            thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
    }
    else {
        // Small device scratch buffers required by pfor_decompress;
        // allocated lazily once and reused afterwards.
        if(!d_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
        if(!s_v)
            CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
        size_t cnt = 0;   // running total of decompressed records
        string f1;
        for(unsigned int i = 0; i < segCount; i++) {
            readSegmentsFromFile(i,colname);
            // Type 2 (char) columns are decoded through their ".idx" file.
            if(type[colname] == 2) {
                f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
            }
            else {
                f1 = load_file_name + "." + colname + "." + to_string(i);
            };
            if(type[colname] == 0) {
                // Integer column: source is either the cached buffer for this
                // segment file or the host copy; output appended at offset cnt.
                if(buffers.find(f1) == buffers.end()) {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
                }
                else {
                    mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
                };
            }
            else if(type[colname] == 1) {
                if(decimal[colname]) {
                    if(buffers.find(f1) == buffers.end()) {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
                    }
                    else {
                        mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
                    };
                    if(!phase_copy) {
                        // Decimals decompress as scaled 64-bit integers; convert
                        // them in place to float_type.
                        thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
                        thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
                    };
                }
                // else uncompressed float
                // will have to fix it later so uncompressed data will be written by segments too
            };
            cnt = cnt + mRecCount;
            //totalRecs = totals + mRecCount;
        };
        mRecCount = cnt;
    };
}
// Copy RecCount device records of one column into its host mirror,
// starting at the given host offset.
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
    if (type[colname] == 1) {
        auto& dev = d_columns_float[colname];
        thrust::copy(dev.begin(), dev.begin() + RecCount, h_columns_float[colname].begin() + offset);
    }
    else {
        auto& dev = d_columns_int[colname];
        thrust::copy(dev.begin(), dev.begin() + RecCount, h_columns_int[colname].begin() + offset);
    }
}
// Convenience overload: copy the whole column (mRecCount records) starting
// at host offset 0.
void CudaSet::CopyColumnToHost(string colname)
{
    CopyColumnToHost(colname, 0, mRecCount);
}
// Copy `count` records of every column from the device to the host,
// writing into each host column at `offset`.
void CudaSet::CopyToHost(size_t offset, size_t count)
{
    for (const auto& name : columnNames) {
        CopyColumnToHost(name, offset, count);
    }
}
// Raw device pointer to the named float column's storage.
float_type* CudaSet::get_float_type_by_name(string name)
{
    auto& column = d_columns_float[name];
    return thrust::raw_pointer_cast(column.data());
}
// Raw device pointer to the named integer column's storage.
int_type* CudaSet::get_int_by_name(string name)
{
    auto& column = d_columns_int[name];
    return thrust::raw_pointer_cast(column.data());
}
// Raw host pointer to the named float column's host-side storage.
float_type* CudaSet::get_host_float_by_name(string name)
{
    auto& column = h_columns_float[name];
    return thrust::raw_pointer_cast(column.data());
}
// Raw host pointer to the named integer column's host-side storage.
int_type* CudaSet::get_host_int_by_name(string name)
{
    auto& column = h_columns_int[name];
    return thrust::raw_pointer_cast(column.data());
}
// Mark group boundaries over the key columns in columnRef (data is assumed
// already ordered by those keys): grp[i] is set to 1 whenever any key column
// changes between record i and record i+1, i.e. record i ends a group.
// grp_count receives the number of groups found.
void CudaSet::GroupBy(stack<string> columnRef)
{
    if(grp.size() < mRecCount)
        grp.resize(mRecCount);
    thrust::fill(grp.begin(), grp.begin()+mRecCount,0);
    if(scratch.size() < mRecCount)
        scratch.resize(mRecCount*sizeof(bool));
    // Per-column boundary flags, built in scratch and OR-ed into grp below.
    thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
    d_group[mRecCount-1] = 1;   // the last record always terminates a group
    unsigned int bits;
    // NOTE: i is deliberately never incremented; columnRef.pop() shrinks
    // columnRef.size() each pass, so the loop runs once per key column.
    for(int i = 0; i < columnRef.size(); columnRef.pop()) {
        if(cpy_bits.empty())
            bits = 0;
        else
            bits = cpy_bits[columnRef.top()];
        // Compare adjacent elements at the column's packed width (8/16/32
        // bits per value) or at full element width when not bit-packed.
        if(bits == 8) {
            if (type[columnRef.top()] != 1) { // int_type
                thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
                thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
            }
            else {
                thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
                thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
            };
        }
        else if(bits == 16) {
            if (type[columnRef.top()] != 1) { // int_type
                thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
                thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
            }
            else {
                thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
                thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
            };
        }
        else if(bits == 32) {
            if (type[columnRef.top()] != 1) { // int_type
                thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
                thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
            }
            else {
                thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_float[columnRef.top()].data()));
                thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
            };
        }
        else {
            if (type[columnRef.top()] != 1) { // int_type
                thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
                                  d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
            }
            else {
                // floats compared with the project's tolerant comparator
                thrust::transform(d_columns_float[columnRef.top()].begin(), d_columns_float[columnRef.top()].begin() + mRecCount - 1,
                                  d_columns_float[columnRef.top()].begin()+1, d_group, f_not_equal_to());
            };
        }
        // Accumulate this column's boundaries into the overall grp bitmap.
        thrust::transform(d_group, d_group+mRecCount, grp.begin(), grp.begin(), thrust::logical_or<bool>());
    };
    grp_count = thrust::count(grp.begin(), grp.begin()+mRecCount, 1);
};
// Register (or grow) an integer column and fill it from a device buffer.
// The device data is mirrored into the host copy as well.
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
    bool is_new = std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end();
    if (is_new) {
        columnNames.push_back(colname);
        type[colname] = 0;
        d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
        h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
    }
    else {
        // Column already exists; grow either side if it is too small.
        if (d_columns_int[colname].size() < recCount)
            d_columns_int[colname].resize(recCount);
        if (h_columns_int[colname].size() < recCount)
            h_columns_int[colname].resize(recCount);
    }
    // Copy the incoming device data into the column, then mirror to the host.
    thrust::device_ptr<int_type> src(col);
    thrust::copy(src, src + recCount, d_columns_int[colname].begin());
    thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + recCount, h_columns_int[colname].begin());
};
// Register (or grow) a float column, record its decimal flag, and fill it
// from a device buffer.  Unlike the integer overload, the data is not
// mirrored to the host here.
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
    bool is_new = std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end();
    if (is_new) {
        columnNames.push_back(colname);
        type[colname] = 1;
        d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
        h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
    }
    else {
        // Column already exists; grow either side if it is too small.
        if (d_columns_float[colname].size() < recCount)
            d_columns_float[colname].resize(recCount);
        if (h_columns_float[colname].size() < recCount)
            h_columns_float[colname].resize(recCount);
    }
    decimal[colname] = is_decimal;
    thrust::device_ptr<float_type> src(col);
    thrust::copy(src, src + recCount, d_columns_float[colname].begin());
};
// Build a permutation that sorts the set ascending by the columns queued in
// sf, applied most-significant key last (each update_permutation pass refines
// the previous ordering).  Char columns are permuted on the host.
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
    permutation.resize(mRecCount);
    thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
    unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
    void* temp;
    // 8 bytes per record: scratch large enough for int_type or float_type keys.
    CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*8));
    string sort_type = "ASC";
    while(!sf.empty()) {
        if (type[sf.front()] == 0) {
            update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
        }
        else if (type[sf.front()] == 1) {
            update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
        }
        else {
            // Char column: round-trip the permutation through the host.
            thrust::host_vector<unsigned int> permutation_h = permutation;
            char* temp1 = new char[char_size[sf.front()]*mRecCount];
            update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
            delete [] temp1;
            permutation = permutation_h;
        };
        sf.pop();
    };
    cudaFree(temp);
}
// Write mCount records (starting at host `offset`) of every column to disk
// as compressed per-column segment files, optionally sorting first (op_sort)
// and splitting the data into partition_count partitions.  Updates the
// running totals (total_count, total_max, total_segments) and, when
// check_type requests it, rewrites the per-column headers.
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
    string str(file_name);
    thrust::device_vector<unsigned int> permutation;
    long long int oldCount;
    bool int_check = 0;
    void* d;
    // Device scratch used to stage one column's worth of data.
    CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
    total_count = total_count + mCount;
    if (mCount > total_max && op_sort.empty()) {
        total_max = mCount;
    };
    // When appending to an existing table, pick up the running totals
    // from the first column's header file.
    if(!total_segments && append) {
        string s= file_name + "." + columnNames[0] + ".header";
        ifstream binary_file(s.c_str(),ios::binary);
        if(binary_file) {
            binary_file.read((char *)&oldCount, 8);
            binary_file.read((char *)&total_segments, 4);
            binary_file.read((char *)&maxRecs, 4);
            if(total_max < maxRecs)
                total_max = maxRecs;
            binary_file.close();
            total_count = oldCount + mCount;
        };
    };
    // Optional ".interval" + ".key" metadata files trigger interval
    // calculation before compression.
    string s = file_name + ".interval";
    ifstream f(s.c_str());
    if (f.good()) {
        f.seekg (0, f.end);
        int length = f.tellg();
        f.seekg (0, f.beg);
        char* buff = new char[length];
        f.read(buff, length);
        f.close();
        // ".interval" holds two column names separated by '|'.
        char* p = strtok(buff, "|");
        string s1(p);
        p = strtok(NULL, "|");
        string s2(p);
        delete [] buff;
        s = file_name + ".key";
        ifstream f1(s.c_str());
        if (f1.good()) {
            f1.seekg (0, f1.end);
            length = f1.tellg();
            f1.seekg (0, f1.beg);
            buff = new char[length+1];
            buff[length] = 0;
            f1.read(buff, length);
            f1.close();
            string s3(buff);
            delete [] buff;
            load_file_name = file_name;
            calc_intervals(s1, s2, s3, total_segments, append);
            int_check = 1;
        };
    };
    if(!op_sort.empty()) { //sort the segment
        gpu_perm(op_sort, permutation);
    };
    // here we need to check for partitions and if partition_count > 0 -> create partitions
    if(mCount < partition_count || partition_count == 0)
        partition_count = 1;
    unsigned int partition_recs = mCount/partition_count;
    if(!op_sort.empty()) {
        if(total_max < partition_recs)
            total_max = partition_recs;
    };
    total_segments++;
    old_segments = total_segments;
    size_t new_offset;
    for(unsigned int i = 0; i < columnNames.size(); i++) {
        std::clock_t start1 = std::clock();
        string colname = columnNames[i];
        str = file_name + "." + colname;
        curr_file = str;
        str += "." + to_string(total_segments-1);
        new_offset = 0;
        if(type[colname] == 0) {
            // Integer column.
            thrust::device_ptr<int_type> d_col((int_type*)d);
            if(!op_sort.empty()) {
                // Gather into sorted order, then compress each partition.
                thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
                for(unsigned int p = 0; p < partition_count; p++) {
                    str = file_name + "." + colname;
                    curr_file = str;
                    str += "." + to_string(total_segments-1);
                    if (p < partition_count - 1) {
                        pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
                    }
                    else {
                        // Last partition takes the remainder.
                        pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
                    };
                    new_offset = new_offset + partition_recs;
                    total_segments++;
                };
            }
            else {
                if(!int_check) {
                    thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
                    pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
                }
                else {
                    // calc_intervals already left this column on the device.
                    pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
                };
            };
        }
        else if(type[colname] == 1) {
            if(decimal[colname]) {
                // Decimal float column: stored as scaled 64-bit integers.
                thrust::device_ptr<float_type> d_col((float_type*)d);
                if(!op_sort.empty()) {
                    thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
                    thrust::device_ptr<long long int> d_col_dec((long long int*)d);
                    thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
                    for(unsigned int p = 0; p < partition_count; p++) {
                        str = file_name + "." + colname;
                        curr_file = str;
                        str += "." + to_string(total_segments-1);
                        if (p < partition_count - 1)
                            pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
                        else
                            pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
                        new_offset = new_offset + partition_recs;
                        total_segments++;
                    };
                }
                else {
                    thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
                    thrust::device_ptr<long long int> d_col_dec((long long int*)d);
                    thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
                    pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
                };
            }
            else { // do not compress -- float
                // Raw float data is written as (count, payload, comp_type=3).
                thrust::device_ptr<float_type> d_col((float_type*)d);
                if(!op_sort.empty()) {
                    thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
                    thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
                    for(unsigned int p = 0; p < partition_count; p++) {
                        str = file_name + "." + colname;
                        curr_file = str;
                        str += "." + to_string(total_segments-1);
                        unsigned int curr_cnt;
                        if (p < partition_count - 1)
                            curr_cnt = partition_recs;
                        else
                            curr_cnt = mCount - partition_recs*p;
                        fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
                        binary_file.write((char *)&curr_cnt, 4);
                        binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
                        new_offset = new_offset + partition_recs;
                        unsigned int comp_type = 3;   // marks the uncompressed-float format
                        binary_file.write((char *)&comp_type, 4);
                        binary_file.close();
                    };
                }
                else {
                    fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
                    binary_file.write((char *)&mCount, 4);
                    binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
                    unsigned int comp_type = 3;
                    binary_file.write((char *)&comp_type, 4);
                    binary_file.close();
                };
            };
        }
        else { //char
            //populate char_hash
            // On the first appended segment, rebuild the hash->index map from
            // the strings already on disk so duplicates can be detected.
            if(append && total_segments == 1) {
                string s= file_name + "." + colname;
                ifstream binary_file(s.c_str(),ios::binary);
                if(binary_file) {
                    char* strings = new char[oldCount*char_size[colname]];
                    binary_file.read(strings, oldCount*char_size[colname]);
                    binary_file.close();
                    unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
                    for (unsigned int z = 0 ; z < oldCount; z++) {
                        char_hash[ind][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
                    };
                    delete [] strings;
                };
            };
            if(!op_sort.empty()) {
                // Permute char data on the host, then compress each partition.
                unsigned int* h_permutation = new unsigned int[mRecCount];
                thrust::copy(permutation.begin(), permutation.end(), h_permutation);
                char* t = new char[char_size[colname]*mRecCount];
                apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
                delete [] h_permutation;
                thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
                delete [] t;
                for(unsigned int p = 0; p < partition_count; p++) {
                    str = file_name + "." + colname;
                    curr_file = str;
                    str += "." + to_string(total_segments-1);
                    if (p < partition_count - 1)
                        compress_char(str, colname, partition_recs, new_offset, total_segments-1);
                    else
                        compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
                    new_offset = new_offset + partition_recs;
                    total_segments++;
                };
            }
            else {
                compress_char(str, colname, mCount, offset, total_segments-1);
            };
        };
        if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
            if(!op_sort.empty())
                writeHeader(file_name, colname, total_segments-1);
            else {
                writeHeader(file_name, colname, total_segments);
            };
        };
        // Reset per column so every column starts from the same segment
        // number; the final value is fixed up once after the loop.
        total_segments = old_segments;
    };
    cudaFree(d);
    if(!op_sort.empty()) {
        total_segments = (old_segments-1)+partition_count;
    };
    permutation.resize(0);
    permutation.shrink_to_fit();
}
// Sort the set by (dt1, index), hash string index columns into 63-bit
// integers, and run the gpu_interval functor to fill interval end values
// (dt2).  When appending, previously written segments are re-read, patched
// via gpu_interval_set and re-compressed in place.
void CudaSet::calc_intervals(string dt1, string dt2, string index, unsigned int total_segs, bool append) {
    alloced_switch = 1;
    not_compressed = 1;
    thrust::device_vector<unsigned int> permutation;
    thrust::device_vector<int_type> stencil(maxRecs);
    thrust::device_vector<int_type> d_dt2(maxRecs);
    thrust::device_vector<int_type> d_index(maxRecs);
    phase_copy = 0;
    queue<string> sf;
    sf.push(dt1);
    sf.push(index);
    gpu_perm(sf, permutation);
    // Apply the sort permutation to every column (char columns on the host).
    for(unsigned int i = 0; i < columnNames.size(); i++) {
        if(type[columnNames[i]] == 0)
            apply_permutation(d_columns_int[columnNames[i]], thrust::raw_pointer_cast(permutation.data()), mRecCount, (int_type*)thrust::raw_pointer_cast(stencil.data()), 0);
        else {
            unsigned int* h_permutation = new unsigned int[mRecCount];
            thrust::copy(permutation.begin(), permutation.end(), h_permutation);
            char* t = new char[char_size[columnNames[i]]*mRecCount];
            apply_permutation_char_host(h_columns_char[columnNames[i]], h_permutation, mRecCount, t, char_size[columnNames[i]]);
            delete [] h_permutation;
            thrust::copy(t, t+ char_size[columnNames[i]]*mRecCount, h_columns_char[columnNames[i]]);
            delete [] t;
        };
    };
    // String index: replace values by MurmurHash64A/2 so the index can be
    // compared as integers on the GPU.
    if(type[index] == 2) {
        d_columns_int[index] = thrust::device_vector<int_type>(mRecCount);
        h_columns_int[index] = thrust::host_vector<int_type>(mRecCount);
        for(int i = 0; i < mRecCount; i++)
            h_columns_int[index][i] = MurmurHash64A(&h_columns_char[index][i*char_size[index]], char_size[index], hash_seed)/2;
        d_columns_int[index] = h_columns_int[index];
    };
    thrust::counting_iterator<unsigned int> begin(0);
    gpu_interval ff(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_columns_int[dt2].data()), thrust::raw_pointer_cast(d_columns_int[index].data()));
    thrust::for_each(begin, begin + mRecCount - 1, ff);
    auto stack_count = mRecCount;
    if(append) {
        not_compressed = 0;
        // Size the shared scratch buffer for the wider of int or char values.
        size_t mysz = 8;
        if(char_size[index] > int_size)
            mysz = char_size[index];
        if(mysz*maxRecs > alloced_sz) {
            if(alloced_sz) {
                cudaFree(alloced_tmp);
            };
            cudaMalloc((void **) &alloced_tmp, mysz*maxRecs);
            alloced_sz = mysz*maxRecs;
        }
        thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
        d_columns_int[dt2].resize(0);
        thrust::device_vector<unsigned int> output(stack_count);
        for(int i = 0; i < total_segments; i++) {
            CopyColumnToGpu(dt2, i, 0);
            // Only patch segments whose dt2 still contains zeroes
            // (presumably open-ended intervals - confirm against writer).
            if(thrust::count(d_col, d_col+mRecCount,0)) {
                thrust::copy(d_col, d_col+mRecCount, d_dt2.begin());
                if(type[index] == 2) {
                    // Read the precomputed hashes for this segment.
                    string f1 = load_file_name + "." + index + "." + to_string(i) + ".hash";
                    FILE* f = fopen(f1.c_str(), "rb" );
                    unsigned int cnt;
                    fread(&cnt, 4, 1, f);
                    unsigned long long int* buff = new unsigned long long int[cnt];
                    fread(buff, cnt*8, 1, f);
                    fclose(f);
                    thrust::copy(buff, buff + cnt, d_index.begin());
                    delete [] buff;
                }
                else {
                    CopyColumnToGpu(index, i, 0);
                    thrust::copy(d_col, d_col+mRecCount, d_index.begin());
                };
                // Locate each old record's key within the freshly sorted set.
                thrust::lower_bound(d_columns_int[index].begin(), d_columns_int[index].begin()+stack_count, d_index.begin(), d_index.begin() + mRecCount, output.begin());
                gpu_interval_set f(thrust::raw_pointer_cast(d_columns_int[dt1].data()), thrust::raw_pointer_cast(d_dt2.data()),
                                   thrust::raw_pointer_cast(d_index.data()), thrust::raw_pointer_cast(d_columns_int[index].data()),
                                   thrust::raw_pointer_cast(output.data()));
                thrust::for_each(begin, begin + mRecCount, f);
                string str = load_file_name + "." + dt2 + "." + to_string(i);;
                pfor_compress( thrust::raw_pointer_cast(d_dt2.data()), mRecCount*int_size, str, h_columns_int[dt2], 0);
            };
        };
    }
};
// Write <file>.<col>.header: total record count (8 bytes), segment count,
// max records per segment and the column's cnt_counts entry (4 bytes each).
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
    const string base = file_name + "." + colname;   // also the cnt_counts key
    const string header_path = base + ".header";
    fstream binary_file(header_path.c_str(), ios::out|ios::binary|ios::trunc);
    binary_file.write((char *)&total_count, 8);
    binary_file.write((char *)&tot_segs, 4);
    binary_file.write((char *)&total_max, 4);
    binary_file.write((char *)&cnt_counts[base], 4);
    binary_file.close();
};
// Rewrite <file>.<col>.header with explicit counts: newRecs (8 bytes),
// segment count and max records per segment (4 bytes each).
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
    const string base = file_name + "." + colname;
    const string header_path = base + ".header";
    fstream binary_file(header_path.c_str(), ios::out|ios::binary|ios::trunc);
    binary_file.write((char *)&newRecs, 8);
    binary_file.write((char *)&tot_segs, 4);
    binary_file.write((char *)&maxRecs1, 4);
    binary_file.close();
};
// Persist the sort/presort column lists next to the data files.
// <file>.sort and <file>.presort each hold a 4-byte count followed by
// (4-byte length, bytes) pairs; when a list is empty, any stale file
// from a previous run is removed instead.
void CudaSet::writeSortHeader(string file_name)
{
    string str(file_name);
    unsigned int idx;
    if(!op_sort.empty()) {
        str += ".sort";
        fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
        idx = (unsigned int)op_sort.size();
        binary_file.write((char *)&idx, 4);
        queue<string> os(op_sort);
        while(!os.empty()) {
            if(verbose)
                // Fix: log the column name itself; the original printed
                // `idx`, which at this point held the list size (first
                // iteration) or the previous column's length.
                cout << "sorted on " << os.front() << endl;
            idx = os.front().size();
            binary_file.write((char *)&idx, 4);
            binary_file.write(os.front().data(), idx);
            os.pop();
        };
        binary_file.close();
    }
    else {
        str += ".sort";
        remove(str.c_str());
    };
    str = file_name;
    if(!op_presort.empty()) {
        str += ".presort";
        fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
        idx = (unsigned int)op_presort.size();
        binary_file.write((char *)&idx, 4);
        queue<string> os(op_presort);
        while(!os.empty()) {
            idx = os.front().size();
            binary_file.write((char *)&idx, 4);
            binary_file.write(os.front().data(), idx);
            os.pop();
        };
        binary_file.close();
    }
    else {
        str += ".presort";
        remove(str.c_str());
    };
}
using namespace mgpu;
// Emit up to `limit` records through the row_cb callback (0 = all).
// String columns are materialized by seeking into their backing dictionary
// files; compressed/filtered data is streamed segment by segment via the GPU.
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
    //-- This should/will be converted to an array holding pointers of malloced sized structures--
    char bigbuf[MAXCOLS * MAXFIELDSIZE];
    memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
    char *fields[MAXCOLS];
    const char *dcolumns[MAXCOLS];
    size_t mCount; // num records in play
    bool print_all = 0;
    string ss, str;
    int rows = 0;
    if(limit != 0 && limit < mRecCount)
        mCount = limit;
    else {
        mCount = mRecCount;
        print_all = 1;
    };
    cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
    unsigned int cc =0;
    unordered_map<string, FILE*> file_map;
    unordered_map<string, unsigned int> len_map;
    // Open each string column's dictionary file once, remembering the
    // fixed value length from the data dictionary.
    for(unsigned int i = 0; i < columnNames.size(); i++)
    {
        fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
        dcolumns[cc++] = columnNames[i].c_str();
        if(string_map.find(columnNames[i]) != string_map.end()) {
            auto s = string_map[columnNames[i]];
            auto pos = s.find_first_of(".");
            auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
            FILE *f;
            f = fopen(string_map[columnNames[i]].c_str(), "rb");
            file_map[string_map[columnNames[i]]] = f;
            len_map[string_map[columnNames[i]]] = len;
        };
    };
    // The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
    if(not_compressed && prm_d.size() == 0) {
        // Data already resident on the host and unfiltered: format directly.
        for(unsigned int i=0; i < mCount; i++) { // for each record
            for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
                if (type[columnNames[j]] != 1) {
                    if(string_map.find(columnNames[j]) == string_map.end()) {
                        if(decimal_zeroes[columnNames[j]]) {
                            // Fixed-point value: re-insert the decimal point.
                            str = std::to_string(h_columns_int[columnNames[j]][i]);
                            //cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
                            while(str.length() <= decimal_zeroes[columnNames[j]])
                                str = '0' + str;
                            str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
                            sprintf(fields[j], "%s", str.c_str());
                        }
                        else {
                            if(!ts_cols[columnNames[j]])
                                sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
                            else {
                                // Timestamp column: value is milliseconds
                                // since the epoch (whole seconds + remainder).
                                time_t ts = (h_columns_int[columnNames[j]][i])/1000;
                                auto ti = gmtime(&ts);
                                char buffer[30];
                                auto rem = (h_columns_int[columnNames[j]][i])%1000;
                                strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
                                //fprintf(file_pr, "%s", buffer);
                                //fprintf(file_pr, ".%d", rem);
                                // NOTE(review): rem is likely 64-bit here, so
                                // "%d" may mismatch on some platforms - verify.
                                sprintf(fields[j], "%s.%d", buffer,rem);
                                /*time_t tt = h_columns_int[columnNames[j]][i];
                                auto ti = localtime(&tt);
                                char buffer[10];
                                strftime(buffer,80,"%Y-%m-%d", ti);
                                sprintf(fields[j], "%s", buffer);
                                */
                            };
                        };
                    }
                    else {
                        // String column: fetch the value from the dictionary file.
                        fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
                        fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
                        fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
                    };
                }
                else
                    sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
            };
            row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
            rows++;
        };
    }
    else {
        // Compressed or filtered data: stream it segment by segment
        // through the GPU, copying each batch back to the host first.
        queue<string> op_vx;
        for(unsigned int i = 0; i < columnNames.size(); i++)
            op_vx.push(columnNames[i]);
        if(prm_d.size() || source) {
            allocColumns(this, op_vx);
        };
        unsigned int curr_seg = 0;
        size_t cnt = 0;
        size_t curr_count, sum_printed = 0;
        resize(maxRecs);
        while(sum_printed < mCount || print_all) {
            if(prm_d.size() || source) { // if host arrays are empty
                copyColumns(this, op_vx, curr_seg, cnt);
                size_t olRecs = mRecCount;
                mRecCount = olRecs;
                CopyToHost(0,mRecCount);
                if(sum_printed + mRecCount <= mCount || print_all)
                    curr_count = mRecCount;
                else
                    curr_count = mCount - sum_printed;   // truncate to the limit
            }
            else
                curr_count = mCount;
            sum_printed = sum_printed + mRecCount;
            for(unsigned int i=0; i < curr_count; i++) {
                for(unsigned int j=0; j < columnNames.size(); j++) {
                    if (type[columnNames[j]] != 1) {
                        if(string_map.find(columnNames[j]) == string_map.end())
                            sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
                        else {
                            fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
                            fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
                            fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
                        };
                    }
                    else
                        sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
                };
                row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
                rows++;
            };
            curr_seg++;
            if(curr_seg == segCount)
                print_all = 0;
        };
    }; // end else
    for(auto it = file_map.begin(); it != file_map.end(); it++)
        fclose(it->second);
}
// Persist the set either as delimited text (binary == 0, optionally to
// stdout when `term` is set) or as compressed binary column files via
// compress().  `limit` caps the number of records written (0 = all).
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
    if (mRecCount == 0 && binary == 1 && !term) { // write tails
        // Nothing to write: just refresh the per-column headers.
        for(unsigned int j=0; j < columnNames.size(); j++) {
            writeHeader(file_name, columnNames[j], total_segments);
        };
        return;
    };
    size_t mCount;
    bool print_all = 0;
    string str;
    if(limit != 0 && limit < mRecCount)
        mCount = limit;
    else {
        mCount = mRecCount;
        print_all = 1;
    };
    if(binary == 0) {
        // ---- delimited text output ----
        unordered_map<string, FILE*> file_map;
        unordered_map<string, unsigned int> len_map;
        string bf;
        unsigned int max_len = 0;
        // Open each string column's dictionary file once, tracking the
        // widest value so bf can be sized for any of them.
        for(unsigned int j=0; j < columnNames.size(); j++) {
            if(string_map.find(columnNames[j]) != string_map.end()) {
                auto s = string_map[columnNames[j]];
                auto pos = s.find_first_of(".");
                auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
                if(len > max_len)
                    max_len = len;
                FILE *f;
                f = fopen(string_map[columnNames[j]].c_str(), "rb");
                file_map[string_map[columnNames[j]]] = f;
                len_map[string_map[columnNames[j]]] = len;
            };
        };
        bf.reserve(max_len);
        FILE *file_pr;
        if(!term) {
            file_pr = fopen(file_name.c_str(), "w");
            if (!file_pr)
                cout << "Could not open file " << file_name << endl;
        }
        else
            file_pr = stdout;
        if(not_compressed && prm_d.size() == 0) {
            // Data already on the host and unfiltered: print directly.
            for(unsigned int i=0; i < mCount; i++) {
                for(unsigned int j=0; j < columnNames.size(); j++) {
                    if (type[columnNames[j]] != 1 ) {
                        if(string_map.find(columnNames[j]) == string_map.end()) {
                            if(decimal_zeroes[columnNames[j]]) {
                                // Fixed-point value: re-insert the decimal point.
                                str = std::to_string(h_columns_int[columnNames[j]][i]);
                                //cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
                                while(str.length() <= decimal_zeroes[columnNames[j]])
                                    str = '0' + str;
                                str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
                                fprintf(file_pr, "%s", str.c_str());
                            }
                            else {
                                if(!ts_cols[columnNames[j]]) {
                                    fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
                                }
                                else {
                                    // Timestamp column: value is milliseconds
                                    // since the epoch.
                                    time_t ts = (h_columns_int[columnNames[j]][i])/1000;
                                    auto ti = gmtime(&ts);
                                    char buffer[30];
                                    auto rem = (h_columns_int[columnNames[j]][i])%1000;
                                    strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
                                    fprintf(file_pr, "%s", buffer);
                                    // NOTE(review): rem is likely 64-bit here;
                                    // "%d" may mismatch - verify.
                                    fprintf(file_pr, ".%d", rem);
                                };
                            };
                        }
                        else {
                            //fprintf(file_pr, "%.*s", string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].size(), string_hash[columnNames[j]][h_columns_int[columnNames[j]][i]].c_str());
                            // String column: fetch the value from its dictionary file.
                            fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
                            fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
                            fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
                        };
                        fputs(sep, file_pr);
                    }
                    else {
                        fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
                        fputs(sep, file_pr);
                    }
                };
                if (i != mCount -1 )
                    fputs("\n",file_pr);
            };
            if(!term)
                fclose(file_pr);
        }
        else {
            // Compressed or filtered data: stream it segment by segment
            // through the GPU, copying each batch back to the host first.
            queue<string> op_vx;
            string ss;
            for(unsigned int j=0; j < columnNames.size(); j++)
                op_vx.push(columnNames[j]);
            if(prm_d.size() || source) {
                allocColumns(this, op_vx);
            };
            unsigned int curr_seg = 0;
            size_t cnt = 0;
            size_t curr_count, sum_printed = 0;
            mRecCount = 0;
            resize(maxRecs);
            while(sum_printed < mCount || print_all) {
                if(prm_d.size() || source) {
                    copyColumns(this, op_vx, curr_seg, cnt);
                    if(curr_seg == 0) {
                        // Re-evaluate the limit once real counts are known.
                        if(limit != 0 && limit < mRecCount) {
                            mCount = limit;
                            print_all = 0;
                        }
                        else {
                            mCount = mRecCount;
                            print_all = 1;
                        };
                    };
                    // if host arrays are empty
                    size_t olRecs = mRecCount;
                    mRecCount = olRecs;
                    CopyToHost(0,mRecCount);
                    //cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
                    if(sum_printed + mRecCount <= mCount || print_all) {
                        curr_count = mRecCount;
                    }
                    else {
                        curr_count = mCount - sum_printed;   // truncate to the limit
                    };
                }
                else {
                    curr_count = mCount;
                };
                sum_printed = sum_printed + mRecCount;
                //cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
                for(unsigned int i=0; i < curr_count; i++) {
                    for(unsigned int j=0; j < columnNames.size(); j++) {
                        if (type[columnNames[j]] != 1) {
                            if(string_map.find(columnNames[j]) == string_map.end()) {
                                cout << "here3 " << endl;
                                if(decimal_zeroes[columnNames[j]]) {
                                    // Fixed-point value: re-insert the decimal point.
                                    str = std::to_string(h_columns_int[columnNames[j]][i]);
                                    //cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
                                    while(str.length() <= decimal_zeroes[columnNames[j]])
                                        str = '0' + str;
                                    str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
                                    fprintf(file_pr, "%s", str.c_str());
                                }
                                else {
                                    if(!ts_cols[columnNames[j]]) {
                                        fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
                                    }
                                    else {
                                        time_t ts = (h_columns_int[columnNames[j]][i])/1000;
                                        auto ti = gmtime(&ts);
                                        char buffer[30];
                                        auto rem = (h_columns_int[columnNames[j]][i])%1000;
                                        strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
                                        fprintf(file_pr, "%s", buffer);
                                        fprintf(file_pr, ".%d", rem);
                                    };
                                };
                            }
                            else {
                                fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
                                fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
                                fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
                            };
                            fputs(sep, file_pr);
                        }
                        else {
                            fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
                            fputs(sep, file_pr);
                        };
                    };
                    if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
                        fputs("\n",file_pr);
                };
                curr_seg++;
                if(curr_seg == segCount)
                    print_all = 0;
            };
            if(!term) {
                fclose(file_pr);
            };
        };
        for(auto it = file_map.begin(); it != file_map.end(); it++)
            fclose(it->second);
    }
    else {
        // ---- binary (compressed) output ----
        //lets update the data dictionary
        for(unsigned int j=0; j < columnNames.size(); j++) {
            data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
            if(type[columnNames[j]] != 2) {
                if(decimal[columnNames[j]])
                    data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
                else if (ts_cols[columnNames[j]])
                    data_dict[file_name][columnNames[j]].col_length = UINT_MAX;   // sentinel for timestamp columns
                else
                    data_dict[file_name][columnNames[j]].col_length = 0;
            }
            else
                data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
        };
        save_dict = 1;
        if(text_source) { //writing a binary file using a text file as a source
            compress(file_name, 0, 1, 0, mCount, append);
            for(unsigned int i = 0; i< columnNames.size(); i++)
                if(type[columnNames[i]] == 2)
                    deAllocColumnOnDevice(columnNames[i]);
        }
        else { //writing a binary file using a binary file as a source
            fact_file_loaded = 1;
            size_t offset = 0;
            if(!not_compressed) { // records are compressed, for example after filter op.
                //decompress to host
                queue<string> op_vx;
                for(unsigned int i = 0; i< columnNames.size(); i++) {
                    op_vx.push(columnNames[i]);
                };
                allocColumns(this, op_vx);
                size_t oldCnt = mRecCount;
                mRecCount = 0;
                resize(oldCnt);
                mRecCount = oldCnt;
                for(unsigned int i = 0; i < segCount; i++) {
                    size_t cnt = 0;
                    copyColumns(this, op_vx, i, cnt);
                    CopyToHost(0, mRecCount);
                    offset = offset + mRecCount;
                    // check_val (i - (segCount-1)) is zero only on the last
                    // segment (unsigned arithmetic wraps for earlier ones).
                    compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
                };
            }
            else {
                // now we have decompressed records on the host
                //call setSegments and compress columns in every segment
                segCount = (mRecCount/process_count + 1);
                offset = 0;
                for(unsigned int z = 0; z < segCount; z++) {
                    if(z < segCount-1) {
                        if(mRecCount < process_count) {
                            mCount = mRecCount;
                        }
                        else {
                            mCount = process_count;
                        }
                    }
                    else {
                        // last segment takes the remainder
                        mCount = mRecCount - (segCount-1)*process_count;
                    };
                    compress(file_name, offset, 0, z - (segCount-1), mCount, append);
                    offset = offset + mCount;
                };
            };
        };
    };
}
// Dictionary-encode a fixed-width char column and write one segment to disk.
// Artifacts produced (file_no_seg = file_name stripped of its last extension):
//   * file_no_seg             - dictionary of distinct string values; segment 0
//                               truncates it, later segments append
//   * file_no_seg.<seg>.idx   - pfor-compressed dictionary indexes, one per record
//   * file_no_seg.<seg>.hash  - record count (4 bytes) followed by an 8-byte
//                               hash per record
// mCount  - number of records in this segment
// offset  - first record of the segment within h_columns_char[colname]
// segment - segment number
// The per-column hash->index map char_hash[ind] persists across calls, so the
// dictionary stays consistent over all segments of the column.
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
// NOTE(review): only the low 4 bytes of the size_t count are written - the
// on-disk format caps a segment at 2^32 records and assumes a little-endian host.
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
// Make sure the host and device int columns can hold the encoded indexes.
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
map<unsigned long long int, size_t>::iterator iter;
// ind = position of this column in columnNames; selects its char_hash map.
unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int i = 0 ; i < mCount; i++) {
// Hash the raw fixed-width value; /2 keeps it in non-negative long long range.
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[ind].find(hash_array[i]);
if(iter == char_hash[ind].end()) {
// First time this value is seen: append it to the dictionary file and
// assign it the next index.
cnt = char_hash[ind].size();
char_hash[ind][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
h_columns_int[colname][i] = cnt;
}
else {
h_columns_int[colname][i] = iter->second;
};
};
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
// Compress the per-record dictionary indexes with pfor and write the .idx file.
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
// Dictionary-encode an integer column and write it bit-packed to file_name.
// On-disk layout (4-byte counters unless noted):
//   [dict size][dict values, int_size bytes each, ascending]
//   [fit_count][bits_encoded][packed word count][record count]
//   [packed 64-bit words, int_size bytes each]
// Each record is replaced by the rank of its value among the distinct values,
// and fit_count ranks of bits_encoded bits each are packed per 64-bit word.
void CudaSet::compress_int(const string file_name, const string colname, const size_t mCount)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
// Collect the distinct values of the column...
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_s.insert(f);
};
// ...and assign each one its ascending rank.
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
// Re-encode every record as its dictionary rank.
for (unsigned int i = 0 ; i < mCount; i++) {
int_type f = h_columns_int[colname][i];
dict_val.push_back(d_ordered[f]);
};
// Bits needed to represent any rank (+1 so the top rank still fits).
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
// Pack fit_count ranks per 64-bit word; flush on a full word or at the last
// value. A final partial word gets an extra left shift so its codes line up
// with the positions used in full words (format must match the decoder).
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
// Persistent state shared across successive LoadBigFile() calls:
bool first_time = 1; // true until the first chunk has been read (buffers not yet sized)
size_t rec_sz = 0; // records per full chunk, measured on the first read
size_t process_piece; // read-buffer size in bytes (process_count, clamped to 1/4 of free GPU memory)
// Stream a delimited text file into this CudaSet's columns, chunk by chunk,
// parsing fields on the GPU. Returns true once the whole file is consumed.
// file_p     - open text file positioned at the next unread byte
// d_readbuff - device mirror of the host read buffer (sized here on first call)
// dest       - per-column device scratch buffers holding raw field text
// ind        - per-column 0-based source-field index (filled on first call)
// dest_len   - per-column scratch-field width
// Relies on the file-level state first_time / rec_sz / process_piece declared above.
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
// Highest 1-based source-field number referenced by any loaded column.
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
// types[f] = column type of source field f (0 when the field is skipped);
// cl collects the 0-based indexes of the fields actually loaded.
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
// Size the read buffer: process_count bytes, clamped to a quarter of free GPU memory.
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
// Guarantee the chunk ends with a newline so the last record is terminated.
// NOTE(review): assumes rb > 0; an empty read would index readbuff[rb-1] out of bounds.
if(readbuff[rb-1] != '\n') {
rb++;
readbuff[rb-1] = '\n';
};
// A short read means end of file.
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
// Complete records in this chunk: newline count minus the trailing partial record.
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
//cout << "curr_cnt " << curr_cnt << " Memory: " << getFreeMem() << endl;
if(first_time) {
// First pass over this file: grow host/device column storage by one chunk's
// worth of records and allocate per-column device parse buffers.
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
// Char column: grow the flat host array by reallocating and copying.
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
cudaMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
// One-time setup of the parse scratch buffer for this column.
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
// Zero the scratch buffers before each chunk is parsed.
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
cudaMemset(dest[i],0,max_len*rec_sz);
}
else {
cudaMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
// dev_pos holds, per record, the byte offset just before its first character.
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
// Rewind the file to the start of the first record we did not parse.
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
// Split each record into per-column raw text fields on the GPU.
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
// Convert the raw field text of each column to its typed representation.
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
// A '-' at position 4 of the first value is taken as a date separator
// (YYYY-MM-DD...). NOTE(review): this sniffs only the first record.
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
// Fixed-point decimal: parse and scale by the column's decimal places.
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
cudaMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, cudaMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
// Whole file consumed: release the parse buffers and the host read buffer.
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
cudaFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
// Release host-side column storage, the permutation vector, and (via
// deAllocOnDevice) all device-side column allocations.
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
// NOTE(review): this branch is also taken for char columns (type 2) and
// for int columns whose host vector is already empty; it then touches
// (and may default-construct an entry in) h_columns_float rather than
// freeing h_columns_char. Confirm this is intentional.
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
// Grow the scratch-buffer pool by one device allocation of 8*maxRecs bytes
// (room for maxRecs 64-bit values); the buffer is pushed onto alloced_mem.
void alloc_pool(unsigned int maxRecs) {
    void* buffer = nullptr;
    CUDA_SAFE_CALL(cudaMalloc((void **) &buffer, 8*maxRecs));
    alloced_mem.push_back(buffer);
};
// Element-wise AND of two device boolean columns of length mRecCount.
// The result overwrites column1, which is returned; column2 (allocated with
// thrust::device_malloc) is freed here.
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_and<bool>());
    thrust::device_free(rhs);
    return column1;
}
// Element-wise OR of two device boolean columns of length mRecCount.
// The result overwrites column1, which is returned; column2 (allocated with
// thrust::device_malloc) is freed here.
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
    thrust::device_ptr<bool> lhs(column1);
    thrust::device_ptr<bool> rhs(column2);
    thrust::transform(lhs, lhs + mRecCount, rhs, lhs, thrust::logical_or<bool>());
    thrust::device_free(rhs);
    return column1;
}
// Compare two integer constants and return a freshly device_malloc'ed column
// of mRecCount copies of the boolean outcome.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
    bool res;
    switch(op_type) {
    case 2:  res = (d > s);  break; // >
    case 1:  res = (d < s);  break; // <
    case 6:  res = (d >= s); break; // >=
    case 5:  res = (d <= s); break; // <=
    case 4:  res = (d == s); break; // =
    default: res = (d != s); break; // !=
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::fill(p, p + mRecCount, res);
    return thrust::raw_pointer_cast(p);
};
// Compare two float constants with EPSILON tolerance and return a freshly
// device_malloc'ed column of mRecCount copies of the boolean outcome.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
    bool res;
    switch(op_type) {
    case 2: // >
        res = ((d-s) > EPSILON);
        break;
    case 1: // <
        res = ((s-d) > EPSILON);
        break;
    case 6: // >=
        res = (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)));
        break;
    case 5: // <=
        res = (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)));
        break;
    case 4: // =
        res = (((d-s) < EPSILON) && ((d-s) > -EPSILON));
        break;
    default: // !=
        res = (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)));
        break;
    };
    thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
    thrust::fill(p, p + mRecCount, res);
    return thrust::raw_pointer_cast(p);
}
// Compare a device float column against the constant d using the project's
// epsilon-aware comparison functors; returns a device_malloc'ed boolean column.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
    thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<float_type> col(column1);
    auto rhs = thrust::make_constant_iterator(d);
    switch(op_type) {
    case 2:  thrust::transform(col, col+mRecCount, rhs, res, f_greater()); break;           // >
    case 1:  thrust::transform(col, col+mRecCount, rhs, res, f_less()); break;              // <
    case 6:  thrust::transform(col, col+mRecCount, rhs, res, f_greater_equal_to()); break;  // >=
    case 5:  thrust::transform(col, col+mRecCount, rhs, res, f_less_equal()); break;        // <=
    case 4:  thrust::transform(col, col+mRecCount, rhs, res, f_equal_to()); break;          // =
    default: thrust::transform(col, col+mRecCount, rhs, res, f_not_equal_to()); break;      // !=
    };
    return thrust::raw_pointer_cast(res);
}
// Internal helper: compare a device integer column against a constant with the
// given comparator; when p1 is non-zero the column is scaled by 10^p1 through
// power_functor so both sides compare at the same decimal scale.
template<class Cmp>
static void compare_col_const(thrust::device_ptr<int_type> col, int_type d,
                              thrust::device_ptr<bool> out, size_t n,
                              unsigned int p1, Cmp cmp)
{
    if(!p1)
        thrust::transform(col, col+n, thrust::make_constant_iterator(d), out, cmp);
    else
        thrust::transform(thrust::make_transform_iterator(col, power_functor<int_type>(p1)),
                          thrust::make_transform_iterator(col+n, power_functor<int_type>(p1)),
                          thrust::make_constant_iterator(d), out, cmp);
}

// Compare a device integer column against the constant d; returns a freshly
// device_malloc'ed boolean column of mRecCount results.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// p1 scales the column and p2 scales the constant by 10^p, aligning
// fixed-point operands to a common scale.
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    thrust::device_ptr<int_type> col(column1);
    if(p2)
        d = d*(unsigned int)pow(10, p2);
    switch(op_type) {
    case 2:  compare_col_const(col, d, out, mRecCount, p1, thrust::greater<int_type>()); break;       // >
    case 1:  compare_col_const(col, d, out, mRecCount, p1, thrust::less<int_type>()); break;          // <
    case 6:  compare_col_const(col, d, out, mRecCount, p1, thrust::greater_equal<int_type>()); break; // >=
    case 5:  compare_col_const(col, d, out, mRecCount, p1, thrust::less_equal<int_type>()); break;    // <=
    case 4:  compare_col_const(col, d, out, mRecCount, p1, thrust::equal_to<int_type>()); break;      // =
    default: compare_col_const(col, d, out, mRecCount, p1, thrust::not_equal_to<int_type>()); break;  // !=
    };
    return thrust::raw_pointer_cast(out);
}
// Internal helper: one element-wise comparison of two integer columns,
// scaling either side by 10^p through power_functor when its p is non-zero.
template<class Cmp>
static void compare_int_cols(thrust::device_ptr<int_type> a, thrust::device_ptr<int_type> b,
                             thrust::device_ptr<bool> out, size_t n,
                             unsigned int p1, unsigned int p2, Cmp cmp)
{
    if(!p1 && !p2)
        thrust::transform(a, a+n, b, out, cmp);
    else if(p1 && p2)
        thrust::transform(thrust::make_transform_iterator(a, power_functor<int_type>(p1)),
                          thrust::make_transform_iterator(a+n, power_functor<int_type>(p1)),
                          thrust::make_transform_iterator(b, power_functor<int_type>(p2)),
                          out, cmp);
    else if(p1)
        thrust::transform(thrust::make_transform_iterator(a, power_functor<int_type>(p1)),
                          thrust::make_transform_iterator(a+n, power_functor<int_type>(p1)),
                          b, out, cmp);
    else
        thrust::transform(a, a+n,
                          thrust::make_transform_iterator(b, power_functor<int_type>(p2)),
                          out, cmp);
}

// Compare two device integer columns element-wise; returns a freshly
// device_malloc'ed boolean column of mRecCount results.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
// p1/p2 are decimal-scale corrections: a non-zero value multiplies the
// corresponding column by 10^p so both sides compare at the same scale.
// Fix: the second input range handed to thrust::transform must BEGIN at
// dev_ptr2 - the original passed make_transform_iterator(dev_ptr2+mRecCount, ...)
// as the begin iterator whenever p2 was set, reading mRecCount elements past
// the end of column2 (the analogous op(int_type*, int_type*, ...) overload
// already starts the scaled range at dev_ptr2).
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
    thrust::device_ptr<int_type> dev_ptr1(column1);
    thrust::device_ptr<int_type> dev_ptr2(column2);
    thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
    switch(op_type) {
    case 2:  compare_int_cols(dev_ptr1, dev_ptr2, temp, mRecCount, p1, p2, thrust::greater<int_type>()); break;       // >
    case 1:  compare_int_cols(dev_ptr1, dev_ptr2, temp, mRecCount, p1, p2, thrust::less<int_type>()); break;          // <
    case 6:  compare_int_cols(dev_ptr1, dev_ptr2, temp, mRecCount, p1, p2, thrust::greater_equal<int_type>()); break; // >=
    case 5:  compare_int_cols(dev_ptr1, dev_ptr2, temp, mRecCount, p1, p2, thrust::less_equal<int_type>()); break;    // <=
    case 4:  compare_int_cols(dev_ptr1, dev_ptr2, temp, mRecCount, p1, p2, thrust::equal_to<int_type>()); break;      // =
    default: compare_int_cols(dev_ptr1, dev_ptr2, temp, mRecCount, p1, p2, thrust::not_equal_to<int_type>()); break;  // !=
    };
    return thrust::raw_pointer_cast(temp);
}
// Element-wise comparison of two device float columns using the project's
// epsilon-aware functors; returns a device_malloc'ed boolean column.
// op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', anything else '!='.
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<float_type> rhs(column2);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    switch(op_type) {
    case 2:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater()); break;           // >
    case 1:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less()); break;              // <
    case 6:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater_equal_to()); break;  // >=
    case 5:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less_equal()); break;        // <=
    case 4:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_equal_to()); break;          // =
    default: thrust::transform(lhs, lhs+mRecCount, rhs, out, f_not_equal_to()); break;      // !=
    };
    return thrust::raw_pointer_cast(out);
}
// Element-wise comparison of a float column against an int column. The int
// column is widened to float into a temporary (freed before returning), then
// compared with the epsilon-aware functors; returns a device_malloc'ed
// boolean column. op_type: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=', else '!='.
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
    thrust::device_ptr<float_type> lhs(column1);
    thrust::device_ptr<int_type> ints(column2);
    thrust::device_ptr<float_type> rhs = thrust::device_malloc<float_type>(mRecCount);
    thrust::device_ptr<bool> out = thrust::device_malloc<bool>(mRecCount);
    // Widen the integer column so both sides share a type.
    thrust::transform(ints, ints + mRecCount, rhs, long_to_float_type());
    switch(op_type) {
    case 2:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater()); break;           // >
    case 1:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less()); break;              // <
    case 6:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_greater_equal_to()); break;  // >=
    case 5:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_less_equal()); break;        // <=
    case 4:  thrust::transform(lhs, lhs+mRecCount, rhs, out, f_equal_to()); break;          // =
    default: thrust::transform(lhs, lhs+mRecCount, rhs, out, f_not_equal_to()); break;      // !=
    };
    thrust::device_free(rhs);
    return thrust::raw_pointer_cast(out);
}
// Mixed int/float column arithmetic. column1 is widened to float into a
// scratch buffer from the pool, then combined with column2:
//   reverse == 0 : result = column2 (op) widened(column1)
//   reverse != 0 : result = widened(column1) (op) column2
// op_type is "MUL", "ADD", "MINUS"; anything else divides. The scratch buffer
// is popped from alloced_mem and returned - the caller owns it afterwards.
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> result((float_type*)alloced_mem.back());
    thrust::device_ptr<int_type> ints(column1);
    // Widen column1 into the scratch buffer; the buffer is reused for the result.
    thrust::transform(ints, ints + mRecCount, result, long_to_float_type());
    thrust::device_ptr<float_type> floats(column2);
    if(reverse == 0) {
        if (op_type == "MUL")
            thrust::transform(floats, floats+mRecCount, result, result, thrust::multiplies<float_type>());
        else if (op_type == "ADD")
            thrust::transform(floats, floats+mRecCount, result, result, thrust::plus<float_type>());
        else if (op_type == "MINUS")
            thrust::transform(floats, floats+mRecCount, result, result, thrust::minus<float_type>());
        else
            thrust::transform(floats, floats+mRecCount, result, result, thrust::divides<float_type>());
    }
    else {
        if (op_type == "MUL")
            thrust::transform(result, result+mRecCount, floats, result, thrust::multiplies<float_type>());
        else if (op_type == "ADD")
            thrust::transform(result, result+mRecCount, floats, result, thrust::plus<float_type>());
        else if (op_type == "MINUS")
            thrust::transform(result, result+mRecCount, floats, result, thrust::minus<float_type>());
        else
            thrust::transform(result, result+mRecCount, floats, result, thrust::divides<float_type>());
    };
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(result);
}
// Scalar arithmetic on a device integer column:
//   reverse == 0 : result = column (op) d      (MUL evaluated as d1 * column)
//   reverse != 0 : result = d (op) column
// op_type is "MUL", "ADD", "MINUS"; anything else divides. p1 scales the
// column by 10^p1 (power_functor), p2 scales the constant by 10^p2, bringing
// fixed-point operands to a common scale. The result buffer comes from the
// scratch pool (popped from alloced_mem); the caller owns it afterwards.
// Fix: d is multiplied by 10^p2 exactly once below; the !p1 forward branches
// of the original multiplied it by 10^p2 a second time via
// make_constant_iterator(d*pow(10,p2)), double-scaling the constant whenever
// p2 was non-zero (the sibling compare(int_type*, int_type, ...) scales once).
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    //cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
    thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
    thrust::device_ptr<int_type> dev_ptr1(column1);
    // d1 keeps the unscaled constant for MUL. NOTE(review): the assignment
    // truncates int_type to unsigned int - large constants overflow here;
    // preserved as-is, confirm against callers.
    unsigned int d1 = d;
    if(p2)
        d = d*(unsigned int)pow(10, p2);
    if(reverse == 0) {
        if (op_type.compare("MUL") == 0) {
            thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
        }
        else {
            if(!p1)
                thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
            else
                thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
        }
    }
    else {
        if (op_type.compare("MUL") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
        }
        else if (op_type.compare("ADD") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
        }
        else if (op_type.compare("MINUS") == 0) {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
        }
        else {
            if(!p1)
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
            else
                thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
        };
    };
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(temp);
}
// Element-wise integer arithmetic between two int_type columns.
// The result is written into a buffer popped from the allocation pool and
// returned as a raw device pointer (caller owns interpretation/lifetime).
// op_type selects MUL / ADD / MINUS; any other value falls through to divide.
// `reverse` swaps the operand order (column2 OP column1).
// p1/p2 are decimal scale exponents: when non-zero, the corresponding column
// is passed through power_functor (scales by a power of 10) so that fixed-
// point decimals with different scales are aligned before the operation.
// NOTE(review): the MUL paths ignore p1/p2 entirely — presumably scale
// alignment is unnecessary for multiplication; confirm against callers.
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
// Output buffer reused from the pool; popped (not freed) at the end.
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
// column1 OP column2
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
// Four cases: neither, both, or exactly one operand needs rescaling.
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
// Default (anything not MUL/ADD/MINUS): integer division.
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
// Reversed operand order: column2 OP column1 (scales follow their column).
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
// Element-wise float arithmetic between two float_type columns.
// Writes the result into a buffer popped from the allocation pool and
// returns its raw device pointer.  op_type selects MUL / ADD / MINUS;
// anything else divides.  `reverse` swaps the operand order so the
// operation becomes column2 OP column1.
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    // Pick operand order once instead of duplicating each transform call.
    thrust::device_ptr<float_type> lhs(reverse ? column2 : column1);
    thrust::device_ptr<float_type> rhs(reverse ? column1 : column2);
    if (op_type.compare("MUL") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::multiplies<float_type>());
    else if (op_type.compare("ADD") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::plus<float_type>());
    else if (op_type.compare("MINUS") == 0)
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::minus<float_type>());
    else
        thrust::transform(lhs, lhs + mRecCount, rhs, res, thrust::divides<float_type>());
    alloced_mem.pop_back();
    return thrust::raw_pointer_cast(res);
}
// Arithmetic between an int_type column and a float constant d, producing a
// float_type result in a pool buffer.  The int column is first converted to
// float in a temporary device allocation; `reverse` selects d OP column
// instead of column OP d.
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
// temp doubles as the constant operand (pre-filled with d) and the output;
// thrust::transform writes it in place.
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
// Scratch buffer holding the int column widened to float.
// NOTE(review): allocated/freed on every call — could be pooled like temp.
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
// column OP d
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
// d OP column
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
// Arithmetic between a float_type column and a float constant d.
// Result lands in a pool buffer and is returned as a raw device pointer.
// op_type selects MUL / ADD / MINUS (default: divide); `reverse` computes
// d OP column instead of column OP d.
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
    if(alloced_mem.empty()) {
        alloc_pool(maxRecs);
    };
    thrust::device_ptr<float_type> res((float_type*)alloced_mem.back());
    thrust::device_ptr<float_type> col(column1);
    // The constant side never materializes: a constant iterator feeds d.
    auto konst = thrust::make_constant_iterator(d);
    if(reverse == 0) {
        // column OP d
        if (op_type.compare("MUL") == 0)
            thrust::transform(col, col + mRecCount, konst, res, thrust::multiplies<float_type>());
        else if (op_type.compare("ADD") == 0)
            thrust::transform(col, col + mRecCount, konst, res, thrust::plus<float_type>());
        else if (op_type.compare("MINUS") == 0)
            thrust::transform(col, col + mRecCount, konst, res, thrust::minus<float_type>());
        else
            thrust::transform(col, col + mRecCount, konst, res, thrust::divides<float_type>());
    }
    else {
        // d OP column
        if (op_type.compare("MUL") == 0)
            thrust::transform(konst, konst + mRecCount, col, res, thrust::multiplies<float_type>());
        else if (op_type.compare("ADD") == 0)
            thrust::transform(konst, konst + mRecCount, col, res, thrust::plus<float_type>());
        else if (op_type.compare("MINUS") == 0)
            thrust::transform(konst, konst + mRecCount, col, res, thrust::minus<float_type>());
        else
            thrust::transform(konst, konst + mRecCount, col, res, thrust::divides<float_type>());
    };
    alloced_mem.pop_back();
    return (float_type*)thrust::raw_pointer_cast(res);
}
// Load one segment of a dictionary index ("<index_name>.<segment>") into
// idx_dictionary_int (host map: key -> ordinal) and idx_vals (device buffer
// of packed values preceded by fit_count/bits_encoded/vals_count/real_count).
// Returns the single trailing byte stored after the values — presumably a
// sort/order flag written by the index builder; TODO confirm its semantics.
// File layout: [u32 sz][sz x int64 keys][u32 fit_count][u32 bits_encoded]
//              [u32 vals_count][u32 real_count][vals_count x u64][1 byte].
// In interactive mode the whole file is cached in pinned host memory
// (index_buffers) so repeated loads skip disk I/O.
// Fixes vs. previous revision: fopen() results are now checked (a missing
// file used to crash on fseek/fread), and int_array is no longer leaked.
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
    FILE* f;
    unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
    void* d_str;
    string f1 = index_name + "." + to_string(segment);
    char res;
    if(interactive) {
        if(index_buffers.find(f1) == index_buffers.end()) {
            f = fopen (f1.c_str(), "rb" );
            if(!f) {
                cout << "Couldn't open index file " << f1 << endl;
                exit(0);
            };
            fseek(f, 0, SEEK_END);
            long fileSize = ftell(f);
            char* buff;
            // Pinned allocation: the buffer is copied to the GPU below and
            // kept for the whole session.
            cudaHostAlloc(&buff, fileSize, cudaHostAllocDefault);
            fseek(f, 0, SEEK_SET);
            fread(buff, fileSize, 1, f);
            fclose(f);
            index_buffers[f1] = buff;
        };
        // Parse directly out of the cached image (see layout above).
        sz = ((unsigned int*)index_buffers[f1])[0];
        idx_dictionary_int[index_name].clear();
        for(unsigned int i = 0; i < sz; i++) {
            idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
        };
        vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
        real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
        mRecCount = real_count;
        res = (index_buffers[f1]+4 +8*sz + (vals_count+2)*int_size)[0];
        cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
        cudaMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, cudaMemcpyHostToDevice);
        if(idx_vals.count(index_name))
            cudaFree(idx_vals[index_name]);
        idx_vals[index_name] = (unsigned long long int*)d_str;
    }
    else {
        f = fopen (f1.c_str(), "rb" );
        if(!f) {
            cout << "Couldn't open index file " << f1 << endl;
            exit(0);
        };
        fread(&sz, 4, 1, f);
        int_type* d_array = new int_type[sz];
        idx_dictionary_int[index_name].clear();
        fread((void*)d_array, sz*int_size, 1, f);
        for(unsigned int i = 0; i < sz; i++) {
            idx_dictionary_int[index_name][d_array[i]] = i;
        };
        delete [] d_array;
        fread(&fit_count, 4, 1, f);
        fread(&bits_encoded, 4, 1, f);
        fread(&vals_count, 4, 1, f);
        fread(&real_count, 4, 1, f);
        mRecCount = real_count;
        unsigned long long int* int_array = new unsigned long long int[vals_count+2];
        // Rewind over the last four counters so the device buffer holds
        // [fit_count|bits_encoded|vals_count|real_count|values...].
        fseek ( f , -16 , SEEK_CUR );
        fread((void*)int_array, 1, vals_count*8 + 16, f);
        fread(&res, 1, 1, f);
        fclose(f);
        void* d_str;
        cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
        cudaMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, cudaMemcpyHostToDevice);
        delete [] int_array;  // fix: previously leaked on every call
        if(idx_vals.count(index_name))
            cudaFree(idx_vals[index_name]);
        idx_vals[index_name] = (unsigned long long int*)d_str;
    }
    return res;
}
// Initialize a CudaSet backed by compressed on-disk data (DIM tables).
// Reads optional "<file>.sort"/".presort" sidecars describing the sort order,
// then builds per-column metadata from the parallel queues nameRef/typeRef/
// sizeRef/colsRef (all consumed).  Recs is the total record count;
// file_name is the on-disk prefix for all column files.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
// Optional sidecar: fields the segments are sorted on.
// Format: [u32 count] then count x ([u32 len][len bytes]).
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
// Optional sidecar: fields the data was presorted on (same format).
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
// One pass per column: record name/position, then type-specific metadata.
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
// Numeric columns: peek the compression type from the 6th u32 of the
// column's first segment file.
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
// type[] codes: 0 = integer-backed (int/timestamp/decimal),
// 1 = float, 2 = char.
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
// For decimals, sizeRef carries the scale (number of decimal places).
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
// Char column: buffers are allocated lazily; sizeRef carries the
// fixed string width, string_map the dictionary file prefix.
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
// Initialize an empty (not yet loaded) CudaSet from the parallel schema
// queues nameRef/typeRef/sizeRef/colsRef, which are consumed.  Recs is the
// expected host-side record count; no data is loaded here.
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
    mColumnCount = (unsigned int)nameRef.size();
    tmp_table = 0;
    filtered = 0;
    mRecCount = 0;
    hostRecCount = Recs;
    segCount = 0;
    for(unsigned int i=0; i < mColumnCount; i++) {
        // Hoist the queue heads once per column instead of re-reading them.
        const string name = nameRef.front();
        const string col_type = typeRef.front();
        columnNames.push_back(name);
        cols[colsRef.front()] = name;
        ts_cols[name] = (col_type.compare("timestamp") == 0) ? 1 : 0;
        // type[] codes: 0 = integer-backed (int/timestamp/decimal),
        // 1 = float, 2 = char.
        if (col_type.compare("int") == 0 || col_type.compare("timestamp") == 0) {
            type[name] = 0;
            decimal[name] = 0;
            decimal_zeroes[name] = 0;
            h_columns_int[name] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[name] = thrust::device_vector<int_type>();
        }
        else if (col_type.compare("float") == 0) {
            type[name] = 1;
            decimal[name] = 0;
            h_columns_float[name] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            d_columns_float[name] = thrust::device_vector<float_type>();
        }
        else if (col_type.compare("decimal") == 0) {
            type[name] = 0;
            decimal[name] = 1;
            // For decimals, sizeRef carries the scale (decimal places).
            decimal_zeroes[name] = sizeRef.front();
            h_columns_int[name] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            d_columns_int[name] = thrust::device_vector<int_type>();
        }
        else {
            // Char column: buffers allocated lazily, width from sizeRef.
            type[name] = 2;
            decimal[name] = 0;
            h_columns_char[name] = nullptr;
            d_columns_char[name] = nullptr;
            char_size[name] = sizeRef.front();
        };
        nameRef.pop();
        typeRef.pop();
        sizeRef.pop();
        colsRef.pop();
    };
};
// Minimal initializer: records only the row/column counts for a set whose
// columns are created elsewhere; the set starts unfiltered.
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
    filtered = 0;
    mColumnCount = ColumnCount;
    mRecCount = RecordCount;
    hostRecCount = RecordCount;
};
// Build the schema of a projection result set: for each column in op_sel,
// copy its type metadata from whichever loaded table (varNames) defines it.
// op_sel_as provides the output aliases.  Host vectors here are deliberately
// unpinned (see the commented-out pinned variants that were replaced).
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
    mRecCount = 0;
    mColumnCount = (unsigned int)op_sel.size();
    segCount = 1;
    not_compressed = 1;
    filtered = 0;
    col_aliases = op_sel_as;
    unsigned int i = 0;
    // Fix: `a` was uninitialized; with an empty varNames the lookups below
    // dereferenced a garbage pointer.  nullptr makes that failure defined.
    CudaSet *a = nullptr;
    while(!op_sel.empty()) {
        // Find a table that owns this column.  NOTE: if no table matches,
        // `a` is left pointing at the last table examined — pre-existing
        // behavior, preserved as-is.
        for(auto it = varNames.begin(); it != varNames.end(); it++) {
            a = it->second;
            if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
                break;
        };
        type[op_sel.front()] = a->type[op_sel.front()];
        cols[i] = op_sel.front();
        decimal[op_sel.front()] = a->decimal[op_sel.front()];
        decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
        columnNames.push_back(op_sel.front());
        if (a->type[op_sel.front()] == 0) {
            d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
            //h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
            h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
        }
        else if (a->type[op_sel.front()] == 1) {
            d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
            //h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
            h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
        }
        else {
            h_columns_char[op_sel.front()] = nullptr;
            d_columns_char[op_sel.front()] = nullptr;
            char_size[op_sel.front()] = a->char_size[op_sel.front()];
        };
        i++;
        op_sel.pop();
    };
}
// Build the schema of a join result set from the columns of two inputs a
// and b.  Each column in op_sel is taken from a if a owns it, otherwise
// from b; columns owned by neither input are silently dropped.  Row
// capacity (maxRecs) follows the right-hand table b.
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
// First pass: count how many requested columns exist in either input
// (set dedups repeated names).
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
// Second pass: copy per-column metadata, skipping duplicates already added.
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
// Prefer the left input a when both sides own the column.
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
// Dictionary-encoded int column: carry the dictionary file over.
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
// Same logic, sourced from the right input b.
else if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
// Map a comparison-operator code to its counterpart when the comparison is
// flipped: > <-> <=, < <-> >= (codes per the original comments:
// 1 = '<', 2 = '>', 5 = '<=', 6 = '>=').  Unknown codes pass through.
int_type reverse_op(int_type op_type)
{
    switch (op_type) {
    case 2:  return 5;   // >   becomes <=
    case 1:  return 6;   // <   becomes >=
    case 6:  return 1;   // >=  becomes <
    case 5:  return 2;   // <=  becomes >
    default: return op_type;
    }
}
// Number of bytes of device memory currently free, per the CUDA runtime.
size_t getFreeMem()
{
    size_t free_bytes = 0, total_bytes = 0;
    cudaMemGetInfo(&free_bytes, &total_bytes);   // total_bytes is unused
    return free_bytes;
}
// Prepare device storage for the columns in `fields`.
// Filtered set: only make sure the shared scratch buffer (alloced_tmp) is
// large enough for one segment of the filter's source table — the column
// data itself is staged through that buffer later.
// Unfiltered set: allocate a device buffer per requested column.
// Fix: the previous version re-tested a->filtered inside the filtered
// branch, so its `t = a` else-arm was unreachable dead code.
void allocColumns(CudaSet* a, queue<string> fields)
{
    if(a->filtered) {
        // The data actually lives in the table the filter refers to.
        CudaSet* t = varNames[a->source_name];
        if(int_size*t->maxRecs > alloced_sz) {
            if(alloced_sz) {
                cudaFree(alloced_tmp);
            };
            cudaMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
            alloced_sz = int_size*t->maxRecs;
        }
    }
    else {
        while(!fields.empty()) {
            if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
                a->allocColumnOnDevice(fields.front(), a->maxRecs);
            }
            fields.pop();
        };
    };
}
// Stage one column of the current segment from source table t into a.
// With a row-permutation index ('R') the rows listed in a->prm_d are
// gathered through mygather; otherwise the whole segment is copied and
// a's row count is synced to t's.  `count` is the element offset already
// filled in a's column.
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
    if(!a->onDevice(field)) {
        a->allocColumnOnDevice(field, a->maxRecs);
    };
    if(a->prm_index != 'R') {
        // No permutation: verbatim segment copy.
        mycopy(field, a, t, count, t->mRecCount);
        a->mRecCount = t->mRecCount;
    }
    else {
        mygather(field, a, t, count, a->mRecCount);
    };
}
// Widen columns that were staged on the GPU in packed form (8/16/32 bits
// per value, recorded in cpy_bits by the preceding copy) back to full
// 64-bit values, then add back the per-column base value (cpy_init_val,
// frame-of-reference offset).  Float columns are stored as scaled integers
// during staging and converted to float at the end.
// NOTE(review): the 8-bit int path widens via signed `char` while the
// float path uses `unsigned char` — presumably intentional (sign-extended
// deltas for ints); confirm against the compression writer.
void copyFinalize(CudaSet* a, queue<string> fields)
{
set<string> uniques;
// Scratch buffer for the widened values, 8 bytes per row.
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
// Only process each column once, and only those staged in packed form.
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end()) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<char> src((char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
// Already 64-bit: just copy into scratch before adding the base.
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
// Add the frame-of-reference base; floats then convert int -> float
// in place over the same buffer.
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
// Bring every column in `fields` onto the GPU for the given segment.
// Filtered set: optionally re-run the filter (flt), optionally grow the
// destination columns (rsz) so this segment's surviving rows fit behind the
// rows accumulated from earlier segments, then gather through the source
// table.  Unfiltered set: copy the segment directly.  `count` is the
// running row offset into the destination columns.
// Fix: removed the unused local copy `auto f(fields);`.
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
    set<string> uniques;
    if(a->filtered) { //filter the segment
        if(flt) {
            filter_op(a->fil_s, a->fil_f, segment);
        };
        if(rsz && a->mRecCount) {
            queue<string> fields1(fields);
            while(!fields1.empty()) {
                a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
                fields1.pop();
            };
            a->devRecCount = a->devRecCount + a->mRecCount;
        };
    };
    // Per-column packing info is (re)filled by CopyColumnToGpu and consumed
    // later by copyFinalize.
    cpy_bits.clear();
    cpy_init_val.clear();
    while(!fields.empty()) {
        if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
            if(a->filtered) {
                if(a->mRecCount) {
                    CudaSet *t = varNames[a->source_name];
                    alloced_switch = 1;   // route the copy through the shared scratch buffer
                    t->CopyColumnToGpu(fields.front(), segment);
                    gatherColumns(a, t, fields.front(), segment, count);
                    alloced_switch = 0;
                };
            }
            else {
                if(a->mRecCount) {
                    a->CopyColumnToGpu(fields.front(), segment, count);
                };
            };
            uniques.insert(fields.front());
        };
        fields.pop();
    };
}
// Gather the rows selected by a->prm_d (a row-permutation of length g_size)
// from the staged segment of column `colname` (sitting in the shared scratch
// buffer alloced_tmp) into a's device column, starting at element `offset`.
// When cpy_bits has an entry the staged data is still packed at 8/16/32/64
// bits per value and is gathered at that width (offset is then measured in
// packed-width elements); copyFinalize widens it afterwards.
// NOTE(review): a cpy_bits value other than 8/16/32/64 gathers nothing —
// presumably such values cannot occur here; confirm against CopyColumnToGpu.
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
// Integer-backed column (int/decimal/timestamp).
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
// Unpacked path: full-width int_type values.
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
// Float column: same width dispatch, destination is the float buffer.
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
// Append t's staged segment of column `colname` (sitting in the shared
// scratch buffer alloced_tmp) to a's device column, starting at element
// `offset`.  When cpy_bits has an entry the staged data is still packed at
// 8/16/32/64 bits per value and is copied at that width (offset is then in
// packed-width elements); copyFinalize widens it afterwards.
// Fix: the 16-bit branches previously added `offset` twice — once in
// int_type units via data()+offset and again in short units on the
// destination pointer — unlike the 8/32-bit branches here and all of
// mygather.  They now follow the same single-offset pattern (and use
// unsigned short, matching mygather).
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
    if(t->type[colname] != 1) {
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
            };
        }
        else {
            // Unpacked path: full-width int_type values.
            thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
        };
    }
    else {
        // Float column: same width dispatch, destination is the float buffer.
        if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
            if(cpy_bits[colname] == 8) {
                thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
                thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 16) {
                thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
                thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 32) {
                thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
                thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
                thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
            }
            else if(cpy_bits[colname] == 64) {
                thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
                thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
            };
        }
        else {
            thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
            thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
        };
    };
};
// Load the columns of `right` named in c1 (plus the join key f2, pushed
// last) onto the GPU for segments [start_segment, end_segment).
// Returns the total number of rows loaded; rcount is set to right->maxRecs
// (per-segment capacity).  rsz/flt are forwarded to copyColumns for
// filtered sets (resize destination / run the filter).
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
// Keep only columns that actually exist in `right`, excluding f2 so it can
// be appended exactly once at the end.
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
// Filtered sets start empty and grow per segment; unfiltered sets get one
// buffer big enough for all segments up front.
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
// Copy segment by segment, accumulating the row count in cnt_r.
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
// Widest character column of `a`, in bytes (minimum 8). For dictionary-encoded
// string columns (type 0 with a string_map entry) the width is looked up in
// the global data dictionary.
size_t max_char(CudaSet* a)
{
    size_t widest = 8;
    for (const auto& cn : a->columnNames) {
        if (a->type[cn] == 2) {
            if (a->char_size[cn] > widest)
                widest = a->char_size[cn];
        }
        else if (a->type[cn] == 0 && a->string_map.find(cn) != a->string_map.end()) {
            auto path = a->string_map[cn];
            auto dot = path.find_first_of(".");
            auto len = data_dict[path.substr(0, dot)][path.substr(dot + 1)].col_length;
            if (len > widest)
                widest = len;
        }
    }
    return widest;
};
// Widest character column among `field_names` (minimum 8 bytes).
// Local renamed from `max_char`, which shadowed the function name.
size_t max_char(CudaSet* a, queue<string> field_names)
{
    size_t widest = 8;
    for (; !field_names.empty(); field_names.pop()) {
        const string& fn = field_names.front();
        if (a->type[fn] == 2 && a->char_size[fn] > widest)
            widest = a->char_size[fn];
    }
    return widest;
};
// Decide how many segments set `a` needs so that the listed columns fit in
// roughly a third of free GPU memory; updates segCount and maxRecs only when
// the data would not fit.
void setSegments(CudaSet* a, queue<string> cols)
{
    size_t mem_available = getFreeMem();

    // bytes needed per record across the requested columns
    size_t row_bytes = 0;
    for (; !cols.empty(); cols.pop()) {
        if (a->type[cols.front()] != 2)
            row_bytes = row_bytes + int_size;
        else
            row_bytes = row_bytes + a->char_size[cols.front()];
    }

    if (a->mRecCount * row_bytes > mem_available / 3) { //default is 3
        a->segCount = (a->mRecCount * row_bytes) / (mem_available / 5) + 1;
        a->maxRecs = (a->mRecCount / a->segCount) + 1;
    }
};
// Gather the fixed-width char keys into `tmp` in permutation order, then sort
// `tmp` on the host (descending iff SortType == "DESC"), updating `permutation`
// as a by-product.
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
    const bool descending = (SortType.compare("DESC") == 0);
    str_sort_host(tmp, RecCount, permutation, descending ? 1 : 0, len);
}
// Permute fixed-width char keys on the device: `tmp` receives a copy of `key`
// (RecCount entries of `len` bytes each), then key[i] = tmp[permutation[i]].
// Both `key` and `tmp` are device pointers.
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
    // copy keys to temporary vector
    cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
    // permute the keys
    str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
// Host-side variant: res[i] = key[permutation[i]] for RecCount fixed-width
// (`len`-byte) entries; `key` is left unchanged.
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
    str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
// Evaluate destination set `s`'s filter expression against one segment of
// source set `f`. On a real match ('R') the matching row indexes are written
// to b->prm_d; 'A' means every row passes, any other zone-map result means none.
// The source set is released from the device after its last segment.
void filter_op(const char *s, const char *f, unsigned int segment)
{
    CudaSet *a, *b;
    a = varNames.find(f)->second;
    a->name = f;
    //std::clock_t start1 = std::clock();
    if(a->mRecCount == 0 && !a->filtered) {
        // empty, unfiltered source: produce an empty result set
        // NOTE(review): this CudaSet is not registered in varNames here and is
        // never freed in this function -- confirm ownership with callers.
        b = new CudaSet(0,1);
    }
    else {
        if(verbose)
            cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
        b = varNames[s];
        b->name = s;
        b->string_map = a->string_map;
        size_t cnt = 0;
        b->sorted_fields = a->sorted_fields;
        b->ts_cols = a->ts_cols;
        allocColumns(a, b->fil_value);
        if (b->prm_d.size() == 0) {
            b->prm_d.resize(a->maxRecs);
        };
        //cout << endl << "MAP CHECK start " << segment << endl;
        char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
        //cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
        if(map_check == 'R') {
            // zone map could not decide: load the columns and run the filter
            auto old_ph = phase_copy;
            phase_copy = 0;
            copyColumns(a, b->fil_value, segment, cnt);
            phase_copy = old_ph;
            bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
            thrust::device_ptr<bool> bp((bool*)res);
            b->prm_index = 'R';
            // mRecCount = number of matches; prm_d = their row indexes
            b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
            thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
                            bp, b->prm_d.begin(), thrust::identity<bool>());
            cudaFree(res);
        }
        else {
            // zone map decided: 'A' = all rows match, anything else = none
            b->prm_index = map_check;
            if(map_check == 'A')
                b->mRecCount = a->mRecCount;
            else
                b->mRecCount = 0;
        };
        if(segment == a->segCount-1)
            a->deAllocOnDevice();
    }
    if(verbose)
        cout << endl << "filter result " << b->mRecCount << endl;
}
// Load the right side of a join for segments [start_seg, end_seg).
// Uncompressed sets load the join column f2 first (with resize), then the
// remaining requested columns; compressed sets load everything in one pass.
// Returns the number of rows loaded.
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
    size_t cnt_r = 0;
    //if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
    // need to allocate all right columns
    if (right->not_compressed) {
        // join key first, resizing the set
        queue<string> key_only;
        key_only.push(f2);
        cnt_r = load_queue(key_only, right, "", rcount, start_seg, end_seg, 1, 1);

        // then every other requested column that exists on the right table
        queue<string> rest;
        for (; !op_alt.empty(); op_alt.pop()) {
            const string& col = op_alt.front();
            if (f2.compare(col) &&
                std::find(right->columnNames.begin(), right->columnNames.end(), col) != right->columnNames.end())
                rest.push(col);
        }
        if (!rest.empty())
            cnt_r = load_queue(rest, right, "", rcount, start_seg, end_seg, 0, 0);
    }
    else {
        cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
    }
    return cnt_r;
};
// Append the records of set `s` to set `f`. Three cases:
//  - both on disk: copy segment files over, merging string dictionaries;
//  - both in memory: append host columns;
//  - source in memory, destination on disk: compress source segments to disk.
// BUG FIX: the file-copy loops read fread(buf, 1, BUFSIZ, ...) into
// char buf[4096]; BUFSIZ is implementation-defined (8192 with glibc), so this
// overflowed the stack buffer. Reads are now bounded by sizeof(buf).
void insert_records(const char* f, const char* s) {
    char buf[4096];
    size_t size, maxRecs, cnt = 0;
    string str_s, str_d;
    if(varNames.find(s) == varNames.end()) {
        process_error(3, "couldn't find " + string(s) );
    };
    CudaSet *a;
    a = varNames.find(s)->second;
    a->name = s;
    if(varNames.find(f) == varNames.end()) {
        process_error(3, "couldn't find " + string(f) );
    };
    CudaSet *b;
    b = varNames.find(f)->second;
    b->name = f;
    // if both source and destination are on disk
    cout << "SOURCES " << a->source << ":" << b->source << endl;
    if(a->source && b->source) {
        for(unsigned int i = 0; i < a->segCount; i++) {
            for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                if(a->type[a->columnNames[z]] != 2) {
                    // numeric column: raw byte copy of the segment file
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
                    str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
                    cout << str_s << " " << str_d << endl;
                    // NOTE(review): fopen results are not checked for NULL --
                    // a missing segment file would crash in fread/fwrite.
                    FILE* source = fopen(str_s.c_str(), "rb");
                    FILE* dest = fopen(str_d.c_str(), "wb");
                    while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                }
                else { //merge strings
                    //read b's strings
                    // NOTE(review): pairs a->columnNames[z] with b->columnNames[z];
                    // assumes both sets list columns in the same order -- confirm.
                    str_s = b->load_file_name + "." + b->columnNames[z];
                    FILE* dest = fopen(str_s.c_str(), "rb");
                    auto len = b->char_size[b->columnNames[z]];
                    map<string, unsigned long long int> map_d;
                    // NOTE(review): assumes len < sizeof(buf); confirm char_size bounds.
                    buf[len] = 0;
                    unsigned long long cnt = 0;
                    // destination dictionary: string -> index
                    while (fread(buf, len, 1, dest)) {
                        map_d[buf] = cnt;
                        cnt++;
                    };
                    fclose(dest);
                    unsigned long long int cct = cnt;
                    // copy the source segment's hash index file as-is
                    str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
                    str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
                    FILE* source = fopen(str_s.c_str(), "rb");
                    dest = fopen(str_d.c_str(), "wb");
                    while ((size = fread(buf, 1, sizeof(buf), source)) > 0) {
                        fwrite(buf, 1, size, dest);
                    }
                    fclose(source);
                    fclose(dest);
                    // source dictionary: index -> string
                    str_s = a->load_file_name + "." + a->columnNames[z];
                    source = fopen(str_s.c_str(), "rb");
                    map<unsigned long long int, string> map_s;
                    buf[len] = 0;
                    cnt = 0;
                    while (fread(buf, len, 1, source)) {
                        map_s[cnt] = buf;
                        cnt++;
                    };
                    fclose(source);
                    // remap the source segment's dictionary indexes into b's
                    // dictionary, appending new strings to b's dictionary file
                    queue<string> op_vx;
                    op_vx.push(a->columnNames[z]);
                    allocColumns(a, op_vx);
                    a->resize(a->maxRecs);
                    a->CopyColumnToGpu(a->columnNames[z], z, 0);
                    a->CopyColumnToHost(a->columnNames[z]);
                    str_d = b->load_file_name + "." + b->columnNames[z];
                    fstream f_file;
                    f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
                    for(auto j = 0; j < a->mRecCount; j++) {
                        auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
                        if(map_d.find(ss) == map_d.end()) { //add
                            f_file.write((char *)ss.c_str(), len);
                            a->h_columns_int[a->columnNames[z]][j] = cct;
                            cct++;
                        }
                        else {
                            a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
                        };
                    };
                    f_file.close();
                    // compress the remapped index column into the new segment
                    thrust::device_vector<int_type> d_col(a->mRecCount);
                    thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
                    auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
                    pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
                };
            };
        };
        if(a->maxRecs > b->maxRecs)
            maxRecs = a->maxRecs;
        else
            maxRecs = b->maxRecs;
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
        };
    }
    else if(!a->source && !b->source) { //if both source and destination are in memory
        size_t oldCount = b->mRecCount;
        b->resize(a->mRecCount);
        for(unsigned int z = 0; z< b->mColumnCount; z++) {
            if(b->type[a->columnNames[z]] == 0) {
                thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
            }
            else if(b->type[a->columnNames[z]] == 1) {
                thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
            }
            else {
                cudaMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, cudaMemcpyHostToHost);
            };
        };
    }
    else if(!a->source && b->source) {
        // in-memory source appended to a disk set: compress each source
        // segment into b's file
        total_segments = b->segCount;
        total_count = b->mRecCount;
        total_max = b->maxRecs;
        queue<string> op_vx;
        for(unsigned int i=0; i < a->columnNames.size(); i++)
            op_vx.push(a->columnNames[i]);
        allocColumns(a, op_vx);
        a->resize(a->maxRecs);
        for(unsigned int i = 0; i < a->segCount; i++) {
            if (a->filtered) {
                copyColumns(a, op_vx, i, cnt);
                a->CopyToHost(0, a->mRecCount);
            };
            // NOTE(review): segment offset `i - (a->segCount-1)` is <= 0 for
            // all but the last segment -- confirm compress() expects this.
            a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
        };
        for(unsigned int i = 0; i < b->columnNames.size(); i++) {
            b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
        };
    };
};
// Delete from disk-based set `f` every record matching the current filter
// expression (op_type/op_value/op_nums/op_nums_f/op_nums_precision).
// Each candidate segment is loaded, the rows to KEEP (filter == false) are
// gathered and recompressed in place, untouched segments are renumbered,
// emptied segments are removed from disk, and column headers are rewritten.
// BUG FIX: the final reWriteHeader loop iterated i over
// [new_seg_count, segCount) while indexing columnNames[i], which skipped
// columns below new_seg_count and could index past columnNames; it now
// rewrites the header of every column (matching insert_records).
void delete_records(const char* f) {
    CudaSet *a;
    a = varNames.find(f)->second;
    a->name = f;
    size_t totalRemoved = 0;
    size_t maxRecs = 0;
    if(!a->keep) { // temporary variable
        process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
    }
    else { // read matching segments, delete, compress and write on a disk replacing the original segments
        string str, str_old;
        queue<string> op_vx;
        size_t cnt;
        // register every on-disk column of the table with `a`, including
        // columns that were never referenced before
        for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
            op_vx.push((*it).first);
            if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
                if ((*it).second.col_type == 0) {
                    a->type[(*it).first] = 0;
                    a->decimal[(*it).first] = 0;
                    //a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
                    a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
                    a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
                }
                else if((*it).second.col_type == 1) {
                    a->type[(*it).first] = 1;
                    a->decimal[(*it).first] = 0;
                    //a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
                    a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
                    a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
                }
                else if ((*it).second.col_type == 3) { // decimal stored as float
                    a->type[(*it).first] = 1;
                    a->decimal[(*it).first] = 1;
                    //a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
                    a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
                    a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
                }
                else { // char column
                    a->type[(*it).first] = 2;
                    a->decimal[(*it).first] = 0;
                    a->h_columns_char[(*it).first] = nullptr;
                    a->d_columns_char[(*it).first] = nullptr;
                    a->char_size[(*it).first] = (*it).second.col_length;
                };
                a->columnNames.push_back((*it).first);
            }
        };
        allocColumns(a, op_vx);
        a->resize(a->maxRecs);
        a->prm_d.resize(a->maxRecs);
        size_t cc = a->mRecCount;
        size_t tmp;
        void* d;
        // scratch buffer shared by the int and float gathers below
        // NOTE(review): sized with float_size; assumes int_size == float_size.
        CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
        unsigned int new_seg_count = 0;
        char map_check;
        for(unsigned int i = 0; i < a->segCount; i++) {
            map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
            if(verbose)
                cout << "MAP CHECK segment " << i << " " << map_check << endl;
            if(map_check != 'N') { // segment may contain matches
                cnt = 0;
                copyColumns(a, op_vx, i, cnt);
                tmp = a->mRecCount;
                if(a->mRecCount) {
                    bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
                    thrust::device_ptr<bool> bp((bool*)res);
                    // prm_d receives the indexes of the rows to KEEP (filter == 0)
                    thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
                                    bp, a->prm_d.begin(), thrust::logical_not<bool>());
                    a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
                    cudaFree(res);
                    // cout << "Remained recs count " << a->mRecCount << endl;
                    if(a->mRecCount > maxRecs)
                        maxRecs = a->mRecCount;
                    if (a->mRecCount) {
                        totalRemoved = totalRemoved + (tmp - a->mRecCount);
                        if (a->mRecCount == tmp) { //none deleted
                            if(new_seg_count != i) {
                                // slide the untouched segment files down to the new index
                                for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
                                    auto colname = (*it).first;
                                    str_old = a->load_file_name + "." + colname + "." + to_string(i);
                                    str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
                                    remove(str.c_str());
                                    rename(str_old.c_str(), str.c_str());
                                };
                            };
                            new_seg_count++;
                        }
                        else { //some deleted
                            //cout << "writing segment " << new_seg_count << endl;
                            map<string, col_data> s = data_dict[a->load_file_name];
                            for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
                                string colname = (*it).first;
                                str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
                                if(a->type[colname] == 0) { // integer column
                                    thrust::device_ptr<int_type> d_col((int_type*)d);
                                    thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
                                    pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
                                }
                                else if(a->type[colname] == 1) { // float / decimal column
                                    thrust::device_ptr<float_type> d_col((float_type*)d);
                                    if(a->decimal[colname]) {
                                        thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
                                        thrust::device_ptr<long long int> d_col_dec((long long int*)d);
                                        thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
                                        pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
                                    }
                                    else { // plain float: stored uncompressed (comp type 3)
                                        thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
                                        thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
                                        fstream binary_file(str.c_str(),ios::out|ios::binary);
                                        binary_file.write((char *)&a->mRecCount, 4);
                                        binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
                                        unsigned int comp_type = 3;
                                        binary_file.write((char *)&comp_type, 4);
                                        binary_file.close();
                                    };
                                }
                                else { // char column: rewrite its .hash index
                                    thrust::device_ptr<int_type> d_col((int_type*)d);
                                    thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
                                    pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
                                };
                            };
                            new_seg_count++;
                        };
                    }
                    else {
                        totalRemoved = totalRemoved + tmp; // whole segment deleted
                    };
                }
            }
            else { // zone map proves no matches: keep segment, renumbering if needed
                if(new_seg_count != i) {
                    for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                        str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
                        str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
                        remove(str.c_str());
                        rename(str_old.c_str(), str.c_str());
                    };
                };
                new_seg_count++;
                maxRecs = a->maxRecs;
            };
        };
        // remove the files of segments past the new segment count
        if (new_seg_count < a->segCount) {
            for(unsigned int i = new_seg_count; i < a->segCount; i++) {
                //cout << "delete segment " << i << endl;
                for(unsigned int z = 0; z < a->columnNames.size(); z++) {
                    str = a->load_file_name + "." + a->columnNames[z];
                    str += "." + to_string(i);
                    remove(str.c_str());
                };
            };
        };
        // rewrite every column's header with the new segment/record counts
        for(unsigned int i = 0; i < a->columnNames.size(); i++) {
            a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
        };
        a->mRecCount = cc;
        a->prm_d.resize(0);
        a->segCount = new_seg_count;
        a->deAllocOnDevice();
        cudaFree(d);
    };
};
// Serialize the data dictionary to `file_name` (truncating any existing file).
// Layout: table count (8 bytes), then per table {name_len(8), name, col_count(8),
// per column {name_len(8), name, col_type(4), col_length(4)}}.
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
    fstream binary_file(file_name.c_str(), ios::out | ios::binary | ios::trunc);
    size_t n_tables = data_dict.size();
    binary_file.write((char *)&n_tables, 8);
    for (auto& tbl : data_dict) {
        size_t name_len = tbl.first.size();
        binary_file.write((char *)&name_len, 8);
        binary_file.write((char *)tbl.first.data(), name_len);
        size_t n_cols = tbl.second.size();
        binary_file.write((char *)&n_cols, 8);
        for (auto& col : tbl.second) {
            size_t col_name_len = col.first.size();
            binary_file.write((char *)&col_name_len, 8);
            binary_file.write((char *)col.first.data(), col_name_len);
            binary_file.write((char *)&col.second.col_type, 4);
            binary_file.write((char *)&col.second.col_length, 4);
        }
    }
    binary_file.close();
}
// Load the data dictionary written by save_col_data from `file_name`.
// BUG FIX: name lengths read from the file were used unchecked as read sizes
// into a 4000-byte stack buffer; a corrupt or truncated dictionary could
// overflow the stack. Lengths are now validated before reading.
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
    size_t str_len, recs, len1;
    string str1, str2;
    char buffer[4000];
    unsigned int col_type, col_length;
    fstream binary_file;
    binary_file.open(file_name.c_str(), ios::in|ios::binary);
    if(binary_file.is_open()) {
        bool ok = true;
        binary_file.read((char*)&recs, 8);
        for(unsigned int i = 0; ok && i < recs; i++) {
            binary_file.read((char*)&str_len, 8);
            if(str_len > sizeof(buffer)) { ok = false; break; } // corrupt file
            binary_file.read(buffer, str_len);
            str1.assign(buffer, str_len);
            binary_file.read((char*)&len1, 8);
            for(unsigned int j = 0; j < len1; j++) {
                binary_file.read((char*)&str_len, 8);
                if(str_len > sizeof(buffer)) { ok = false; break; } // corrupt file
                binary_file.read(buffer, str_len);
                str2.assign(buffer, str_len);
                binary_file.read((char*)&col_type, 4);
                binary_file.read((char*)&col_length, 4);
                data_dict[str1][str2].col_type = col_type;
                data_dict[str1][str2].col_length = col_length;
                //cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
            };
        };
        if(!ok) {
            cout << "Data dictionary appears corrupted: " << file_name << endl;
        };
        binary_file.close();
    }
    else {
        cout << "Couldn't open data dictionary" << endl;
    };
}
// True when `name` is one of the set's column names.
bool var_exists(CudaSet* a, string name) {
    return std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end();
}
// Returns 1 when `filename` can be opened for reading, 0 otherwise.
int file_exist (const char *filename)
{
    std::ifstream probe(filename);
    return probe.good() ? 1 : 0;
}
// True when segment-0 join bitmap index files exist for every filter column of
// `right` that is a real column of the right table. An empty filter list
// yields false.
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
    queue<string> cols(right->fil_value);
    if (cols.empty())
        return 0;

    bool all_present = 1;
    for (; !cols.empty(); cols.pop()) {
        const string& col = cols.front();
        if (std::find(right->columnNames.begin(), right->columnNames.end(), col) != right->columnNames.end()) {
            string fname = left->load_file_name + "." + right->load_file_name + "." + col + ".0";
            if (!file_exist(fname.c_str()))
                all_present = 0;
        }
    }
    return all_present;
}
// If join bitmap indexes exist for every filter column of `right`, migrate
// right's filter expression onto `left` (draining right's fil_* queues) so the
// filter can be evaluated through the bitmaps, ANDed with any existing left
// filter. Returns 1 when the migration happened (or right has no filter),
// 0 when some bitmap file is missing (right's filter is left intact).
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
    //check if there are join bitmap indexes
    queue<string> cols(right->fil_value);
    bool bitmaps_exist = 1;
    if(cols.size() == 0) {
        // no filter on the right side: nothing to migrate
        bitmaps_exist = 1;
        return 1;
    };
    // a bitmap must exist (segment 0 probe) for every real right-table column
    while(cols.size() ) {
        if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
            string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
            if( !file_exist(fname.c_str())) {
                bitmaps_exist = 0;
            };
        };
        cols.pop();
    };
    if(bitmaps_exist) {
        // move literal operands over to the left set
        // NOTE(review): fil_nums_precision is NOT transferred, unlike fil_nums
        // and fil_nums_f -- confirm bitmap filters never carry decimals.
        while(!right->fil_nums.empty() ) {
            left->fil_nums.push(right->fil_nums.front());
            right->fil_nums.pop();
        };
        while(!right->fil_nums_f.empty() ) {
            left->fil_nums_f.push(right->fil_nums_f.front());
            right->fil_nums_f.pop();
        };
        // column references become bitmap-file references on the left
        while(!right->fil_value.empty() ) {
            if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
                string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
                left->fil_value.push(fname);
            }
            else
                left->fil_value.push(right->fil_value.front());
            right->fil_value.pop();
        };
        // append right's operators; AND with any pre-existing left filter
        bool add_and = 1;
        if(left->fil_type.empty())
            add_and = 0;
        while(!right->fil_type.empty() ) {
            left->fil_type.push(right->fil_type.front());
            right->fil_type.pop();
        };
        if(add_and) {
            left->fil_type.push("AND");
        };
        return 1;
    }
    else {
        return 0;
    };
}
// Append the right table's sort_check flag (a single byte) to file `str`.
// NOTE(review): parameter `rid` is accepted but never used here -- confirm
// whether callers rely on it.
void check_sort(const string str, const char* rtable, const char* rid)
{
    CudaSet* right = varNames.find(rtable)->second;
    fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
    binary_file.write((char *)&right->sort_check, 1);
    binary_file.close();
}
// Materialize the char representation of dictionary-encoded string column
// `colname` in the order given by permutation `raw_ptr`, then sort it
// (ascending, or descending when ord == "DESC"), updating the permutation in
// place. Sorting runs on the device when host == false, on the host otherwise.
// `temp` must be device scratch holding at least mRecCount int_type values.
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
    // locate the backing dictionary file and its fixed string width
    auto s = a->string_map[colname];
    auto pos = s.find_first_of(".");
    auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
    // NOTE(review): any previous h_columns_char[colname] allocation is
    // overwritten without delete[] -- possible leak; confirm ownership.
    a->h_columns_char[colname] = new char[a->mRecCount*len];
    memset(a->h_columns_char[colname], 0, a->mRecCount*len);
    // permute the dictionary indexes on the device into `temp`
    thrust::device_ptr<unsigned int> perm(raw_ptr);
    thrust::device_ptr<int_type> temp_int((int_type*)temp);
    thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
    //for(int z = 0 ; z < a->mRecCount; z++) {
    //cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
    //};
    //cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
    // copy permuted indexes back to the host (assumes int_type is 8 bytes)
    cudaMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, cudaMemcpyDeviceToHost);
    // resolve each index to its string via random access into the dictionary file
    // NOTE(review): fopen result is not checked for NULL -- a missing
    // dictionary file would crash in fseek/fread below.
    FILE *f;
    f = fopen(a->string_map[colname].c_str(), "rb");
    for(int z = 0 ; z < a->mRecCount; z++) {
        fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
        fread(a->h_columns_char[colname] + z*len, 1, len, f);
    };
    fclose(f);
    if(!host) {
        // sort the strings on the device, updating raw_ptr as a by-product
        void *d;
        cudaMalloc((void **) &d, a->mRecCount*len);
        a->d_columns_char[colname] = (char*)d;
        cudaMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, cudaMemcpyHostToDevice);
        if (ord.compare("DESC") == 0 )
            str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
        else
            str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
        cudaFree(d);
    }
    else {
        // host-side sort of the same data
        if (ord.compare("DESC") == 0 )
            str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
        else
            str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
    };
}
#ifdef _WIN64
// Total physical RAM in bytes (Windows).
size_t getTotalSystemMemory()
{
    MEMORYSTATUSEX status;
    status.dwLength = sizeof(status);
    GlobalMemoryStatusEx(&status);
    return status.ullTotalPhys;
}
#else
// Total physical RAM in bytes (POSIX). Returns 0 when sysconf cannot report
// the page count or page size.
size_t getTotalSystemMemory()
{
    long pages = sysconf(_SC_PHYS_PAGES);
    long page_size = sysconf(_SC_PAGE_SIZE);
    // sysconf returns -1 on failure; the original multiplied the raw values,
    // which on failure (or on 32-bit long with large RAM) produced garbage.
    if (pages < 0 || page_size < 0)
        return 0;
    // widen before multiplying so the product cannot overflow `long`
    return (size_t)pages * (size_t)page_size;
}
#endif
|
2450cb1c8a0910e1dda2d46280813758df250f5b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* reduction kernel. Initially, each thread will copy 1 item of data
* from global to shared memory. Then will will do the binary tree dance.
*/
/*
 * Block-level sum reduction: each block reduces up to blockDim.x values of
 * `in` into one partial sum written to out[blockIdx.x].
 * Requires blockDim.x to be a power of two, <= 1024.
 *
 * Fixes vs. original:
 *  - out-of-range threads no longer return before __syncthreads(): a barrier
 *    must be reached by every thread of the block, and the early return also
 *    left temp[] uninitialized for a partial last block. Out-of-range lanes
 *    now contribute the identity element 0.
 *  - only thread 0 stores the block result (the original had every in-range
 *    thread race on the same store).
 */
__global__ void reduce(float* out, float* in, int size) {
    __shared__ float temp[1024];
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int myId = threadIdx.x;  // a value between 0 and blockDim.x-1
    // move data to shared memory for speed; pad the tail with the identity
    temp[myId] = (index < size) ? in[index] : 0.0f;
    __syncthreads();
    // binary-tree reduction over shared memory
    for (int stride = blockDim.x / 2; stride >= 1; stride /= 2) {
        if (myId < stride) {
            temp[myId] += temp[myId + stride];
        }
        __syncthreads();
    }
    if (myId == 0) {
        out[blockIdx.x] = temp[0];
    }
}
| 2450cb1c8a0910e1dda2d46280813758df250f5b.cu | /*
* reduction kernel. Initially, each thread will copy 1 item of data
* from global to shared memory. Then will will do the binary tree dance.
*/
/*
 * Block-level sum reduction: each block reduces up to blockDim.x values of
 * `in` into one partial sum written to out[blockIdx.x].
 * Requires blockDim.x to be a power of two, <= 1024.
 *
 * Fixes vs. original:
 *  - out-of-range threads no longer return before __syncthreads(): a barrier
 *    must be reached by every thread of the block, and the early return also
 *    left temp[] uninitialized for a partial last block. Out-of-range lanes
 *    now contribute the identity element 0.
 *  - only thread 0 stores the block result (the original had every in-range
 *    thread race on the same store).
 */
__global__ void reduce(float* out, float* in, int size) {
    __shared__ float temp[1024];
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int myId = threadIdx.x;  // a value between 0 and blockDim.x-1
    // move data to shared memory for speed; pad the tail with the identity
    temp[myId] = (index < size) ? in[index] : 0.0f;
    __syncthreads();
    // binary-tree reduction over shared memory
    for (int stride = blockDim.x / 2; stride >= 1; stride /= 2) {
        if (myId < stride) {
            temp[myId] += temp[myId + stride];
        }
        __syncthreads();
    }
    if (myId == 0) {
        out[blockIdx.x] = temp[0];
    }
}
|
9a960b5950722aec6ed596ea19a6aac6d6e79dd2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/add.cuh>
#include <raft/linalg/subtract.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace raft {
namespace linalg {

// Test parameters for the add/subtract-device-scalar kernels.
template <typename T, typename IdxType = int>
struct DevScalarInputs {
  T tolerance;                  // tolerance handed to CompareApprox
  IdxType len;                  // element count
  T scalar;                     // scalar operand (uploaded to device memory)
  bool add;                     // true -> addDevScalar, false -> subtractDevScalar
  unsigned long long int seed;  // RNG seed for the input vector
};

// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Reference path: element-wise (in +/- scalar) computed with unaryOp.
template <typename T, typename IdxType = int>
void unaryOpLaunch(T* out, const T* in, T scalar, IdxType len, bool add, hipStream_t stream)
{
  raft::linalg::unaryOp(
    out,
    in,
    len,
    [scalar, add] __device__(T in) { return add ? in + scalar : in - scalar; },
    stream);
}

// Fixture: builds a random input, the unaryOp reference result, and the
// add/subtractDevScalar result on a private stream.
template <typename T, typename IdxType>
class DevScalarTest : public ::testing::TestWithParam<DevScalarInputs<T, IdxType>> {
 protected:
  DevScalarTest() : in(0, stream), out_ref(0, stream), out(0, stream), scalar(stream) {}

  void SetUp() override
  {
    params = ::testing::TestWithParam<DevScalarInputs<T, IdxType>>::GetParam();
    raft::random::Rng r(params.seed);
    CUDA_CHECK(hipStreamCreate(&stream));
    auto len = params.len;
    in.resize(len, stream);
    out_ref.resize(len, stream);
    out.resize(len, stream);
    // BUG FIX: source contained the mojibake "¶ms.scalar"
    // (mis-encoded "&params.scalar"), which does not compile.
    raft::update_device(scalar.data(), &params.scalar, 1, stream);
    r.uniform(in.data(), len, T(-1.0), T(1.0), stream);
    unaryOpLaunch(out_ref.data(), in.data(), params.scalar, len, params.add, stream);
    if (params.add) {
      addDevScalar(out.data(), in.data(), scalar.data(), len, stream);
    } else {
      subtractDevScalar(out.data(), in.data(), scalar.data(), len, stream);
    }
    // NOTE(review): the stream is destroyed at the end of SetUp, before the
    // TEST_P body reads the buffers -- relies on stream destruction draining
    // pending work; confirm intended.
    CUDA_CHECK(hipStreamDestroy(stream));
  }

 protected:
  hipStream_t stream = 0;
  DevScalarInputs<T, IdxType> params;
  rmm::device_uvector<T> in, out_ref, out;
  rmm::device_scalar<T> scalar;
};

// float, 32-bit indexes: one add case and one subtract case
const std::vector<DevScalarInputs<float, int>> inputsf_i32 = {
  {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}};
typedef DevScalarTest<float, int> DevScalarTestF_i32;
TEST_P(DevScalarTestF_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i32, ::testing::ValuesIn(inputsf_i32));

// float, 64-bit indexes
const std::vector<DevScalarInputs<float, size_t>> inputsf_i64 = {
  {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}};
typedef DevScalarTest<float, size_t> DevScalarTestF_i64;
TEST_P(DevScalarTestF_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i64, ::testing::ValuesIn(inputsf_i64));

// double, 32-bit indexes
const std::vector<DevScalarInputs<double, int>> inputsd_i32 = {
  {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}};
typedef DevScalarTest<double, int> DevScalarTestD_i32;
TEST_P(DevScalarTestD_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i32, ::testing::ValuesIn(inputsd_i32));

// double, 64-bit indexes
const std::vector<DevScalarInputs<double, size_t>> inputsd_i64 = {
  {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}};
typedef DevScalarTest<double, size_t> DevScalarTestD_i64;
TEST_P(DevScalarTestD_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i64, ::testing::ValuesIn(inputsd_i64));

}  // end namespace linalg
}  // end namespace raft
| 9a960b5950722aec6ed596ea19a6aac6d6e79dd2.cu | /*
* Copyright (c) 2018-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/linalg/add.cuh>
#include <raft/linalg/subtract.cuh>
#include <raft/linalg/unary_op.cuh>
#include <raft/random/rng.cuh>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include "test_utils.h"
namespace raft {
namespace linalg {

// Test parameters for the add/subtract-device-scalar kernels.
template <typename T, typename IdxType = int>
struct DevScalarInputs {
  T tolerance;                  // tolerance handed to CompareApprox
  IdxType len;                  // element count
  T scalar;                     // scalar operand (uploaded to device memory)
  bool add;                     // true -> addDevScalar, false -> subtractDevScalar
  unsigned long long int seed;  // RNG seed for the input vector
};

// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
// Reference path: element-wise (in +/- scalar) computed with unaryOp.
template <typename T, typename IdxType = int>
void unaryOpLaunch(T* out, const T* in, T scalar, IdxType len, bool add, cudaStream_t stream)
{
  raft::linalg::unaryOp(
    out,
    in,
    len,
    [scalar, add] __device__(T in) { return add ? in + scalar : in - scalar; },
    stream);
}

// Fixture: builds a random input, the unaryOp reference result, and the
// add/subtractDevScalar result on a private stream.
template <typename T, typename IdxType>
class DevScalarTest : public ::testing::TestWithParam<DevScalarInputs<T, IdxType>> {
 protected:
  DevScalarTest() : in(0, stream), out_ref(0, stream), out(0, stream), scalar(stream) {}

  void SetUp() override
  {
    params = ::testing::TestWithParam<DevScalarInputs<T, IdxType>>::GetParam();
    raft::random::Rng r(params.seed);
    CUDA_CHECK(cudaStreamCreate(&stream));
    auto len = params.len;
    in.resize(len, stream);
    out_ref.resize(len, stream);
    out.resize(len, stream);
    // BUG FIX: source contained the mojibake "¶ms.scalar"
    // (mis-encoded "&params.scalar"), which does not compile.
    raft::update_device(scalar.data(), &params.scalar, 1, stream);
    r.uniform(in.data(), len, T(-1.0), T(1.0), stream);
    unaryOpLaunch(out_ref.data(), in.data(), params.scalar, len, params.add, stream);
    if (params.add) {
      addDevScalar(out.data(), in.data(), scalar.data(), len, stream);
    } else {
      subtractDevScalar(out.data(), in.data(), scalar.data(), len, stream);
    }
    // NOTE(review): the stream is destroyed at the end of SetUp, before the
    // TEST_P body reads the buffers -- relies on stream destruction draining
    // pending work; confirm intended.
    CUDA_CHECK(cudaStreamDestroy(stream));
  }

 protected:
  cudaStream_t stream = 0;
  DevScalarInputs<T, IdxType> params;
  rmm::device_uvector<T> in, out_ref, out;
  rmm::device_scalar<T> scalar;
};

// float, 32-bit indexes: one add case and one subtract case
const std::vector<DevScalarInputs<float, int>> inputsf_i32 = {
  {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}};
typedef DevScalarTest<float, int> DevScalarTestF_i32;
TEST_P(DevScalarTestF_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i32, ::testing::ValuesIn(inputsf_i32));

// float, 64-bit indexes
const std::vector<DevScalarInputs<float, size_t>> inputsf_i64 = {
  {0.000001f, 1024 * 1024, 2.f, true, 1234ULL}, {0.000001f, 1024 * 1024, 2.f, false, 1234ULL}};
typedef DevScalarTest<float, size_t> DevScalarTestF_i64;
TEST_P(DevScalarTestF_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestF_i64, ::testing::ValuesIn(inputsf_i64));

// double, 32-bit indexes
const std::vector<DevScalarInputs<double, int>> inputsd_i32 = {
  {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}};
typedef DevScalarTest<double, int> DevScalarTestD_i32;
TEST_P(DevScalarTestD_i32, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i32, ::testing::ValuesIn(inputsd_i32));

// double, 64-bit indexes
const std::vector<DevScalarInputs<double, size_t>> inputsd_i64 = {
  {0.00000001, 1024 * 1024, 2.0, true, 1234ULL}, {0.00000001, 1024 * 1024, 2.0, false, 1234ULL}};
typedef DevScalarTest<double, size_t> DevScalarTestD_i64;
TEST_P(DevScalarTestD_i64, Result)
{
  ASSERT_TRUE(devArrMatch(
    out_ref.data(), out.data(), params.len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(DevScalarTests, DevScalarTestD_i64, ::testing::ValuesIn(inputsd_i64));

}  // end namespace linalg
}  // end namespace raft
|
7f96b7599f9283332a835f8f8e1cb17ed596c65f.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
const char acos_name[] = "acos";  // kernel name handed to the jiterator

// Element-wise arccosine over the TensorIterator's common dtype.
// Complex types (incl. complex half) go through the jiterator when it is
// enabled, otherwise a plain gpu_kernel lambda; real floating types
// (incl. half/bfloat16) always use the gpu_kernel path.
void acos_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    // Source string compiled at runtime by the jiterator.
    static const auto acos_string = jiterator_stringify(
        template <typename T> T acos(T a) { return std::acos(a); });
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "acos_name", [&]() {
          jitted_gpu_kernel<
              /*name=*/acos_name,
              /*return_dtype=*/scalar_t,
              /*common_dtype=*/scalar_t,
              /*arity=*/1>(iter, acos_string);
        });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(
        kComplexHalf, common_dtype, "acos_name", [&]() {
          gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
            // Promote (e.g. complex half -> complex float) before acos.
            using opmath_t = at::opmath_type<scalar_t>;
            return ::acos(static_cast<opmath_t>(a));
          });
        });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half,
        ScalarType::BFloat16,
        common_dtype,
        "acos_cuda",
        [&]() {
          gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::acos(a);
          });
        });
  }
}

REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
} // namespace at::native
| 7f96b7599f9283332a835f8f8e1cb17ed596c65f.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
const char acos_name[] = "acos";  // kernel name handed to the jiterator

// Element-wise arccosine on CUDA. Real floating types (incl. half and
// bfloat16) use the plain gpu_kernel path; complex types (incl. complex
// half) go through the jiterator when it is enabled, otherwise a
// gpu_kernel lambda that promotes to the op-math type first.
void acos_kernel_cuda(TensorIteratorBase& iter) {
  const auto common_dtype = iter.common_dtype();
  if (!at::isComplexType(common_dtype)) {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half,
        ScalarType::BFloat16,
        common_dtype,
        "acos_cuda",
        [&]() {
          gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
            return ::acos(x);
          });
        });
    return;
  }
#if AT_USE_JITERATOR
  // Source string compiled at runtime by the jiterator.
  static const auto acos_string = jiterator_stringify(
      template <typename T> T acos(T a) { return std::acos(a); });
  AT_DISPATCH_COMPLEX_TYPES_AND(
      kComplexHalf, common_dtype, "acos_name", [&]() {
        jitted_gpu_kernel<
            /*name=*/acos_name,
            /*return_dtype=*/scalar_t,
            /*common_dtype=*/scalar_t,
            /*arity=*/1>(iter, acos_string);
      });
#else
  AT_DISPATCH_COMPLEX_TYPES_AND(
      kComplexHalf, common_dtype, "acos_name", [&]() {
        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t z) -> scalar_t {
          using opmath_t = at::opmath_type<scalar_t>;
          return ::acos(static_cast<opmath_t>(z));
        });
      });
#endif
}

REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
} // namespace at::native
|
68a9afd9d60b6ab49b48610e97d3d919da52120c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/batch_norm_plus_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <iostream>
namespace caffe {
// Fused per-channel affine transform: out[i] = in[i] * scale[c] + bias[c],
// where the channel c is recovered from the flat element index as
// (i / inner_dim) % scale_dim. Iterates over all n elements via Caffe's
// CUDA_KERNEL_LOOP macro.
template <typename Dtype>
__global__ void BatchNormPlusForward(const int n, Dtype* in, const Dtype* scale,
    const Dtype* bias, const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(i, n) {
    const int c = (i / inner_dim) % scale_dim;
    out[i] = in[i] * scale[c] + bias[c];
  }
}
// Forward pass of BatchNorm with an optional fused per-channel scale/bias
// ("plus" variant). Layout: bottom is (num, channels_, spatial...), so the
// channel index has stride spatial_dim in the flattened tensor.
// Training (use_global_stats_ == false): batch mean/variance are computed
// with GEMV reductions and accumulated into the running averages held in
// blobs_[0]/blobs_[1] (blobs_[2] tracks the normalization factor).
// Inference: the stored running statistics are used directly.
// The normalized activations are cached in x_norm_ (training only) and the
// final output in x_temp_top for use by Backward_gpu.
template <typename Dtype>
void BatchNormPlusLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int num = bottom[0]->shape(0);
  int spatial_dim = bottom[0]->count() / (channels_*bottom[0]->shape(0));
  if (bottom[0] != top[0]) {
    caffe_copy(bottom[0]->count(), bottom_data, top_data);
  }
  if (use_global_stats_) {
    // use the stored mean/variance estimates, undoing the accumulated
    // moving-average normalization factor kept in blobs_[2].
    const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
        0 : 1 / this->blobs_[2]->cpu_data()[0];
    caffe_gpu_scale(variance_.count(), scale_factor,
        this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data());
    caffe_gpu_scale(variance_.count(), scale_factor,
        this->blobs_[1]->gpu_data(), variance_.mutable_gpu_data());
  }
  else {
    // compute mean: two GEMV reductions (over spatial, then over batch)
    caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
        1. / (num * spatial_dim), bottom_data,
        spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data());
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
        mean_.mutable_gpu_data());
  }
  // subtract mean (broadcast per-channel mean over batch and spatial dims)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, -1, num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 1., top_data);
  if (!use_global_stats_) {
    // compute variance using var(X) = E((X-EX)^2)
    caffe_gpu_powx(top[0]->count(), top_data, Dtype(2),
        temp_.mutable_gpu_data()); // (X-EX)^2
    caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
        1. / (num * spatial_dim), temp_.gpu_data(),
        spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data());
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
        variance_.mutable_gpu_data()); // E((X_EX)^2)
    // compute and save moving average
    this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
    this->blobs_[2]->mutable_cpu_data()[0] += 1;
    caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(),
        moving_average_fraction_, this->blobs_[0]->mutable_gpu_data());
    // unbiased variance estimate for the running average
    int m = bottom[0]->count() / channels_;
    Dtype bias_correction_factor = m > 1 ? Dtype(m) / (m - 1) : 1;
    caffe_gpu_axpby(variance_.count(), bias_correction_factor,
        variance_.gpu_data(), moving_average_fraction_,
        this->blobs_[1]->mutable_gpu_data());
  }
  // normalize variance: variance_ becomes sqrt(var + eps)
  caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data());
  caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5),
      variance_.mutable_gpu_data());
  // replicate variance to input size
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), variance_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, 1., num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data());
  caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data);
  // TODO(cdoersch): The caching is only needed because later in-place layers
  // might clobber the data.  Can we skip this if they won't?
  if (!use_global_stats_) {
    caffe_copy(x_norm_.count(), top_data,
        x_norm_.mutable_gpu_data());
  }
  // add scale layer by zhangfeng: fused y = x_norm * gamma + beta
  bool scale_bias = this->layer_param_.batch_norm_plus_param().scale_bias();
  if (scale_bias){
    const Dtype* scale_data = this->blobs_[3]->gpu_data();
    const Dtype* bias_data = this->blobs_[4]->gpu_data();
    const int count = top[0]->count();
    BatchNormPlusForward<Dtype> << <CAFFE_GET_BLOCKS(count),
        CAFFE_CUDA_NUM_THREADS >> >(
        count, top_data,scale_data, bias_data, channels_, spatial_dim, top_data);
  }
  // cache the final output for the in-place case in Backward_gpu
  caffe_copy(x_temp_top.count(), top_data, x_temp_top.mutable_gpu_data());
}
// Gradient of the per-channel affine transform w.r.t. its input:
// out[i] = in[i] * scale[c], with channel c recovered from the flat
// element index as (i / inner_dim) % scale_dim.
template <typename Dtype>
__global__ void BatchNormPlusBackward(const int n, const Dtype* in, const Dtype* scale,
    const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(i, n) {
    const int c = (i / inner_dim) % scale_dim;
    out[i] = in[i] * scale[c];
  }
}
// Backward pass of BatchNorm(+scale/bias).
// When the fused scale/bias is enabled, first computes the gradients of
// gamma (blobs_[3]) and beta (blobs_[4]) from the cached normalized
// activations in x_norm_, then maps the top gradient through gamma before
// the usual batch-norm input-gradient formula below is applied.
template <typename Dtype>
void BatchNormPlusLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff;
  int num = bottom[0]->shape(0);
  int spatial_dim = bottom[0]->count() / (bottom[0]->shape(0)*channels_);
  if (bottom[0] != top[0]) {
    top_diff = top[0]->gpu_diff();
  }
  else {
    // in-place: work on a copy so top[0]'s diff buffer can be overwritten
    caffe_copy(x_temp_top.count(), top[0]->gpu_diff(),
        x_temp_top.mutable_gpu_diff());
    top_diff = x_temp_top.gpu_diff();
  }
  // gradients of the fused scale (gamma) and bias (beta), if enabled
  bool scale_bias = this->layer_param_.batch_norm_plus_param().scale_bias();
  if (scale_bias){ // alpha (scale) and beta (bias) gradients are needed
    // d(gamma)
    Dtype* scale_diff = this->blobs_[3]->mutable_gpu_diff();
    // 1. dE/dy * x_norm (elementwise)
    caffe_gpu_mul<Dtype>(top[0]->count(), top_diff, x_norm_.gpu_data(), x_temp_top.mutable_gpu_data());
    // 2. reduce to per-channel sums
    caffe_gpu_gemv<Dtype>(CblasNoTrans, num*channels_, spatial_dim, 1.,
        x_temp_top.gpu_data(), spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data()); // NC* HW * HW(1)*1 = NC*1
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0, scale_diff);
    // d(beta): per-channel sum of dE/dy
    Dtype* bias_diff = this->blobs_[4]->mutable_gpu_diff();
    caffe_gpu_gemv<Dtype>(CblasNoTrans, num*channels_, spatial_dim, 1.,
        top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data()); // NC* HW * HW(1)*1 = NC*1
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0, bias_diff);
    // map the gradient through gamma: dE/d(x_norm) = dE/dy * gamma
    const Dtype* scale_data = this->blobs_[3]->gpu_data();
    Dtype* x_norm_diff = x_norm_.mutable_gpu_diff();
    const int count = top[0]->count();
    BatchNormPlusBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
        << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
        count, top_diff, scale_data, channels_, spatial_dim, x_norm_diff);
    /*
    for (int n = 0; n < num; n++){
        for (int c = 0; c < channels_; c++){
            Dtype factory = scale_data[c];
            caffe_gpu_scale(spatial_dim, factory, top_diff, x_norm_diff);
            top_diff += spatial_dim;
            x_norm_diff += spatial_dim;
        }
    }
    */
    top_diff = x_norm_.gpu_diff();
  }
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  if (use_global_stats_) {
    // inference: gradient is just dE/dy / sqrt(var + eps)
    caffe_gpu_div(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff);
    return;
  }
  const Dtype* top_data = x_norm_.gpu_data();
  //int num = bottom[0]->shape()[0];
  //int spatial_dim = bottom[0]->count() / (channels_*bottom[0]->shape(0));
  // if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
  //
  // dE(Y)/dX =
  //   (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
  //     ./ sqrt(var(X) + eps)
  //
  // where \cdot and ./ are hadamard product and elementwise division,
  // respectively, dE/dY is the top diff, and mean/var/sum are all computed
  // along all dimensions except the channels dimension.  In the above
  // equation, the operations allow for expansion (i.e. broadcast) along all
  // dimensions except the channels dimension where required.
  // sum(dE/dY \cdot Y)
  caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff);
  caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
      bottom_diff, spatial_sum_multiplier_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
      num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
      mean_.mutable_gpu_data());
  // reshape (broadcast) the above
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, 1., num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 0., bottom_diff);
  // sum(dE/dY \cdot Y) \cdot Y
  caffe_gpu_mul(temp_.count(), top_data, bottom_diff, bottom_diff);
  // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
  caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
      top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
      num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
      mean_.mutable_gpu_data());
  // reshape (broadcast) the above to make
  // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num * channels_,
      spatial_dim, 1, 1., num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 1., bottom_diff);
  // dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
  caffe_gpu_axpby(temp_.count(), Dtype(1), top_diff,
      Dtype(-1. / (num * spatial_dim)), bottom_diff);
  // note: temp_ still contains sqrt(var(X)+eps), computed during the forward
  // pass.
  caffe_gpu_div(temp_.count(), bottom_diff, temp_.gpu_data(), bottom_diff);
}

INSTANTIATE_LAYER_GPU_FUNCS(BatchNormPlusLayer);
} // namespace caffe
| 68a9afd9d60b6ab49b48610e97d3d919da52120c.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/batch_norm_plus_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <iostream>
namespace caffe {
// Fused per-channel affine transform of the "plus" variant:
// out[index] = in[index] * scale[c] + bias[c], where the channel c is
// recovered from the flat index as (index / inner_dim) % scale_dim.
// CUDA_KERNEL_LOOP iterates `index` over all n elements.
template <typename Dtype>
__global__ void BatchNormPlusForward(const int n, Dtype* in,const Dtype* scale,
    const Dtype* bias, const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index] + bias[scale_index];
  }
}
// Forward pass of BatchNorm with an optional fused per-channel scale/bias
// ("plus" variant). Layout: bottom is (num, channels_, spatial...), so the
// channel index has stride spatial_dim in the flattened tensor.
// Training (use_global_stats_ == false): batch mean/variance are computed
// with GEMV reductions and accumulated into the running averages held in
// blobs_[0]/blobs_[1] (blobs_[2] tracks the normalization factor).
// Inference: the stored running statistics are used directly.
// The normalized activations are cached in x_norm_ (training only) and the
// final output in x_temp_top for use by Backward_gpu.
template <typename Dtype>
void BatchNormPlusLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int num = bottom[0]->shape(0);
  int spatial_dim = bottom[0]->count() / (channels_*bottom[0]->shape(0));
  if (bottom[0] != top[0]) {
    caffe_copy(bottom[0]->count(), bottom_data, top_data);
  }
  if (use_global_stats_) {
    // use the stored mean/variance estimates, undoing the accumulated
    // moving-average normalization factor kept in blobs_[2].
    const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
        0 : 1 / this->blobs_[2]->cpu_data()[0];
    caffe_gpu_scale(variance_.count(), scale_factor,
        this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data());
    caffe_gpu_scale(variance_.count(), scale_factor,
        this->blobs_[1]->gpu_data(), variance_.mutable_gpu_data());
  }
  else {
    // compute mean: two GEMV reductions (over spatial, then over batch)
    caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
        1. / (num * spatial_dim), bottom_data,
        spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data());
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
        mean_.mutable_gpu_data());
  }
  // subtract mean (broadcast per-channel mean over batch and spatial dims)
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, -1, num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 1., top_data);
  if (!use_global_stats_) {
    // compute variance using var(X) = E((X-EX)^2)
    caffe_gpu_powx(top[0]->count(), top_data, Dtype(2),
        temp_.mutable_gpu_data()); // (X-EX)^2
    caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
        1. / (num * spatial_dim), temp_.gpu_data(),
        spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data());
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
        variance_.mutable_gpu_data()); // E((X_EX)^2)
    // compute and save moving average
    this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
    this->blobs_[2]->mutable_cpu_data()[0] += 1;
    caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(),
        moving_average_fraction_, this->blobs_[0]->mutable_gpu_data());
    // unbiased variance estimate for the running average
    int m = bottom[0]->count() / channels_;
    Dtype bias_correction_factor = m > 1 ? Dtype(m) / (m - 1) : 1;
    caffe_gpu_axpby(variance_.count(), bias_correction_factor,
        variance_.gpu_data(), moving_average_fraction_,
        this->blobs_[1]->mutable_gpu_data());
  }
  // normalize variance: variance_ becomes sqrt(var + eps)
  caffe_gpu_add_scalar(variance_.count(), eps_, variance_.mutable_gpu_data());
  caffe_gpu_powx(variance_.count(), variance_.gpu_data(), Dtype(0.5),
      variance_.mutable_gpu_data());
  // replicate variance to input size
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), variance_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, 1., num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data());
  caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data);
  // TODO(cdoersch): The caching is only needed because later in-place layers
  // might clobber the data.  Can we skip this if they won't?
  if (!use_global_stats_) {
    caffe_copy(x_norm_.count(), top_data,
        x_norm_.mutable_gpu_data());
  }
  // add scale layer by zhangfeng: fused y = x_norm * gamma + beta
  bool scale_bias = this->layer_param_.batch_norm_plus_param().scale_bias();
  if (scale_bias){
    const Dtype* scale_data = this->blobs_[3]->gpu_data();
    const Dtype* bias_data = this->blobs_[4]->gpu_data();
    const int count = top[0]->count();
    BatchNormPlusForward<Dtype> << <CAFFE_GET_BLOCKS(count),
        CAFFE_CUDA_NUM_THREADS >> >(
        count, top_data,scale_data, bias_data, channels_, spatial_dim, top_data);
  }
  // cache the final output for the in-place case in Backward_gpu
  caffe_copy(x_temp_top.count(), top_data, x_temp_top.mutable_gpu_data());
}
// Gradient of the per-channel affine transform w.r.t. its input:
// out[index] = in[index] * scale[c], with channel c recovered from the
// flat element index as (index / inner_dim) % scale_dim.
template <typename Dtype>
__global__ void BatchNormPlusBackward(const int n, const Dtype* in, const Dtype* scale,
    const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index];
  }
}
// Backward pass of BatchNorm(+scale/bias).
// When the fused scale/bias is enabled, first computes the gradients of
// gamma (blobs_[3]) and beta (blobs_[4]) from the cached normalized
// activations in x_norm_, then maps the top gradient through gamma before
// the usual batch-norm input-gradient formula below is applied.
template <typename Dtype>
void BatchNormPlusLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff;
  int num = bottom[0]->shape(0);
  int spatial_dim = bottom[0]->count() / (bottom[0]->shape(0)*channels_);
  if (bottom[0] != top[0]) {
    top_diff = top[0]->gpu_diff();
  }
  else {
    // in-place: work on a copy so top[0]'s diff buffer can be overwritten
    caffe_copy(x_temp_top.count(), top[0]->gpu_diff(),
        x_temp_top.mutable_gpu_diff());
    top_diff = x_temp_top.gpu_diff();
  }
  // gradients of the fused scale (gamma) and bias (beta), if enabled
  bool scale_bias = this->layer_param_.batch_norm_plus_param().scale_bias();
  if (scale_bias){ // alpha (scale) and beta (bias) gradients are needed
    // d(gamma)
    Dtype* scale_diff = this->blobs_[3]->mutable_gpu_diff();
    // 1. dE/dy * x_norm (elementwise)
    caffe_gpu_mul<Dtype>(top[0]->count(), top_diff, x_norm_.gpu_data(), x_temp_top.mutable_gpu_data());
    // 2. reduce to per-channel sums
    caffe_gpu_gemv<Dtype>(CblasNoTrans, num*channels_, spatial_dim, 1.,
        x_temp_top.gpu_data(), spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data()); // NC* HW * HW(1)*1 = NC*1
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0, scale_diff);
    // d(beta): per-channel sum of dE/dy
    Dtype* bias_diff = this->blobs_[4]->mutable_gpu_diff();
    caffe_gpu_gemv<Dtype>(CblasNoTrans, num*channels_, spatial_dim, 1.,
        top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
        num_by_chans_.mutable_gpu_data()); // NC* HW * HW(1)*1 = NC*1
    caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1,
        num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0, bias_diff);
    // map the gradient through gamma: dE/d(x_norm) = dE/dy * gamma
    const Dtype* scale_data = this->blobs_[3]->gpu_data();
    Dtype* x_norm_diff = x_norm_.mutable_gpu_diff();
    const int count = top[0]->count();
    BatchNormPlusBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
        << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
        count, top_diff, scale_data, channels_, spatial_dim, x_norm_diff);
    /*
    for (int n = 0; n < num; n++){
        for (int c = 0; c < channels_; c++){
            Dtype factory = scale_data[c];
            caffe_gpu_scale(spatial_dim, factory, top_diff, x_norm_diff);
            top_diff += spatial_dim;
            x_norm_diff += spatial_dim;
        }
    }
    */
    top_diff = x_norm_.gpu_diff();
  }
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  if (use_global_stats_) {
    // inference: gradient is just dE/dy / sqrt(var + eps)
    caffe_gpu_div(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff);
    return;
  }
  const Dtype* top_data = x_norm_.gpu_data();
  //int num = bottom[0]->shape()[0];
  //int spatial_dim = bottom[0]->count() / (channels_*bottom[0]->shape(0));
  // if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
  //
  // dE(Y)/dX =
  //   (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
  //     ./ sqrt(var(X) + eps)
  //
  // where \cdot and ./ are hadamard product and elementwise division,
  // respectively, dE/dY is the top diff, and mean/var/sum are all computed
  // along all dimensions except the channels dimension.  In the above
  // equation, the operations allow for expansion (i.e. broadcast) along all
  // dimensions except the channels dimension where required.
  // sum(dE/dY \cdot Y)
  caffe_gpu_mul(temp_.count(), top_data, top_diff, bottom_diff);
  caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
      bottom_diff, spatial_sum_multiplier_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
      num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
      mean_.mutable_gpu_data());
  // reshape (broadcast) the above
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, 1., num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 0., bottom_diff);
  // sum(dE/dY \cdot Y) \cdot Y
  caffe_gpu_mul(temp_.count(), top_data, bottom_diff, bottom_diff);
  // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
  caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
      top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
      num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
      mean_.mutable_gpu_data());
  // reshape (broadcast) the above to make
  // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
      num_by_chans_.mutable_gpu_data());
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num * channels_,
      spatial_dim, 1, 1., num_by_chans_.gpu_data(),
      spatial_sum_multiplier_.gpu_data(), 1., bottom_diff);
  // dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
  caffe_gpu_axpby(temp_.count(), Dtype(1), top_diff,
      Dtype(-1. / (num * spatial_dim)), bottom_diff);
  // note: temp_ still contains sqrt(var(X)+eps), computed during the forward
  // pass.
  caffe_gpu_div(temp_.count(), bottom_diff, temp_.gpu_data(), bottom_diff);
}

INSTANTIATE_LAYER_GPU_FUNCS(BatchNormPlusLayer);
} // namespace caffe
|
b9612e3ce341d02ebbaf156444c2f989701f511e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//nvcc fractalSimpleCPU.cu -o temp -lglut -lGL -lm
#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include "../helpers/helper.h"
// Dimensions and scale
#define DIM 768 // window size = DIMxDIM
/* ========================================================================== */
/* SECTION: Globals */
/* ========================================================================== */
double gpuTimer;               // last GPU render time (units per helper.h timers)
double cpuTimer;               // last CPU render time
int displayCPUTrigger = 0;     // 0 = CPU image not yet computed; 1 = cached in `pixels`
float *pixels;                 // cached CPU-rendered RGB buffer (DIM*DIM*3 floats)
unsigned int seedChoice;       // Julia seed selected by the user at startup
__constant__ float2 d_Seed;    // selected seed, resident in device constant memory
float2 h_Default;              // host copies of the predefined seeds
float2 h_Nicholas;
float2 h_Broccoli;
float2 h_Wyatt;
float2 h_Custom;               // user-entered seed (choice 4)
int drag = 0;                  // nonzero while the left mouse button is held
int mouse_i = 0;               // motion-event counter used to throttle redraws
int mouse_step = 10;           // redraw roughly once per this many motion events
int oldX = 0;                  // cursor position recorded at press/release
int oldY = 0;
float shiftX = 0.0;            // accumulated pan offset (screen units)
float shiftY = 0.0;
double scale = 0.7;            // zoom factor (larger = more zoomed in)
int tick = 0;                  // zoom depth; grows the iteration budget
/* ========================================================================== */
/* SECTION: Device Functions and Kernel */
/* ========================================================================== */
// Escape-time color for the Julia iteration z <- z^2 + d_Seed at point (x, y).
// The iteration budget grows with zoom depth (`tick`, `scale`) so detail
// keeps resolving as the user zooms in. Returns an HSV->RGB color whose
// value channel encodes the normalized escape count.
__device__ float4 dGetColor(double x, double y, int seedChoice, double scale, int tick)
{
    // NOTE(review): logf/sqrtf truncate their double operands to float;
    // presumably acceptable for rendering — confirm before relying on precision.
    double maxCount = 255 + 2.0*tick*logf(scale);
    double count = 0;
    double maxMag = 10.0;
    double mag = 0.0;
    double t1;
    while (mag < maxMag && count < maxCount)
    {
        t1 = x;
        x = x*x - y*y + d_Seed.x;
        y = (2.0 * t1 * y) + d_Seed.y;
        mag = sqrtf(x*x + y*y);
        count++;
    }
    float v = count/maxCount;
    // A former `if (seedChoice == 3) { v = v; }` branch was a no-op (the host
    // path divides by 20 here instead); removed as dead code. seedChoice is
    // kept in the signature for interface compatibility.
    (void)seedChoice;
    float4 color = dHSVToRGB(make_float3(100.0/360.0, 1.0, v));
    return(color);
}
// One thread per pixel: map the 2-D thread coordinate to fractal space
// (applying pan shiftX/shiftY and zoom scale) and write an RGB triple into
// the flat pixel buffer `px`. Launched with a DIMxDIM grid of 32x32 blocks.
__global__ void kernel(float *px, int seedChoice,
                       float shiftX, float shiftY,
                       float scale, int tick)
{
    const double col = blockIdx.x*blockDim.x + threadIdx.x;
    const double row = blockIdx.y*blockDim.y + threadIdx.y;
    const int pixel = col + row*gridDim.x*blockDim.x;
    const int base = pixel*3;
    const double fx = (((2.0*col/DIM) - 1 )/scale) - shiftX/DIM;
    const double fy = (((2.0*row/DIM) - 1 )/scale) + shiftY/DIM;
    const float4 rgb = dGetColor(fx, fy, seedChoice, scale, tick);
    px[base]     = rgb.x;
    px[base + 1] = rgb.y;
    px[base + 2] = rgb.z;
}
/* ========================================================================== */
/* SECTION: Host Functions */
/* ========================================================================== */
// Host-side lookup of the predefined Julia seeds. Any unrecognized choice
// (and the explicit choice 0) yields the default seed.
__host__ float2 hGetSeed(int seedChoice)
{
    if (seedChoice == 1) return(h_Nicholas);
    if (seedChoice == 2) return(h_Broccoli);
    if (seedChoice == 3) return(h_Wyatt);
    if (seedChoice == 4) return(h_Custom);
    return(h_Default);
}
// Host-side escape-time color for point (x, y) with the chosen seed.
// Mirrors dGetColor but with a fixed iteration budget and a different
// hue/brightness mapping (hue 200 deg, value scaled by 20; seed choice 3
// undoes that scaling to stay in range).
__host__ float4 hGetColor(float x, float y, int seedChoice)
{
    float mag,maxMag,t1;
    float maxCount = 10000;
    float count = 0;
    maxMag = 10;
    mag = 0.0;
    float2 seed = hGetSeed(seedChoice);
    while (mag < maxMag && count < maxCount)
    {
        t1 = x;
        x = x*x - y*y + seed.x;
        y = (2.0 * t1 * y) + seed.y;
        mag = sqrt(x*x + y*y);
        count++;
    }
    float v = 20*count/maxCount;
    if(seedChoice==3){v = v/20;}
    float4 color = hHSVToRGB(make_float3(200.0/360.0, 1.0, v));
    return(color);
}
/* ========================================================================== */
/* SECTION: OpenGL Functions */
/* ========================================================================== */
// GLUT display callback for the GPU window: renders one frame with the
// fractal kernel, draws it, and reports the render time in the title bar.
void displayGPU(void)
{
    // Clear window to background color
    glClear(GL_COLOR_BUFFER_BIT);
    glFlush();

    // Time the full render (allocation + kernel + copy + draw).
    startTimer(&gpuTimer);

    float *px_CPU;
    float *px_GPU;
    dim3 dimBlock;
    dim3 dimGrid;

    // Setup grid layout
    // - Using 2-D blocks and grid for easier coordinate transformations.
    // - Using multiples of 32 for more efficient warps.
    dimBlock.x = 32;
    dimBlock.y = 32;
    dimBlock.z = 1;
    dimGrid.x = DIM/32;
    dimGrid.y = DIM/32;
    dimGrid.z = 1;

    // Allocate memory for pixel data on host and device.
    // NOTE(review): allocating and freeing every frame is wasteful; these
    // buffers could be allocated once and reused across redisplays.
    px_CPU = (float*)malloc(DIM*DIM*3*sizeof(float));
    HANDLE_ERROR( hipMalloc(&px_GPU, DIM*DIM*3*sizeof(float)) );

    // Run the kernel
    hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, px_GPU, seedChoice, shiftX, shiftY, scale, tick);

    // Copy pixel data from device to host; HANDLE_ERROR catches both launch
    // errors (peek) and asynchronous execution errors (memcpy syncs).
    HANDLE_ERROR( hipPeekAtLastError() );
    HANDLE_ERROR( hipMemcpy(px_CPU, px_GPU, DIM*DIM*3*sizeof(float), hipMemcpyDeviceToHost) );

    // Draw pixels
    glDrawPixels(DIM, DIM, GL_RGB, GL_FLOAT, px_CPU);
    glFlush();

    // End timer and show it in the window title. A fixed stack buffer
    // replaces the old per-frame malloc'd string that was never freed.
    endTimer(&gpuTimer);
    char title[100];
    snprintf(title, sizeof(title), "GPU | Time to render:\t %.5f s\n", gpuTimer/1000000);
    glutSetWindowTitle(title);

    free(px_CPU);
    hipFree(px_GPU);
}
// GLUT display callback for the CPU window: renders the fractal once on the
// host, caches the pixel buffer, and redraws from the cache afterwards.
void displayCPU(void)
{
    // Clear window to background color
    glClear(GL_COLOR_BUFFER_BIT);
    glFlush();
    if (displayCPUTrigger == 0)
    {
        printf("displayCPUTrigger = %d\n", displayCPUTrigger);
        displayCPUTrigger = 1;
        // Start timer
        startTimer(&cpuTimer);
        // Set zoom level
        float xMin = -1/scale;
        float xMax = 1/scale;
        float yMin = -1/scale;
        float yMax = 1/scale;
        // Transformation of pixel coordinates to fractal-space coordinates
        float stepSizeX = (xMax - xMin)/((float)DIM);
        float stepSizeY = (yMax - yMin)/((float)DIM);
        float x, y;
        float4 px_color;
        int k;
        // Allocate the cached pixel buffer once; every later redisplay
        // reuses it.
        pixels = (float *)malloc(DIM*DIM*3*sizeof(float));
        // Iterate through and set the pixel data. The extra bound on k
        // guards against float step accumulation producing one row/column
        // too many and overrunning the buffer.
        k = 0;
        y = yMin;
        while(y < yMax && k < DIM*DIM*3 - 2)
        {
            x = xMin;
            while(x < xMax && k < DIM*DIM*3 - 2)
            {
                px_color = hGetColor(x, y, seedChoice);
                pixels[k] = px_color.x; //Red on or off returned from color
                pixels[k+1] = px_color.y; //Green off
                pixels[k+2] = px_color.z; //Blue off
                k=k+3; //Skip to next pixel
                x += stepSizeX;
            }
            y += stepSizeY;
        }
        // Draw pixels
        glDrawPixels(DIM, DIM, GL_RGB, GL_FLOAT, pixels);
        glFlush();
        // End timer and show it in the window title. A fixed stack buffer
        // replaces the old malloc'd title string that was never freed.
        endTimer(&cpuTimer);
        char title[100];
        snprintf(title, sizeof(title), "CPU | Time to render:\t %.5f s\n", cpuTimer/1000000);
        glutSetWindowTitle(title);
    }
    else
    {
        // Redraw from the cached buffer. The old code called free(pixels)
        // here, which made every redisplay after the second read freed
        // memory; the cache must stay alive as long as the window does.
        glDrawPixels(DIM, DIM, GL_RGB, GL_FLOAT, pixels);
        glFlush();
    }
}
// GLUT keyboard callback: exit on ESC (key code 27); every other key is
// ignored. The (x, y) cursor position is unused.
void keypress(unsigned char key, int x, int y)
{
    if (key == 27)
    {
        exit(0);
    }
}
// Latch the drag flag when a drag gesture begins; the coordinates are
// unused here (press position is recorded by mouse()).
void startDrag(int mx, int my)
{
    (void)mx;
    (void)my;
    drag = 1;
}
// GLUT motion callback: throttled pan update while dragging.
// mouse_i counts motion events; the integer division mouse_i/mouse_step is
// 1 only when mouse_i == mouse_step, so a redraw fires roughly once per
// mouse_step motion events.
void mouse_move(int mx, int my)
{
    mouse_i++;
    if(mouse_i > mouse_step)
    {
        mouse_i = 0;
    }
    if(drag && mouse_i/mouse_step)
    {
        // NOTE(review): oldX/oldY are only updated on button press/release,
        // so each throttled event adds the full displacement since the press
        // and the pan compounds during a long drag. Confirm whether this is
        // intended; otherwise refresh oldX/oldY here.
        shiftX += (mx - oldX)/scale;
        shiftY += (my - oldY)/scale;
        glutPostRedisplay();
    }
}
// GLUT mouse-button callback: left button starts/stops panning (recording
// the press/release position); wheel events (GLUT reports them as buttons
// 3 and 4) zoom in/out by 50% and track zoom depth in `tick`, which grows
// the iteration budget in dGetColor.
void mouse(int button, int state, int mx, int my)
{
    switch(button)
    {
        case GLUT_LEFT_BUTTON:
            if(state==GLUT_DOWN)
            {
                drag = 1;
                oldX = mx;
                oldY = my;
            }
            if(state==GLUT_UP)
            {
                drag = 0;
                oldX = mx;
                oldY = my;
            }
            break;
        case 3: // wheel up: zoom in
            scale += scale*0.5;
            tick += 1;
            glutPostRedisplay();
            break;
        case 4: // wheel down: zoom out
            scale -= scale*0.5;
            tick -= 1;
            glutPostRedisplay();
            break;
    }
}
/* ========================================================================== */
/* SECTION: Main */
/* ========================================================================== */
// Entry point: prompts for a Julia seed, uploads it to device constant
// memory, then creates two GLUT windows (GPU-rendered and CPU-rendered)
// side by side and enters the event loop.
int main(int argc, char** argv)
{
    // Store predefined seeds
    h_Default = make_float2(-0.7531930315709545, 0.05331999448114999);
    h_Nicholas = make_float2(-0.3740480961923849, -0.6066666719669807);
    h_Broccoli = make_float2(-0.3948897795591184, -0.5863460624863006);
    h_Wyatt = make_float2(-0.824, -0.1711);
    h_Custom = make_float2(0.0, 0.0);

    // Prompt for seed choice
    printf("\n Enter seed choice:\n");
    printf("\t0: Default\n");
    printf("\t1: Nicholas\n");
    printf("\t2: Broccoli\n");
    printf("\t3: Wyatt\n");
    printf("\t4: Enter Custom Seed\n\n\t> ");
    // seedChoice is unsigned, so read it with %u (the old "%d" was a
    // mismatched conversion specifier); fall back to the default on bad input.
    if (scanf("%u", &seedChoice) != 1) { seedChoice = 0; }

    // Load seed data into device constant memory
    switch(seedChoice)
    {
        case 0:
            hipMemcpyToSymbol(d_Seed, &h_Default, sizeof(float2), 0, hipMemcpyHostToDevice);
            break;
        case 1:
            hipMemcpyToSymbol(d_Seed, &h_Nicholas, sizeof(float2), 0, hipMemcpyHostToDevice);
            break;
        case 2:
            hipMemcpyToSymbol(d_Seed, &h_Broccoli, sizeof(float2), 0, hipMemcpyHostToDevice);
            break;
        case 3:
            hipMemcpyToSymbol(d_Seed, &h_Wyatt, sizeof(float2), 0, hipMemcpyHostToDevice);
            break;
        case 4:
            // Prompts must be printed with printf. The old code embedded the
            // prompt text inside the scanf format string, which made scanf
            // try to match that literal text in the input and never read a
            // number.
            printf("\n Enter real: ");
            scanf("%f", &h_Custom.x);
            printf("\n Enter imaginary: ");
            scanf("%f", &h_Custom.y);
            hipMemcpyToSymbol(d_Seed, &h_Custom, sizeof(float2), 0, hipMemcpyHostToDevice);
            break;
        default:
            hipMemcpyToSymbol(d_Seed, &h_Default, sizeof(float2), 0, hipMemcpyHostToDevice);
    }
    // TODO : printf("Loaded seed information on GPU in ")

    // Initialize OpenGL
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
    glutInitWindowSize(DIM, DIM);
    glutInitWindowPosition((50+1822+1680-DIM)/2,
                           (500+1050+1050-DIM)/2);

    // Create first window
    glutCreateWindow("GPU | Time to render:\t---");
    glutDisplayFunc(displayGPU);
    glutKeyboardFunc(keypress);
    glutMouseFunc(mouse);
    glutMotionFunc(mouse_move);

    // Store position of the GPU window in order to
    // initialize the CPU window next to the GPU window
    int posX, posY;
    posX = glutGet(GLUT_WINDOW_X);
    posY = glutGet(GLUT_WINDOW_Y);

    // Create second window
    glutInitWindowPosition(posX+DIM,posY);
    glutCreateWindow("CPU | Time to render:\t---");
    glutDisplayFunc(displayCPU);
    glutKeyboardFunc(keypress);
    glClearColor(0.1, 0.1, 0.1, 0.1);

    glutMainLoop();
    return(0);
}
| b9612e3ce341d02ebbaf156444c2f989701f511e.cu | //nvcc fractalSimpleCPU.cu -o temp -lglut -lGL -lm
#include <GL/glut.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include "../helpers/helper.h"
// Dimensions and scale
#define DIM 768 // window size = DIMxDIM
/* ========================================================================== */
/* SECTION: Main */
/* ========================================================================== */
double gpuTimer;            // elapsed GPU-window render time (microseconds; title divides by 1e6)
double cpuTimer;            // elapsed CPU-window render time (microseconds)
int displayCPUTrigger = 0;  // 0 until the CPU image has been rendered once
float *pixels;              // cached CPU-rendered RGB pixel buffer (DIM*DIM*3 floats)
unsigned int seedChoice;    // menu selection read in main()
__constant__ float2 d_Seed; // Julia-set seed constant, in device constant memory
float2 h_Default;           // host-side copies of the selectable seeds
float2 h_Nicholas;
float2 h_Broccoli;
float2 h_Wyatt;
float2 h_Custom;            // filled from user input when seedChoice == 4
int drag = 0;               // nonzero while the left mouse button is held
int mouse_i = 0;            // motion-event counter used to throttle redraws
int mouse_step = 10;        // only every mouse_step-th motion event pans/redraws
int oldX = 0;               // cursor position recorded at button press/release
int oldY = 0;
float shiftX = 0.0;         // accumulated pan offset (scaled pixel units)
float shiftY = 0.0;
double scale = 0.7;         // zoom factor; wheel scales it by 1.5 / 0.5
int tick = 0;               // zoom-step counter; grows the iteration budget
/* ========================================================================== */
/* SECTION: Device Functions and Kernel */
/* ========================================================================== */
// Device-side Julia iteration for one point (x, y) of the complex plane.
// Iterates z = z^2 + d_Seed until |z| escapes maxMag or the iteration
// budget runs out; the budget grows with the zoom level (tick, scale) so
// detail is preserved when zoomed in.  Returns the pixel's RGB color.
__device__ float4 dGetColor(double x, double y, int seedChoice, double scale, int tick)
{
	float4 color;
	double mag,maxMag,t1;
	// Iteration budget scales with zoom depth.
	double maxCount = 255 + 2.0*tick*logf(scale);
	double count = 0;
	maxMag = 10.0;
	mag = 0.0;
	while (mag < maxMag && count < maxCount)
	{
		t1 = x;  // keep the old real part for the imaginary update
		x = x*x - y*y + d_Seed.x;
		y = (2.0 * t1 * y) + d_Seed.y;
		mag = sqrtf(x*x + y*y);
		count++;
	}
	// Normalized escape count drives the HSV value channel.
	float v = count/maxCount;
	// NOTE(review): this assignment is a no-op; the CPU path (hGetColor)
	// rescales v for seed 3 here — looks like a leftover, confirm intent.
	if(seedChoice==3){v = v;}
	color = dHSVToRGB(make_float3(100.0/360.0, 1.0, v));
	return(color);
}
/*
 * One thread per pixel of the DIM x DIM image: map the (block, thread)
 * coordinates to a pixel, transform the pixel to fractal space (applying
 * pan and zoom), and write its RGB color into px (3 floats per pixel,
 * row-major).  Expects a 2-D launch covering the whole image.
 */
__global__ void kernel(float *px, int seedChoice,
                       float shiftX, float shiftY,
                       float scale, int tick)
{
	const double col = threadIdx.x + blockIdx.x * blockDim.x;
	const double row = threadIdx.y + blockIdx.y * blockDim.y;
	// Flat pixel index, then the offset of its first color component.
	const int pixel = col + row * gridDim.x * blockDim.x;
	const int base = pixel * 3;
	// Pixel -> complex-plane coordinates with pan (shiftX/Y) and zoom.
	const double fx = (((2.0 * col / DIM) - 1) / scale) - shiftX / DIM;
	const double fy = (((2.0 * row / DIM) - 1) / scale) + shiftY / DIM;
	const float4 rgb = dGetColor(fx, fy, seedChoice, scale, tick);
	px[base] = rgb.x;
	px[base + 1] = rgb.y;
	px[base + 2] = rgb.z;
}
/* ========================================================================== */
/* SECTION: Host Functions */
/* ========================================================================== */
/* Return the host-side seed constant matching the user's menu choice;
   choice 0 and any unrecognized value fall back to the default seed. */
__host__ float2 hGetSeed(int seedChoice)
{
	if (seedChoice == 1)
	{
		return h_Nicholas;
	}
	if (seedChoice == 2)
	{
		return h_Broccoli;
	}
	if (seedChoice == 3)
	{
		return h_Wyatt;
	}
	if (seedChoice == 4)
	{
		return h_Custom;
	}
	return h_Default;
}
/*
 * Host-side Julia iteration for one point (x, y) of the complex plane.
 * Iterates z = z^2 + seed until |z| escapes maxMag or the fixed
 * iteration budget is exhausted, then maps the normalized escape count
 * to an RGB color via HSV.
 */
__host__ float4 hGetColor(float x, float y, int seedChoice)
{
	const float maxCount = 10000;
	const float maxMag = 10;
	const float2 seed = hGetSeed(seedChoice);
	float count = 0;
	float mag = 0.0;
	while (mag < maxMag && count < maxCount)
	{
		const float re = x; // previous real part, needed for the y update
		x = x*x - y*y + seed.x;
		y = (2.0 * re * y) + seed.y;
		mag = sqrt(x*x + y*y);
		count++;
	}
	float v = 20*count/maxCount;
	if (seedChoice == 3)
	{
		v = v/20; // seed 3 (Wyatt) uses the unscaled value channel
	}
	return hHSVToRGB(make_float3(200.0/360.0, 1.0, v));
}
/* ========================================================================== */
/* SECTION: OpenGL Functions */
/* ========================================================================== */
/*
 * GLUT display callback for the GPU window: renders the fractal with the
 * CUDA kernel, copies the pixels back, draws them, and shows the elapsed
 * render time in the window title.
 */
void displayGPU(void)
{
	// Clear window to background color
	glClear(GL_COLOR_BUFFER_BIT);
	glFlush();
	// Start timer
	startTimer(&gpuTimer);
	float *px_CPU;
	float *px_GPU;
	dim3 dimBlock;
	dim3 dimGrid;
	// Setup grid layout
	// - Using 2-D blocks and grid for easier coordinate transformations.
	// - Using multiples of 32 for more efficient warps.
	dimBlock.x = 32;
	dimBlock.y = 32;
	dimBlock.z = 1;
	dimGrid.x = DIM/32;
	dimGrid.y = DIM/32;
	dimGrid.z = 1;
	// Allocate memory for pixel data on host and device.
	// NOTE(review): allocating every redisplay is wasteful; these buffers
	// could be allocated once and reused across frames.
	px_CPU = (float*)malloc(DIM*DIM*3*sizeof(float));
	// FIX: check the allocation like every other CUDA call in this file.
	HANDLE_ERROR( cudaMalloc(&px_GPU, DIM*DIM*3*sizeof(float)) );
	// Run the kernel (one thread per pixel)
	kernel<<<dimGrid, dimBlock>>>(px_GPU, seedChoice, shiftX, shiftY, scale, tick);
	// Catch launch-configuration errors; the blocking memcpy below also
	// surfaces any asynchronous kernel failure.
	HANDLE_ERROR( cudaPeekAtLastError() );
	HANDLE_ERROR( cudaMemcpy(px_CPU, px_GPU, DIM*DIM*3*sizeof(float), cudaMemcpyDeviceToHost) );
	// Draw pixels
	glDrawPixels(DIM, DIM, GL_RGB, GL_FLOAT, px_CPU);
	glFlush();
	// End timer and publish it in the window title.
	endTimer(&gpuTimer);
	// BUG FIX: the title buffer was malloc'd and never freed, leaking
	// 100 bytes on every redisplay; a stack buffer is sufficient, and
	// snprintf guards against overflow.
	char title[100];
	snprintf(title, sizeof(title), "GPU | Time to render:\t %.5f s\n", gpuTimer/1000000);
	glutSetWindowTitle(title);
	free(px_CPU);
	cudaFree(px_GPU);
}
/*
 * GLUT display callback for the CPU window: renders the fractal once on
 * the host into the global `pixels` buffer, then redraws that cached
 * buffer on every subsequent redisplay.
 */
void displayCPU(void)
{
	// Clear window to background color
	glClear(GL_COLOR_BUFFER_BIT);
	glFlush();
	if (displayCPUTrigger == 0)
	{
		printf("displayCPUTrigger = %d\n", displayCPUTrigger);
		displayCPUTrigger = 1;
		// Start timer
		startTimer(&cpuTimer);
		// Fractal-space extents of the window at the current zoom level
		float xMin = -1/scale;
		float xMax = 1/scale;
		float yMin = -1/scale;
		float yMax = 1/scale;
		// Transformation of pixel coordinates to fractal-space coordinates
		float stepSizeX = (xMax - xMin)/((float)DIM);
		float stepSizeY = (yMax - yMin)/((float)DIM);
		float4 px_color;
		// Allocate the pixel buffer once; it must stay alive for the
		// program's lifetime because later redisplays redraw from it.
		pixels = (float *)malloc(DIM*DIM*3*sizeof(float));
		// BUG FIX: the old float-accumulation while-loops could iterate
		// DIM+/-1 times per axis (floating-point rounding), either writing
		// past the end of `pixels` or leaving rows uninitialized.  Integer
		// loops guarantee exactly DIM*DIM pixels.
		int k = 0;
		for (int row = 0; row < DIM; row++)
		{
			float y = yMin + row * stepSizeY;
			for (int col = 0; col < DIM; col++)
			{
				float x = xMin + col * stepSizeX;
				px_color = hGetColor(x, y, seedChoice);
				pixels[k] = px_color.x;   // red
				pixels[k+1] = px_color.y; // green
				pixels[k+2] = px_color.z; // blue
				k = k+3;                  // next pixel
			}
		}
		// Draw pixels
		glDrawPixels(DIM, DIM, GL_RGB, GL_FLOAT, pixels);
		glFlush();
		// End timer and publish it in the window title.
		endTimer(&cpuTimer);
		// BUG FIX: the title buffer was malloc'd and leaked; use the stack.
		char title[100];
		snprintf(title, sizeof(title), "CPU | Time to render:\t %.5f s\n", cpuTimer/1000000);
		glutSetWindowTitle(title);
	}
	else
	{
		// Redraw the cached pixel buffer.
		// BUG FIX: the old code free()d `pixels` here, so the second
		// redisplay drew from freed memory and every later one
		// double-freed it.  The buffer must stay allocated.
		glDrawPixels(DIM, DIM, GL_RGB, GL_FLOAT, pixels);
		glFlush();
	}
}
/* GLUT keyboard callback: ESC (27) quits the program; every other key is
   ignored.  The cursor coordinates are unused. */
void keypress(unsigned char key, int x, int y)
{
	if (key == 27)
	{
		exit(0);
	}
}
// Set the global drag flag; the cursor coordinates are unused.
// NOTE(review): this helper is never registered as a GLUT callback in this
// file (mouse() manages `drag` directly) — it appears to be dead code;
// confirm before removing.
void startDrag(int mx, int my)
{
	drag = 1;
}
// GLUT motion callback: pans the view while the left button is held.
// mouse_i counts motion events and wraps at mouse_step, acting as a
// throttle so the (expensive) redraw happens only occasionally.
void mouse_move(int mx, int my)
{
	mouse_i++;
	if(mouse_i > mouse_step)
	{
		mouse_i = 0;
	}
	// Integer division: nonzero only when mouse_i == mouse_step.
	if(drag && mouse_i/mouse_step)
	{
		// NOTE(review): oldX/oldY are not refreshed here, so each applied
		// pan is measured from the original press position and the shift
		// grows every time this fires during one drag — confirm whether
		// that accumulation is intended.
		shiftX += (mx - oldX)/scale;
		shiftY += (my - oldY)/scale;
		glutPostRedisplay();
	}
}
/*
 * GLUT mouse-button callback.
 * A left-button press starts a drag (and records the press position for
 * mouse_move()); a release ends it.  Wheel events (X11 GLUT buttons 3/4)
 * zoom in/out by scaling `scale`, adjust the iteration budget via `tick`,
 * and request a redraw.
 */
void mouse(int button, int state, int mx, int my)
{
	if (button == GLUT_LEFT_BUTTON)
	{
		if (state == GLUT_DOWN || state == GLUT_UP)
		{
			/* Both transitions remember the cursor position; only the
			   drag flag differs between press and release. */
			drag = (state == GLUT_DOWN) ? 1 : 0;
			oldX = mx;
			oldY = my;
		}
	}
	else if (button == 3) /* wheel up: zoom in */
	{
		scale += scale * 0.5;
		tick += 1;
		glutPostRedisplay();
	}
	else if (button == 4) /* wheel down: zoom out */
	{
		scale -= scale * 0.5;
		tick -= 1;
		glutPostRedisplay();
	}
}
/* ========================================================================== */
/* SECTION: Main */
/* ========================================================================== */
/*
 * Entry point: prompt for a Julia-set seed, upload it to device constant
 * memory, then create the GPU- and CPU-rendered GLUT windows and enter
 * the event loop.
 */
int main(int argc, char** argv)
{
	// Store predefined seeds (Julia-set constants c = re + im*i)
	h_Default = make_float2(-0.7531930315709545, 0.05331999448114999);
	h_Nicholas = make_float2(-0.3740480961923849, -0.6066666719669807);
	h_Broccoli = make_float2(-0.3948897795591184, -0.5863460624863006);
	h_Wyatt = make_float2(-0.824, -0.1711);
	h_Custom = make_float2(0.0, 0.0);
	// Prompt for seed choice
	printf("\n Enter seed choice:\n");
	printf("\t0: Default\n");
	printf("\t1: Nicholas\n");
	printf("\t2: Broccoli\n");
	printf("\t3: Wyatt\n");
	printf("\t4: Enter Custom Seed\n\n\t> ");
	// BUG FIX: seedChoice is declared `unsigned int`, so read it with %u
	// (not %d), and fall back to the default seed on malformed input
	// instead of leaving seedChoice uninitialized.
	if (scanf("%u", &seedChoice) != 1)
	{
		seedChoice = 0;
	}
	// Load seed data onto device constant memory; errors are checked with
	// the same HANDLE_ERROR helper used by displayGPU().
	switch(seedChoice)
	{
	case 0:
		HANDLE_ERROR( cudaMemcpyToSymbol(d_Seed, &h_Default, sizeof(float2), 0, cudaMemcpyHostToDevice) );
		break;
	case 1:
		HANDLE_ERROR( cudaMemcpyToSymbol(d_Seed, &h_Nicholas, sizeof(float2), 0, cudaMemcpyHostToDevice) );
		break;
	case 2:
		HANDLE_ERROR( cudaMemcpyToSymbol(d_Seed, &h_Broccoli, sizeof(float2), 0, cudaMemcpyHostToDevice) );
		break;
	case 3:
		HANDLE_ERROR( cudaMemcpyToSymbol(d_Seed, &h_Wyatt, sizeof(float2), 0, cudaMemcpyHostToDevice) );
		break;
	case 4:
		// BUG FIX: scanf() does not display its format string, so the old
		// code ("\n Enter real: %f") forced the user to type the prompt
		// text literally before the number.  Print the prompts with
		// printf() and scan only the values.
		printf("\n Enter real: ");
		scanf("%f", &h_Custom.x);
		printf("\n Enter imaginary: ");
		scanf("%f", &h_Custom.y);
		HANDLE_ERROR( cudaMemcpyToSymbol(d_Seed, &h_Custom, sizeof(float2), 0, cudaMemcpyHostToDevice) );
		break;
	default:
		HANDLE_ERROR( cudaMemcpyToSymbol(d_Seed, &h_Default, sizeof(float2), 0, cudaMemcpyHostToDevice) );
	}
	// Initialize OpenGL / GLUT
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE);
	glutInitWindowSize(DIM, DIM);
	glutInitWindowPosition((50+1822+1680-DIM)/2,
	                       (500+1050+1050-DIM)/2);
	// Create first window (GPU renderer)
	glutCreateWindow("GPU | Time to render:\t---");
	glutDisplayFunc(displayGPU);
	glutKeyboardFunc(keypress);
	glutMouseFunc(mouse);
	glutMotionFunc(mouse_move);
	// Store position of the GPU window in order to
	// initialize the CPU window next to the GPU window
	int posX, posY;
	posX = glutGet(GLUT_WINDOW_X);
	posY = glutGet(GLUT_WINDOW_Y);
	// Create second window (CPU renderer)
	glutInitWindowPosition(posX+DIM,posY);
	glutCreateWindow("CPU | Time to render:\t---");
	glutDisplayFunc(displayCPU);
	glutKeyboardFunc(keypress);
	glClearColor(0.1, 0.1, 0.1, 0.1);
	glutMainLoop();
	return(0);
}
|
385760de033cb6ca5e03b79828b92f9061c27727.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*MIT License
Copyright (c) 2019 Xavier Martinez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
// Maximum triangle-vertex indices one cube can emit (5 triangles x 3),
// matching the largest entry of nbTriTable below.
#define MAX_VERTEX 15
// __constant__ int MAX_VERTEX = 15;
// Marching-cubes edge table: for each of the 256 corner inside/outside
// configurations, a 12-bit mask of which cube edges the isosurface
// intersects (standard Lorensen-Cline / Paul Bourke table).
__constant__ unsigned int edgeTable[256] =
{
	0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
	0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
	0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
	0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
	0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
	0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
	0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
	0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
	0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
	0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
	0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
	0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
	0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
	0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
	0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
	0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
	0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
	0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
	0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
	0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
	0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
	0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
	0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
	0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
	0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
	0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
	0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
	0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
	0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
	0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
	0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
	0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
};
// Marching-cubes triangle table: for each of the 256 configurations, up to
// five triangles given as triples of cube-edge indices (0-11), with -1
// terminating/padding each row (standard Lorensen-Cline / Bourke table).
__constant__ int triTable[256][16] = {
	{ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
	{ 8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1 },
	{ 3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1 },
	{ 4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
	{ 4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1 },
	{ 9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1 },
	{ 10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1 },
	{ 5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
	{ 5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1 },
	{ 8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1 },
	{ 2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
	{ 2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1 },
	{ 11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1 },
	{ 5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1 },
	{ 11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1 },
	{ 11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1 },
	{ 2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1 },
	{ 6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
	{ 3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1 },
	{ 6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
	{ 6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1 },
	{ 8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1 },
	{ 7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1 },
	{ 3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
	{ 0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1 },
	{ 9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1 },
	{ 8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
	{ 5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1 },
	{ 0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1 },
	{ 6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1 },
	{ 10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
	{ 1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1 },
	{ 0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1 },
	{ 3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
	{ 6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1 },
	{ 9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1 },
	{ 8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1 },
	{ 3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
	{ 6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1 },
	{ 10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1 },
	{ 10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
	{ 2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1 },
	{ 7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
	{ 7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
	{ 2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1 },
	{ 1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1 },
	{ 11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1 },
	{ 8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1 },
	{ 0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1 },
	{ 7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1 },
	{ 7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1 },
	{ 10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1 },
	{ 0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1 },
	{ 7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
	{ 6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1 },
	{ 6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1 },
	{ 4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1 },
	{ 10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1 },
	{ 8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1 },
	{ 1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1 },
	{ 10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1 },
	{ 10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1 },
	{ 9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1 },
	{ 7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1 },
	{ 3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1 },
	{ 7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1 },
	{ 3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1 },
	{ 6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1 },
	{ 9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1 },
	{ 1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1 },
	{ 4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1 },
	{ 7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1 },
	{ 6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1 },
	{ 0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1 },
	{ 6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1 },
	{ 0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1 },
	{ 11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1 },
	{ 6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1 },
	{ 5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1 },
	{ 9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1 },
	{ 1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1 },
	{ 10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1 },
	{ 0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1 },
	{ 10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1 },
	{ 11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1 },
	{ 9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1 },
	{ 7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1 },
	{ 2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1 },
	{ 9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1 },
	{ 9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1 },
	{ 1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
	{ 5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1 },
	{ 0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1 },
	{ 10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1 },
	{ 2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1 },
	{ 0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1 },
	{ 0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1 },
	{ 9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1 },
	{ 5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1 },
	{ 5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1 },
	{ 8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1 },
	{ 9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1 },
	{ 1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1 },
	{ 3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1 },
	{ 4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1 },
	{ 9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1 },
	{ 11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
	{ 11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1 },
	{ 2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1 },
	{ 9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1 },
	{ 3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1 },
	{ 1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1 },
	{ 4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1 },
	{ 0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
	{ 9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1 },
	{ 1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ 0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
	{ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }
};
// Number of valid vertex indices (3 per triangle, max 15) in the matching
// triTable row for each of the 256 cube configurations.
// (C++11 brace initialization — the missing '=' is legal.)
__constant__ int nbTriTable[256] {
	0, 3, 3, 6, 3, 6, 6, 9, 3, 6, 6, 9, 6, 9, 9, 6, 3,
	6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9,
	3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12,
	9, 6, 9, 9, 6, 9, 12, 12, 9, 9, 12, 12, 9, 12,
	15, 15, 6, 3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12,
	9, 12, 12, 9, 6, 9, 9, 12, 9, 12, 12, 15, 9, 12,
	12, 15, 12, 15, 15, 12, 6, 9, 9, 12, 9, 12, 6,
	9, 9, 12, 12, 15, 12, 15, 9, 6, 9, 12, 12, 9, 12,
	15, 9, 6, 12, 15, 15, 12, 15, 6, 12, 3, 3, 6, 6,
	9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 6, 9,
	9, 12, 9, 12, 12, 15, 9, 6, 12, 9, 12, 9, 15, 6,
	6, 9, 9, 12, 9, 12, 12, 15, 9, 12, 12, 15, 12,
	15, 15, 12, 9, 12, 12, 9, 12, 15, 15, 12, 12,
	9, 15, 6, 15, 12, 6, 3, 6, 9, 9, 12, 9, 12, 12,
	15, 9, 12, 12, 15, 6, 9, 9, 6, 9, 12, 12, 15,
	12, 15, 15, 6, 12, 9, 15, 12, 9, 6, 12, 3, 9,
	12, 12, 15, 12, 15, 9, 12, 12, 15, 15, 6, 9,
	12, 6, 3, 6, 9, 9, 6, 9, 12, 6, 3, 9, 6, 12, 3,
	6, 3, 3, 0
};
// One voxel corner: its integer grid coordinates and the scalar field
// value sampled there.
struct point {
	int3 xyz;
	float val;
};
/*
 * Interpolate the position where the isosurface crosses the edge between
 * two voxel corners (standard marching-cubes edge interpolation).
 * If either corner value — or the pair of corner values — is within
 * EPSILON of the isovalue, a corner position is returned directly to
 * avoid dividing by an (almost) zero value difference.
 */
__device__ float3 linearInterpolation(float isovalue, point voxelA, point voxelB) {
	float3 p;
	// FIX: the previous code used bare abs(); in plain C/C++ contexts that
	// resolves to the int overload, truncating float differences toward
	// zero so any |diff| < 1 passed the epsilon test.  fabsf() guarantees
	// a float absolute value.
	if (fabsf(isovalue - voxelA.val) < EPSILON) {
		p.x = (float)voxelA.xyz.x;
		p.y = (float)voxelA.xyz.y;
		p.z = (float)voxelA.xyz.z;
		return (p);
	}
	if (fabsf(isovalue - voxelB.val) < EPSILON) {
		p.x = (float)voxelB.xyz.x;
		p.y = (float)voxelB.xyz.y;
		p.z = (float)voxelB.xyz.z;
		return (p);
	}
	if (fabsf(voxelA.val - voxelB.val) < EPSILON) {
		p.x = (float)voxelA.xyz.x;
		p.y = (float)voxelA.xyz.y;
		p.z = (float)voxelA.xyz.z;
		return (p);
	}
	// Fractional distance of the crossing along the A -> B edge.
	float mu = (isovalue - voxelA.val) / (voxelB.val - voxelA.val);
	p.x = voxelA.xyz.x + mu * (voxelB.xyz.x - voxelA.xyz.x);
	p.y = voxelA.xyz.y + mu * (voxelB.xyz.y - voxelA.xyz.y);
	p.z = voxelA.xyz.z + mu * (voxelB.xyz.z - voxelA.xyz.z);
	return p;
}
// Flatten 3-D voxel indices into a 1-D array offset for a dim.x x dim.y x
// dim.z grid (x-major layout: consecutive z values are adjacent in memory).
__device__ inline unsigned int to1D(int3 ids, int3 dim) {
	return (dim.y * dim.z * ids.x) + (dim.z * ids.y) + ids.z;
}
// Overload of to1D for unsigned (e.g. thread-derived) voxel indices;
// identical x-major flattening.
__device__ inline unsigned int to1D(uint3 ids, int3 dim) {
	return (dim.y * dim.z * ids.x) + (dim.z * ids.y) + ids.z;
}
// __global__ void MCkernel(float isovalue, int3 dim, float* data, float3* vertex, int* triangle, int3 offset, unsigned int nbcells)
// {
// unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x) + offset.x;
// unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y) + offset.y;
// unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z) + offset.z;
// if (i >= dim.x - 1)
// return;
// if (j >= dim.y - 1)
// return;
// if (k >= dim.z - 1)
// return;
// //Variables
// point voxels[8];
// float3 vertlist[12];
// int index[8];
// int cubeIndex = 0;
// float3 vertices[MAX_VERTEX];
// int numTriangles = 0;
// int numVertices = 0;
// index[0] = to1D(make_int3(i, j, k), dim);
// index[1] = to1D(make_int3(i + 1, j, k), dim);
// index[2] = to1D(make_int3(i + 1, j + 1, k), dim);
// index[3] = to1D(make_int3(i, j + 1, k), dim);
// index[4] = to1D(make_int3(i, j, k + 1), dim);
// index[5] = to1D(make_int3(i + 1, j, k + 1), dim);
// index[6] = to1D(make_int3(i + 1, j + 1, k + 1), dim);
// index[7] = to1D(make_int3(i, j + 1, k + 1), dim);
// voxels[0].xyz.x = i;
// voxels[0].xyz.y = j;
// voxels[0].xyz.z = k;
// voxels[0].val = data[index[0]];
// voxels[1].xyz.x = i + 1;
// voxels[1].xyz.y = j;
// voxels[1].xyz.z = k;
// voxels[1].val = data[index[1]];
// voxels[2].xyz.x = i + 1;
// voxels[2].xyz.y = j + 1;
// voxels[2].xyz.z = k;
// voxels[2].val = data[index[2]];
// voxels[3].xyz.x = i;
// voxels[3].xyz.y = j + 1;
// voxels[3].xyz.z = k;
// voxels[3].val = data[index[3]];
// voxels[4].xyz.x = i;
// voxels[4].xyz.y = j;
// voxels[4].xyz.z = k + 1;
// voxels[4].val = data[index[4]];
// voxels[5].xyz.x = i + 1;
// voxels[5].xyz.y = j;
// voxels[5].xyz.z = k + 1;
// voxels[5].val = data[index[5]];
// voxels[6].xyz.x = i + 1;
// voxels[6].xyz.y = j + 1;
// voxels[6].xyz.z = k + 1;
// voxels[6].val = data[index[6]];
// voxels[7].xyz.x = i;
// voxels[7].xyz.y = j + 1;
// voxels[7].xyz.z = k + 1;
// voxels[7].val = data[index[7]];
// //PolygoniseCube
// if (voxels[0].val < isovalue) cubeIndex |= 1;
// if (voxels[1].val < isovalue) cubeIndex |= 2;
// if (voxels[2].val < isovalue) cubeIndex |= 4;
// if (voxels[3].val < isovalue) cubeIndex |= 8;
// if (voxels[4].val < isovalue) cubeIndex |= 16;
// if (voxels[5].val < isovalue) cubeIndex |= 32;
// if (voxels[6].val < isovalue) cubeIndex |= 64;
// if (voxels[7].val < isovalue) cubeIndex |= 128;
// //Getting edges
// unsigned int edges = edgeTable[cubeIndex];
// //Comparing edges with 12 bit by and operation and position coordinate
// if (edges == 0) {
// return;
// }
// if (edges & 1) {
// vertlist[0] = linearInterpolation(isovalue, voxels[0], voxels[1]);
// }
// if (edges & 2) {
// vertlist[1] = linearInterpolation(isovalue, voxels[1], voxels[2]);
// }
// if (edges & 4) {
// vertlist[2] = linearInterpolation(isovalue, voxels[2], voxels[3]);
// }
// if (edges & 8) {
// vertlist[3] = linearInterpolation(isovalue, voxels[3], voxels[0]);
// }
// if (edges & 16) {
// vertlist[4] = linearInterpolation(isovalue, voxels[4], voxels[5]);
// }
// if (edges & 32) {
// vertlist[5] = linearInterpolation(isovalue, voxels[5], voxels[6]);
// }
// if (edges & 64) {
// vertlist[6] = linearInterpolation(isovalue, voxels[6], voxels[7]);
// }
// if (edges & 128) {
// vertlist[7] = linearInterpolation(isovalue, voxels[7], voxels[4]);
// }
// if (edges & 256) {
// vertlist[8] = linearInterpolation(isovalue, voxels[0], voxels[4]);
// }
// if (edges & 512) {
// vertlist[9] = linearInterpolation(isovalue, voxels[1], voxels[5]);
// }
// if (edges & 1024) {
// vertlist[10] = linearInterpolation(isovalue, voxels[2], voxels[6]);
// }
// if (edges & 2048) {
// vertlist[11] = linearInterpolation(isovalue, voxels[3], voxels[7]);
// }
// if (cubeIndex >= 0 && cubeIndex < 256 ) {
// for (int i = 0; i < nbTriTable[cubeIndex]; i += 3) {
// if (triTable[cubeIndex][i] == -1 || numVertices + 3 >= MAX_VERTEX)
// break;
// vertices[numVertices++] = vertlist[triTable[cubeIndex][i]];
// vertices[numVertices++] = vertlist[triTable[cubeIndex][i + 1]];
// vertices[numVertices++] = vertlist[triTable[cubeIndex][i + 2]];
// ++numTriangles;
// }
// }
// // for (int n = 0; n < MAX_VERTEX; n += 3)
// // {
// // int edgeNumber = triTable[cubeIndex][n];
// // if (edgeNumber < 0)
// // break;
// // vertices[numVertices++] = pos[edgeNumber];
// // vertices[numVertices++] = pos[triTable[cubeIndex][n + 1]];
// // vertices[numVertices++] = pos[triTable[cubeIndex][n + 2]];
// // ++numTriangles;
// // }
// //Getting the number of triangles
// triangle[index[0]] = numTriangles;
// //Vertex List
// for (int n = 0; n < min(numVertices, MAX_VERTEX); ++n) {
// vertex[MAX_VERTEX * index[0] + n] = vertices[n];
// }
// }
// Pass 1 of the marching-cubes pipeline: one thread per grid cell.
// Writes, for cell (i,j,k), the number of triangle vertices the cell will
// emit (vertPerCell[id].x, looked up in nbTriTable) and an occupancy flag
// (vertPerCell[id].y) used later for stream compaction.
//
// NOTE(review): i/j/k are unsigned while every right-hand side in the guards
// below is a signed int expression, so the comparisons happen in unsigned
// arithmetic; for dim.* < 2 or rangeSearch < 3 the subtractions wrap around.
// Presumably the host always launches with larger dims — confirm before
// relying on those edge cases.
__global__ void countVertexPerCell(const float isovalue, const int3 dim, const float* data, uint2 *vertPerCell, const int rangeSearch, const int3 offset) {
    unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
    unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y);
    unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z);
    // Cells on the far faces have no complete 8-corner cube.
    if (i > dim.x - 2 || j > dim.y - 2 || k > dim.z - 2)
        return;
    // Trim to the sub-volume selected by offset/rangeSearch (a small overlap
    // band is kept so neighbouring passes stitch together).
    if ((offset.x != 0 && i < rangeSearch - 3) ||
        (offset.y != 0 && j < rangeSearch - 3) ||
        (offset.z != 0 && k < rangeSearch - 3))
        return;
    if ((offset.x == 0 && i >= dim.x - rangeSearch * 2 - 1) ||
        (offset.y == 0 && j >= dim.y - rangeSearch * 2 - 1) ||
        (offset.z == 0 && k >= dim.z - rangeSearch * 2 - 1))
        return;
    if (i >= dim.x - rangeSearch - 1 ||
        j >= dim.y - rangeSearch - 1 ||
        k >= dim.z - rangeSearch - 1)
        return;
    // Corner 0 is the cell itself: reuse its linear id instead of computing
    // to1D(make_int3(i, j, k), dim) a second time (the original did).
    unsigned int id = to1D(make_int3(i, j, k), dim);
    float voxel0 = data[id];
    float voxel1 = data[to1D(make_int3(i + 1, j, k), dim)];
    float voxel2 = data[to1D(make_int3(i + 1, j + 1, k), dim)];
    float voxel3 = data[to1D(make_int3(i, j + 1, k), dim)];
    float voxel4 = data[to1D(make_int3(i, j, k + 1), dim)];
    float voxel5 = data[to1D(make_int3(i + 1, j, k + 1), dim)];
    float voxel6 = data[to1D(make_int3(i + 1, j + 1, k + 1), dim)];
    float voxel7 = data[to1D(make_int3(i, j + 1, k + 1), dim)];
    // Branchless 8-bit marching-cubes case index: bit n set when corner n
    // lies below the isosurface.
    int cubeIndex = (int)(voxel0 < isovalue);
    cubeIndex += ((int)(voxel1 < isovalue)) * 2;
    cubeIndex += ((int)(voxel2 < isovalue)) * 4;
    cubeIndex += ((int)(voxel3 < isovalue)) * 8;
    cubeIndex += ((int)(voxel4 < isovalue)) * 16;
    cubeIndex += ((int)(voxel5 < isovalue)) * 32;
    cubeIndex += ((int)(voxel6 < isovalue)) * 64;
    cubeIndex += ((int)(voxel7 < isovalue)) * 128;
    uint2 Nverts;
    Nverts.x = nbTriTable[cubeIndex]; // vertex count for this configuration
    Nverts.y = (Nverts.x > 0);        // 1 when the cell produces geometry
    vertPerCell[id] = Nverts;
}
// Pass 2 of the marching-cubes pipeline: stream compaction. Writes the
// linear id of every occupied voxel into compactedVoxelArray at the slot
// given by the exclusive scan stored in voxelOccupied[id].y.
__global__ void compactVoxels(unsigned int * compactedVoxelArray,
const uint2 *voxelOccupied,
unsigned int lastVoxel, unsigned int numVoxels,
unsigned int numVoxelsp1, int3 dim, const int rangeSearch, const int3 offset)
{
    unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
    unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y);
    unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z);
    // Same sub-volume trimming as countVertexPerCell: far faces first, then
    // the offset/rangeSearch window.
    if (i > dim.x - 2 || j > dim.y - 2 || k > dim.z - 2)
        return;
    if ((offset.x != 0 && i < rangeSearch - 3) ||
        (offset.y != 0 && j < rangeSearch - 3) ||
        (offset.z != 0 && k < rangeSearch - 3))
        return;
    if ((offset.x == 0 && i >= dim.x - rangeSearch * 2 - 1) ||
        (offset.y == 0 && j >= dim.y - rangeSearch * 2 - 1) ||
        (offset.z == 0 && k >= dim.z - rangeSearch * 2 - 1))
        return;
    if (i >= dim.x - rangeSearch - 1 ||
        j >= dim.y - rangeSearch - 1 ||
        k >= dim.z - rangeSearch - 1)
        return;
    unsigned int id = to1D(make_int3(i, j, k), dim);
    if (id >= numVoxels)
        return;
    // A voxel is occupied when its scanned flag differs from its successor's;
    // the final voxel falls back to the host-computed `lastVoxel` flag.
    bool occupied = (id < numVoxelsp1)
        ? (voxelOccupied[id].y < voxelOccupied[id + 1].y)
        : (lastVoxel != 0);
    if (occupied) {
        compactedVoxelArray[voxelOccupied[id].y] = id;
    }
}
// Component-wise linear interpolation between a and b at parameter t.
inline __device__ float3 lerp(float3 a, float3 b, float t)
{
    return make_float3(a.x + t * (b.x - a.x),
                       a.y + t * (b.y - a.y),
                       a.z + t * (b.z - a.z));
}
// Converts integer cell coordinates to a world-space position given the
// grid origin and the uniform cell spacing dx.
__device__ float3 gridPosition(uint3 cellPos, float3 originGrid, float dx) {
    float3 world;
    world.x = originGrid.x + cellPos.x * dx;
    world.y = originGrid.y + cellPos.y * dx;
    world.z = originGrid.z + cellPos.z * dx;
    return world;
}
// Inverse of to1D: recovers (x, y, z) from a row-major linear index
// (x slowest-varying, z fastest).
__device__ uint3 grid1DTo3D(unsigned int index, int3 gridDim) {
    uint3 res;
    unsigned int sliceSize = gridDim.y * gridDim.z; // cells per x-slice
    res.x = index / sliceSize;
    unsigned int inSlice = index % sliceSize;
    res.y = inSlice / gridDim.z;
    res.z = inSlice % gridDim.z;
    return res;
}
// Finds the point on segment [p0, p1] where a linearly varying scalar field
// crosses `isolevel`, given the field values f0 and f1 at the endpoints.
// Note: f0 == f1 yields inf/NaN; the caller only reads results for edges
// that actually cross the isosurface (where f0 != f1).
__device__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1) {
    return lerp(p0, p1, (isolevel - f0) / (f1 - f0));
}
// Pass 3 of the marching-cubes pipeline: for every occupied voxel listed in
// compactedVoxelArray, emit triangle vertices into `pos` at the offset given
// by the exclusive scan numVertsScanned[voxel].x.
//
// Parameters:
//   originGridDX - xyz = world-space grid origin, w = cell spacing dx
//   activeVoxels - number of entries in compactedVoxelArray
//   maxVertsM3   - output-capacity guard (max vertices minus 3)
//   offset       - shift (in cells) added to every output position
//
// NOTE(review): `id` flattens the 3D thread index against cudaGrid =
// blockDim, and the shared vertlist is indexed by threadIdx.x alone with
// stride NBTHREADS — this assumes blocks of NBTHREADS threads in which
// threadIdx.x uniquely identifies a thread. Confirm against the host-side
// launch configuration.
__global__ void generateTriangleVerticesSMEM(float3 *pos,
const unsigned int *compactedVoxelArray,
const uint2 * numVertsScanned,
const float *gridValues,
float4 originGridDX,
float isoValue, unsigned int activeVoxels,
unsigned int maxVertsM3, int3 dim, int3 offset)
{
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y);
unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z);
// if (i >= dim.x)
// return;
// if (j >= dim.y)
// return;
// if (k >= dim.z)
// return;
// Flatten the 3D thread coordinates into a single work-item index.
int3 cudaGrid = make_int3(blockDim.x, blockDim.y, blockDim.z);
unsigned int id = to1D(make_int3(i, j, k), cudaGrid);
if (id >= activeVoxels)
return;
// Recover the voxel's 3D grid position and its world-space base corner.
unsigned int voxel = compactedVoxelArray[id];
uint3 gridPos = grid1DTo3D(voxel, dim);
float dx = originGridDX.w;
float3 originGrid = make_float3(originGridDX.x, originGridDX.y, originGridDX.z);
float3 p = gridPosition(gridPos, originGrid, dx);
// calculate cell vertex positions (the 8 cube corners, corner 0 at p)
float3 v[8];
v[0] = p;
v[1] = p + make_float3(dx, 0, 0);
v[2] = p + make_float3(dx, dx, 0);
v[3] = p + make_float3(0, dx, 0);
v[4] = p + make_float3(0, 0, dx);
v[5] = p + make_float3(dx, 0, dx);
v[6] = p + make_float3(dx, dx, dx);
v[7] = p + make_float3(0, dx, dx);
// Sample the scalar field at the 8 corners (same corner order as above).
float field[8];
field[0] = gridValues[voxel];
field[1] = gridValues[to1D(gridPos + make_uint3(1, 0, 0), dim)];
field[2] = gridValues[to1D(gridPos + make_uint3(1, 1, 0), dim)];
field[3] = gridValues[to1D(gridPos + make_uint3(0, 1, 0), dim)];
field[4] = gridValues[to1D(gridPos + make_uint3(0, 0, 1), dim)];
field[5] = gridValues[to1D(gridPos + make_uint3(1, 0, 1), dim)];
field[6] = gridValues[to1D(gridPos + make_uint3(1, 1, 1), dim)];
field[7] = gridValues[to1D(gridPos + make_uint3(0, 1, 1), dim)];
// recalculate flag: the 8-bit marching-cubes case index, bit n set when
// corner n is below the isovalue
unsigned int cubeindex;
cubeindex = ((unsigned int)(field[0] < isoValue));
cubeindex += ((unsigned int)(field[1] < isoValue)) * 2;
cubeindex += ((unsigned int)(field[2] < isoValue)) * 4;
cubeindex += ((unsigned int)(field[3] < isoValue)) * 8;
cubeindex += ((unsigned int)(field[4] < isoValue)) * 16;
cubeindex += ((unsigned int)(field[5] < isoValue)) * 32;
cubeindex += ((unsigned int)(field[6] < isoValue)) * 64;
cubeindex += ((unsigned int)(field[7] < isoValue)) * 128;
// find the vertices where the surface intersects the cube
// Note: SIMD marching cubes implementations have no need
// for an edge table, because branch divergence eliminates any
// potential performance gain from only computing the per-edge
// vertices when indicated by the edgeTable.
// Use shared memory to keep register pressure under control.
// No need to call __syncthreads() since each thread uses its own
// private shared memory buffer (slot threadIdx.x of each 12 strided rows).
__shared__ float3 vertlist[12 * NBTHREADS];
vertlist[threadIdx.x ] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[(NBTHREADS * 1) + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[(NBTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[(NBTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[(NBTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[(NBTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[(NBTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[(NBTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[(NBTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[(NBTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[(NBTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[(NBTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
// World-space shift applied to every emitted vertex.
float3 offsetPos = make_float3(offset.x * dx, offset.y * dx, offset.z * dx);
// output triangle vertices; triTable lists the edges of each triangle,
// three per iteration (note: this loop's `i` shadows the thread index `i`)
unsigned int numVerts = nbTriTable[cubeindex];
for (int i = 0; i < numVerts; i += 3) {
unsigned int index = numVertsScanned[voxel].x + i;
float3 *vert[3];
int edge;
edge = triTable[cubeindex][i];
vert[0] = &vertlist[(edge * NBTHREADS) + threadIdx.x];
edge = triTable[cubeindex][i + 1];
vert[1] = &vertlist[(edge * NBTHREADS) + threadIdx.x];
edge = triTable[cubeindex][i + 2];
vert[2] = &vertlist[(edge * NBTHREADS) + threadIdx.x];
// Drop triangles that would overflow the output buffer.
if (index < maxVertsM3) {
pos[index ] = *vert[0] + offsetPos;
pos[index + 1] = *vert[1] + offsetPos;
pos[index + 2] = *vert[2] + offsetPos;
}
}
}
// Snaps every vertex onto a lattice of pitch `tolerance` so vertices within
// ~tolerance of each other collapse onto identical coordinates (preparation
// for welding/deduplication). One thread per vertex.
__global__ void groupVertices(float3 *verts, const unsigned int nbVerts, const float tolerance) {
    unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
    if (i >= nbVerts)
        return;
    // Load once, quantize, store once (was 3 global reads + 3 writes per
    // component access pattern).
    float3 v = verts[i];
    // roundf keeps the computation in single precision; the original
    // double-precision `round` forced a float->double->float round-trip on
    // every component.
    v.x = roundf(v.x / tolerance) * tolerance;
    v.y = roundf(v.y / tolerance) * tolerance;
    v.z = roundf(v.z / tolerance) * tolerance;
    verts[i] = v;
}
#define MAXNEIGHBOR 32
// In-place Laplacian smoothing: each vertex is repeatedly replaced by the
// average of the other corners of its incident triangles. One thread per
// vertex, `ite` smoothing iterations.
//
// NOTE(review): neighbour positions are read while other threads rewrite
// their own vertices, so results depend on scheduling (a benign race for
// smoothing, but non-deterministic). The neighbour search below is a full
// O(nbVerts * nbTris) scan — acceptable for small meshes only.
__global__ void LaplacianSmooth(float3 *verts, int *triangles, const unsigned int nbVerts, const unsigned int nbTris, const unsigned int ite) {
//For each vertex
unsigned int idx = (threadIdx.x + blockIdx.x * blockDim.x);
if (idx >= nbVerts) {
return;
}
/* int neighbors[MAXNEIGHBOR];
int curId = -1;
for (int i = 0; i < MAXNEIGHBOR; i++) {
neighbors[i] = -1;
}
for (int t = 0; t < nbTris && curId < MAXNEIGHBOR; t++) {
if (triangles[t * 3] == idx || triangles[t * 3 + 1] == idx || triangles[t * 3 + 2] == idx)
neighbors[curId++] = t;
// int compar = (triangles[t * 3] == idx || triangles[t * 3 + 1] == idx || triangles[t * 3 + 2] == idx);
// neighbors[curId*compar] = t;
// curId = compar + curId;
}
for (int t = 0; t < ite; ++t) {
float3 curV = make_float3(0,0,0);
for (int i = 0; i < curId; i++) {
int idv1 = triangles[neighbors[i] * 3 + 0];
int idv2 = triangles[neighbors[i] * 3 + 1];
int idv3 = triangles[neighbors[i] * 3 + 2];
if(idv1 != idx)
curV = curV + verts[idv1];
if(idv2 != idx)
curV = curV + verts[idv2];
if(idv3 != idx)
curV = curV + verts[idv3];
}
curV = curV / max(1,curId*2);
verts[idx] = curV;
}*/
int neighbors[MAXNEIGHBOR];//The 0 is not used: slot 0 absorbs the dummy writes of the branchless loop below
int curId = 1;
for (int i = 0; i < MAXNEIGHBOR; i++) {
neighbors[i] = -1;
}
//TODO: Could use shared mem here
//Get 31 neighbors max: record up to MAXNEIGHBOR-1 triangles incident to idx
for (int t = 0; t < nbTris && curId < MAXNEIGHBOR; t++) {
// Branchless append: when compar == 0 the write lands in unused slot 0
// and curId does not advance.
int compar = (triangles[t * 3] == idx || triangles[t * 3 + 1] == idx || triangles[t * 3 + 2] == idx);
neighbors[curId * compar] = t;
curId = compar + curId;
}
for (int t = 0; t < ite; ++t) {
float3 curV = make_float3(0, 0, 0);
// float3 save = verts[idx];
//For all neighbors of current vertex
for (int i = 1; i < curId ; i++) {
int idv1 = triangles[neighbors[i] * 3 + 0];
int idv2 = triangles[neighbors[i] * 3 + 1];
int idv3 = triangles[neighbors[i] * 3 + 2];
// Branchless accumulate: a corner equal to idx contributes zero.
curV = curV + (verts[idv1] * (idv1 != idx));
curV = curV + (verts[idv2] * (idv2 != idx));
curV = curV + (verts[idv3] * (idv3 != idx));
// curV = curV - save;
}
if (curId != 1) {
// Each of the (curId - 1) incident triangles contributed 2 foreign
// corners, so divide by 2 * (curId - 1) to average.
curV = (curV / ((curId - 1) * 2));
verts[idx] = curV;
}
}
}
| 385760de033cb6ca5e03b79828b92f9061c27727.cu | /*MIT License
Copyright (c) 2019 Xavier Martinez
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#define MAX_VERTEX 15
// __constant__ int MAX_VERTEX = 15;
// Marching-cubes edge table: edgeTable[caseIndex] is a 12-bit mask of the
// cube edges crossed by the isosurface for that 8-bit corner configuration.
__constant__ unsigned int edgeTable[256] =
{
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
};
// Marching-cubes triangulation table: triTable[caseIndex] lists the cube-edge
// indices (0-11) forming the triangles for that corner configuration, three
// entries per triangle, padded/terminated with -1 (at most 5 triangles).
__constant__ int triTable[256][16] = {
{ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
{ 8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1 },
{ 3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1 },
{ 4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
{ 4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1 },
{ 9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1 },
{ 10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1 },
{ 5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1 },
{ 5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1 },
{ 8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1 },
{ 2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
{ 2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1 },
{ 11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1 },
{ 5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1 },
{ 11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1 },
{ 11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1 },
{ 2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1 },
{ 6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
{ 3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1 },
{ 6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1 },
{ 6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1 },
{ 8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1 },
{ 7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1 },
{ 3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1 },
{ 0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1 },
{ 9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1 },
{ 8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1 },
{ 5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1 },
{ 0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1 },
{ 6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1 },
{ 10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
{ 1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1 },
{ 0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1 },
{ 3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1 },
{ 6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1 },
{ 9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1 },
{ 8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1 },
{ 3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1 },
{ 10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1 },
{ 10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
{ 2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1 },
{ 7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1 },
{ 2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1 },
{ 1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1 },
{ 11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1 },
{ 8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1 },
{ 0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1 },
{ 7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1 },
{ 7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1 },
{ 10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1 },
{ 0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1 },
{ 7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1 },
{ 6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1 },
{ 4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1 },
{ 10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1 },
{ 8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1 },
{ 1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1 },
{ 10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1 },
{ 10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1 },
{ 9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1 },
{ 7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1 },
{ 3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1 },
{ 7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1 },
{ 3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1 },
{ 6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1 },
{ 9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1 },
{ 1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1 },
{ 4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1 },
{ 7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1 },
{ 6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1 },
{ 0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1 },
{ 6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1 },
{ 0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1 },
{ 11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1 },
{ 6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1 },
{ 5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1 },
{ 9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1 },
{ 1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1 },
{ 10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1 },
{ 0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1 },
{ 11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1 },
{ 9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1 },
{ 7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1 },
{ 2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1 },
{ 9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1 },
{ 9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1 },
{ 1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1 },
{ 0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1 },
{ 10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1 },
{ 2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1 },
{ 0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1 },
{ 0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1 },
{ 9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1 },
{ 5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1 },
{ 5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1 },
{ 8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1 },
{ 9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1 },
{ 1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1 },
{ 3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1 },
{ 4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1 },
{ 9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1 },
{ 11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1 },
{ 11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1 },
{ 2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1 },
{ 9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1 },
{ 3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1 },
{ 1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1 },
{ 4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1 },
{ 0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1 },
{ 9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1 },
{ 1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ 0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 },
{ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }
};
// nbTriTable[caseIndex] = number of triangle-list entries (3 * triangle
// count) in triTable for that case; always a multiple of 3 in 0..15.
__constant__ int nbTriTable[256] {
0, 3, 3, 6, 3, 6, 6, 9, 3, 6, 6, 9, 6, 9, 9, 6, 3,
6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9,
3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12,
9, 6, 9, 9, 6, 9, 12, 12, 9, 9, 12, 12, 9, 12,
15, 15, 6, 3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12,
9, 12, 12, 9, 6, 9, 9, 12, 9, 12, 12, 15, 9, 12,
12, 15, 12, 15, 15, 12, 6, 9, 9, 12, 9, 12, 6,
9, 9, 12, 12, 15, 12, 15, 9, 6, 9, 12, 12, 9, 12,
15, 9, 6, 12, 15, 15, 12, 15, 6, 12, 3, 3, 6, 6,
9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 6, 9,
9, 12, 9, 12, 12, 15, 9, 6, 12, 9, 12, 9, 15, 6,
6, 9, 9, 12, 9, 12, 12, 15, 9, 12, 12, 15, 12,
15, 15, 12, 9, 12, 12, 9, 12, 15, 15, 12, 12,
9, 15, 6, 15, 12, 6, 3, 6, 9, 9, 12, 9, 12, 12,
15, 9, 12, 12, 15, 6, 9, 9, 6, 9, 12, 12, 15,
12, 15, 15, 6, 12, 9, 15, 12, 9, 6, 12, 3, 9,
12, 12, 15, 12, 15, 9, 12, 12, 15, 15, 6, 9,
12, 6, 3, 6, 9, 9, 6, 9, 12, 6, 3, 9, 6, 12, 3,
6, 3, 3, 0
};
/* A sampled voxel corner: integer grid coordinates plus the scalar field
 * value measured there. Used as the endpoints of cube edges during
 * marching-cubes interpolation. */
struct point {
int3 xyz;   /* position in grid units (not world space) */
float val;  /* scalar field sample at xyz */
};
/* Interpolate the position where the isosurface (field value == isovalue)
 * crosses the edge between voxelA and voxelB, assuming the field varies
 * linearly along the edge.
 * Degenerate cases (either endpoint already on the surface within EPSILON,
 * or a flat edge where both samples coincide) return an endpoint position
 * to avoid dividing by ~0.
 * Fix vs. original: uses fabsf instead of abs so the comparisons are
 * guaranteed single-precision float (abs risks resolving to the integer
 * overload / promoting to double); the triplicated endpoint-return code is
 * collapsed into make_float3 calls and the dead commented block removed. */
__device__ float3 linearInterpolation(float isovalue, point voxelA, point voxelB) {
/* Endpoint A is (numerically) on the isosurface. */
if (fabsf(isovalue - voxelA.val) < EPSILON)
return make_float3((float)voxelA.xyz.x, (float)voxelA.xyz.y, (float)voxelA.xyz.z);
/* Endpoint B is on the isosurface. */
if (fabsf(isovalue - voxelB.val) < EPSILON)
return make_float3((float)voxelB.xyz.x, (float)voxelB.xyz.y, (float)voxelB.xyz.z);
/* Flat edge: the interpolation factor would be 0/0; pick endpoint A. */
if (fabsf(voxelA.val - voxelB.val) < EPSILON)
return make_float3((float)voxelA.xyz.x, (float)voxelA.xyz.y, (float)voxelA.xyz.z);
/* Standard linear interpolation along the edge. */
const float mu = (isovalue - voxelA.val) / (voxelB.val - voxelA.val);
float3 p;
p.x = voxelA.xyz.x + mu * (voxelB.xyz.x - voxelA.xyz.x);
p.y = voxelA.xyz.y + mu * (voxelB.xyz.y - voxelA.xyz.y);
p.z = voxelA.xyz.z + mu * (voxelB.xyz.z - voxelA.xyz.z);
return p;
}
/* Flatten 3-D grid coordinates to a linear index; x-major layout
 * (x varies slowest, z fastest). Same arithmetic as the uint3 overload. */
__device__ inline unsigned int to1D(int3 ids, int3 dim) {
return ids.z + dim.z * (ids.y + dim.y * ids.x);
}
/* Unsigned-coordinate overload of to1D; identical x-major flattening. */
__device__ inline unsigned int to1D(uint3 ids, int3 dim) {
return ids.z + dim.z * (ids.y + dim.y * ids.x);
}
// __global__ void MCkernel(float isovalue, int3 dim, float* data, float3* vertex, int* triangle, int3 offset, unsigned int nbcells)
// {
// unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x) + offset.x;
// unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y) + offset.y;
// unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z) + offset.z;
// if (i >= dim.x - 1)
// return;
// if (j >= dim.y - 1)
// return;
// if (k >= dim.z - 1)
// return;
// //Variables
// point voxels[8];
// float3 vertlist[12];
// int index[8];
// int cubeIndex = 0;
// float3 vertices[MAX_VERTEX];
// int numTriangles = 0;
// int numVertices = 0;
// index[0] = to1D(make_int3(i, j, k), dim);
// index[1] = to1D(make_int3(i + 1, j, k), dim);
// index[2] = to1D(make_int3(i + 1, j + 1, k), dim);
// index[3] = to1D(make_int3(i, j + 1, k), dim);
// index[4] = to1D(make_int3(i, j, k + 1), dim);
// index[5] = to1D(make_int3(i + 1, j, k + 1), dim);
// index[6] = to1D(make_int3(i + 1, j + 1, k + 1), dim);
// index[7] = to1D(make_int3(i, j + 1, k + 1), dim);
// voxels[0].xyz.x = i;
// voxels[0].xyz.y = j;
// voxels[0].xyz.z = k;
// voxels[0].val = data[index[0]];
// voxels[1].xyz.x = i + 1;
// voxels[1].xyz.y = j;
// voxels[1].xyz.z = k;
// voxels[1].val = data[index[1]];
// voxels[2].xyz.x = i + 1;
// voxels[2].xyz.y = j + 1;
// voxels[2].xyz.z = k;
// voxels[2].val = data[index[2]];
// voxels[3].xyz.x = i;
// voxels[3].xyz.y = j + 1;
// voxels[3].xyz.z = k;
// voxels[3].val = data[index[3]];
// voxels[4].xyz.x = i;
// voxels[4].xyz.y = j;
// voxels[4].xyz.z = k + 1;
// voxels[4].val = data[index[4]];
// voxels[5].xyz.x = i + 1;
// voxels[5].xyz.y = j;
// voxels[5].xyz.z = k + 1;
// voxels[5].val = data[index[5]];
// voxels[6].xyz.x = i + 1;
// voxels[6].xyz.y = j + 1;
// voxels[6].xyz.z = k + 1;
// voxels[6].val = data[index[6]];
// voxels[7].xyz.x = i;
// voxels[7].xyz.y = j + 1;
// voxels[7].xyz.z = k + 1;
// voxels[7].val = data[index[7]];
// //PolygoniseCube
// if (voxels[0].val < isovalue) cubeIndex |= 1;
// if (voxels[1].val < isovalue) cubeIndex |= 2;
// if (voxels[2].val < isovalue) cubeIndex |= 4;
// if (voxels[3].val < isovalue) cubeIndex |= 8;
// if (voxels[4].val < isovalue) cubeIndex |= 16;
// if (voxels[5].val < isovalue) cubeIndex |= 32;
// if (voxels[6].val < isovalue) cubeIndex |= 64;
// if (voxels[7].val < isovalue) cubeIndex |= 128;
// //Getting edges
// unsigned int edges = edgeTable[cubeIndex];
// //Comparing edges with 12 bit by and operation and position coordinate
// if (edges == 0) {
// return;
// }
// if (edges & 1) {
// vertlist[0] = linearInterpolation(isovalue, voxels[0], voxels[1]);
// }
// if (edges & 2) {
// vertlist[1] = linearInterpolation(isovalue, voxels[1], voxels[2]);
// }
// if (edges & 4) {
// vertlist[2] = linearInterpolation(isovalue, voxels[2], voxels[3]);
// }
// if (edges & 8) {
// vertlist[3] = linearInterpolation(isovalue, voxels[3], voxels[0]);
// }
// if (edges & 16) {
// vertlist[4] = linearInterpolation(isovalue, voxels[4], voxels[5]);
// }
// if (edges & 32) {
// vertlist[5] = linearInterpolation(isovalue, voxels[5], voxels[6]);
// }
// if (edges & 64) {
// vertlist[6] = linearInterpolation(isovalue, voxels[6], voxels[7]);
// }
// if (edges & 128) {
// vertlist[7] = linearInterpolation(isovalue, voxels[7], voxels[4]);
// }
// if (edges & 256) {
// vertlist[8] = linearInterpolation(isovalue, voxels[0], voxels[4]);
// }
// if (edges & 512) {
// vertlist[9] = linearInterpolation(isovalue, voxels[1], voxels[5]);
// }
// if (edges & 1024) {
// vertlist[10] = linearInterpolation(isovalue, voxels[2], voxels[6]);
// }
// if (edges & 2048) {
// vertlist[11] = linearInterpolation(isovalue, voxels[3], voxels[7]);
// }
// if (cubeIndex >= 0 && cubeIndex < 256 ) {
// for (int i = 0; i < nbTriTable[cubeIndex]; i += 3) {
// if (triTable[cubeIndex][i] == -1 || numVertices + 3 >= MAX_VERTEX)
// break;
// vertices[numVertices++] = vertlist[triTable[cubeIndex][i]];
// vertices[numVertices++] = vertlist[triTable[cubeIndex][i + 1]];
// vertices[numVertices++] = vertlist[triTable[cubeIndex][i + 2]];
// ++numTriangles;
// }
// }
// // for (int n = 0; n < MAX_VERTEX; n += 3)
// // {
// // int edgeNumber = triTable[cubeIndex][n];
// // if (edgeNumber < 0)
// // break;
// // vertices[numVertices++] = pos[edgeNumber];
// // vertices[numVertices++] = pos[triTable[cubeIndex][n + 1]];
// // vertices[numVertices++] = pos[triTable[cubeIndex][n + 2]];
// // ++numTriangles;
// // }
// //Getting the number of triangles
// triangle[index[0]] = numTriangles;
// //Vertex List
// for (int n = 0; n < min(numVertices, MAX_VERTEX); ++n) {
// vertex[MAX_VERTEX * index[0] + n] = vertices[n];
// }
// }
/* Marching cubes - pass 1 (classification).
 * One thread per cell (i,j,k). For each cell inside the slab this launch
 * owns, classify its 8 corner samples against the isovalue and store
 *   vertPerCell[id].x = number of triangle vertices the cell will emit
 *   vertPerCell[id].y = 1 if the cell emits any geometry, else 0
 * so the host can prefix-scan these counts before the generation pass.
 * NOTE(review): the rangeSearch/offset guards look like halo/slab clipping
 * for a chunked grid (skip cells owned by the neighbouring chunk); the
 * exact margins (-3, *2-1, -1) should be confirmed against the host-side
 * chunking code. They must match compactVoxels exactly. */
__global__ void countVertexPerCell(const float isovalue, const int3 dim, const float* data, uint2 *vertPerCell, const int rangeSearch, const int3 offset) {
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y);
unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z);
/* A cell reads its +1 neighbours, so the last valid cell index is dim-2. */
if (i > dim.x - 2)
return;
if (j > dim.y - 2)
return;
if (k > dim.z - 2)
return;
/* Interior chunk (offset != 0): skip the lower halo region. */
if(offset.x != 0 && i < rangeSearch - 3)
return;
if(offset.y != 0 && j < rangeSearch - 3)
return;
if(offset.z != 0 && k < rangeSearch - 3)
return;
/* First chunk (offset == 0): stop before the next chunk's territory. */
if(offset.x == 0 && i >= dim.x - rangeSearch * 2 - 1)
return;
if(offset.y == 0 && j >= dim.y - rangeSearch * 2 - 1)
return;
if(offset.z == 0 && k >= dim.z - rangeSearch * 2 - 1)
return;
/* Never touch the upper halo. */
if(i >= dim.x - rangeSearch - 1)
return;
if(j >= dim.y - rangeSearch - 1)
return;
if(k >= dim.z - rangeSearch - 1)
return;
unsigned int id = to1D(make_int3(i, j, k), dim);
uint2 Nverts = make_uint2(0, 0);
/* Scalar field samples at the 8 cube corners (standard MC corner order). */
float voxel0;
float voxel1;
float voxel2;
float voxel3;
float voxel4;
float voxel5;
float voxel6;
float voxel7;
// __shared__ float voxels[8];
// float3 vertlist[12];
// int index[8];
int cubeIndex = 0;
voxel0 = data[to1D(make_int3(i, j, k), dim)];
voxel1 = data[to1D(make_int3(i + 1, j, k), dim)];
voxel2 = data[to1D(make_int3(i + 1, j + 1, k), dim)];
voxel3 = data[to1D(make_int3(i, j + 1, k), dim)];
voxel4 = data[to1D(make_int3(i, j, k + 1), dim)];
voxel5 = data[to1D(make_int3(i + 1, j, k + 1), dim)];
voxel6 = data[to1D(make_int3(i + 1, j + 1, k + 1), dim)];
voxel7 = data[to1D(make_int3(i, j + 1, k + 1), dim)];
//PolygoniseCube
// if (voxel0 < isovalue) cubeIndex |= 1;
// if (voxel1 < isovalue) cubeIndex |= 2;
// if (voxel2 < isovalue) cubeIndex |= 4;
// if (voxel3 < isovalue) cubeIndex |= 8;
// if (voxel4 < isovalue) cubeIndex |= 16;
// if (voxel5 < isovalue) cubeIndex |= 32;
// if (voxel6 < isovalue) cubeIndex |= 64;
// if (voxel7 < isovalue) cubeIndex |= 128;
/* Branchless cube classification: bit n set <=> corner n is below the
 * isovalue (avoids warp divergence of the if-chain kept above). */
cubeIndex = ((unsigned int) (voxel0 < isovalue));
cubeIndex += ((unsigned int) (voxel1 < isovalue))* 2;
cubeIndex += ((unsigned int) (voxel2 < isovalue))* 4;
cubeIndex += ((unsigned int) (voxel3 < isovalue))* 8;
cubeIndex += ((unsigned int) (voxel4 < isovalue))* 16;
cubeIndex += ((unsigned int) (voxel5 < isovalue))* 32;
cubeIndex += ((unsigned int) (voxel6 < isovalue))* 64;
cubeIndex += ((unsigned int) (voxel7 < isovalue))* 128;
/* Vertex count for this MC case; occupancy flag derived from it. */
Nverts.x = nbTriTable[cubeIndex];
Nverts.y = (Nverts.x > 0);
vertPerCell[id] = Nverts;
}
/* Marching cubes - pass 2 (stream compaction).
 * voxelOccupied[].y holds the exclusive prefix scan of the per-cell
 * occupancy flags, so cell `id` is occupied exactly when
 * scan[id] < scan[id+1]; its slot in the compacted array is scan[id].
 * `lastVoxel` supplies the occupancy flag of the final cell, which has no
 * successor to compare against.
 * The slab/halo guards must be identical to countVertexPerCell so both
 * passes agree on which cells participate. */
__global__ void compactVoxels(unsigned int * compactedVoxelArray,
const uint2 *voxelOccupied,
unsigned int lastVoxel, unsigned int numVoxels,
unsigned int numVoxelsp1, int3 dim, const int rangeSearch, const int3 offset)
{
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y);
unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z);
/* Cells read +1 neighbours: last valid index is dim-2. */
if (i > dim.x - 2)
return;
if (j > dim.y - 2)
return;
if (k > dim.z - 2)
return;
/* Interior chunk: skip the lower halo. */
if(offset.x != 0 && i < rangeSearch - 3)
return;
if(offset.y != 0 && j < rangeSearch - 3)
return;
if(offset.z != 0 && k < rangeSearch - 3)
return;
/* First chunk: stop before the next chunk's territory. */
if(offset.x == 0 && i >= dim.x - rangeSearch * 2 - 1)
return;
if(offset.y == 0 && j >= dim.y - rangeSearch * 2 - 1)
return;
if(offset.z == 0 && k >= dim.z - rangeSearch * 2 - 1)
return;
/* Never touch the upper halo. */
if(i >= dim.x - rangeSearch - 1)
return;
if(j >= dim.y - rangeSearch - 1)
return;
if(k >= dim.z - rangeSearch - 1)
return;
// if (i >= dim.x)
// return;
// if (j >= dim.y)
// return;
// if (k >= dim.z)
// return;
unsigned int id = to1D(make_int3(i, j, k), dim);
if (id < numVoxels) {
/* scan[id] < scan[id+1] <=> cell id is occupied; the scan value itself
 * is the compacted destination slot. */
if ( (id < numVoxelsp1) ? voxelOccupied[id].y < voxelOccupied[id + 1].y : lastVoxel ) {
compactedVoxelArray[ voxelOccupied[id].y ] = id;
}
}
}
/* Linear interpolation between a and b: returns a at t == 0, b at t == 1. */
inline __device__ float3 lerp(float3 a, float3 b, float t)
{
float3 delta = b - a;
return a + t * delta;
}
/* World-space position of a cell: grid origin plus the integer cell index
 * scaled by the grid spacing dx. */
__device__ float3 gridPosition(uint3 cellPos, float3 originGrid, float dx) {
float3 cell = make_float3(cellPos.x, cellPos.y, cellPos.z);
return originGrid + (cell * dx);
}
/* Inverse of to1D: recover (x, y, z) grid coordinates from a linear index
 * laid out x-major (x varies slowest, z fastest).
 * Note: the parameter shadows the CUDA built-in gridDim inside this
 * function; kept for signature compatibility. */
__device__ uint3 grid1DTo3D(unsigned int index, int3 gridDim) {
uint3 res;
unsigned int sliceSize = gridDim.y * gridDim.z;
res.x = index / sliceSize;                 /* integer division -> slice */
unsigned int inSlice = index - res.x * sliceSize;
res.y = inSlice / gridDim.z;               /* row within the slice */
res.z = inSlice - res.y * gridDim.z;       /* column within the row */
return res;
}
/* Position along segment p0-p1 where the scalar field crosses isolevel,
 * assuming the field varies linearly from f0 (at p0) to f1 (at p1). */
__device__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1) {
const float t = (isolevel - f0) / (f1 - f0);
return p0 + t * (p1 - p0);
}
/* Marching cubes - pass 3 (vertex generation) over the *compacted* voxel
 * list. For each active voxel, interpolate the 12 possible edge crossings
 * and emit the triangles dictated by triTable; output offsets come from
 * the scanned per-voxel counts (numVertsScanned[voxel].x).
 * originGridDX packs the grid origin (xyz) and the cell spacing (w).
 * Each thread keeps its 12 edge points in a private, strided slice of one
 * shared-memory buffer purely to relieve register pressure; slices never
 * overlap so no __syncthreads() is needed (see comment below).
 * NOTE(review): vertlist is indexed by threadIdx.x only, so this kernel
 * assumes a 1-D thread block of exactly NBTHREADS threads - confirm the
 * launch configuration.
 * NOTE(review): `id` flattens the 3-D launch coordinates using blockDim as
 * the "dimensions" of to1D, which only enumerates 0..activeVoxels-1
 * without collisions for a specific launch shape - verify against the
 * host-side grid computation. */
__global__ void generateTriangleVerticesSMEM(float3 *pos,
const unsigned int *compactedVoxelArray,
const uint2 * numVertsScanned,
const float *gridValues,
float4 originGridDX,
float isoValue, unsigned int activeVoxels,
unsigned int maxVertsM3, int3 dim, int3 offset)
{
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
unsigned int j = (blockIdx.y * blockDim.y + threadIdx.y);
unsigned int k = (blockIdx.z * blockDim.z + threadIdx.z);
// if (i >= dim.x)
// return;
// if (j >= dim.y)
// return;
// if (k >= dim.z)
// return;
int3 cudaGrid = make_int3(blockDim.x, blockDim.y, blockDim.z);
unsigned int id = to1D(make_int3(i, j, k), cudaGrid);
if (id >= activeVoxels)
return;
/* Map the compacted slot back to the original voxel index and position. */
unsigned int voxel = compactedVoxelArray[id];
uint3 gridPos = grid1DTo3D(voxel, dim);
float dx = originGridDX.w;
float3 originGrid = make_float3(originGridDX.x, originGridDX.y, originGridDX.z);
float3 p = gridPosition(gridPos, originGrid, dx);
// calculate cell vertex positions
float3 v[8];
v[0] = p;
v[1] = p + make_float3(dx, 0, 0);
v[2] = p + make_float3(dx, dx, 0);
v[3] = p + make_float3(0, dx, 0);
v[4] = p + make_float3(0, 0, dx);
v[5] = p + make_float3(dx, 0, dx);
v[6] = p + make_float3(dx, dx, dx);
v[7] = p + make_float3(0, dx, dx);
/* Scalar samples at the 8 corners, fetched from the full grid. */
float field[8];
field[0] = gridValues[voxel];
field[1] = gridValues[to1D(gridPos + make_uint3(1, 0, 0), dim)];
field[2] = gridValues[to1D(gridPos + make_uint3(1, 1, 0), dim)];
field[3] = gridValues[to1D(gridPos + make_uint3(0, 1, 0), dim)];
field[4] = gridValues[to1D(gridPos + make_uint3(0, 0, 1), dim)];
field[5] = gridValues[to1D(gridPos + make_uint3(1, 0, 1), dim)];
field[6] = gridValues[to1D(gridPos + make_uint3(1, 1, 1), dim)];
field[7] = gridValues[to1D(gridPos + make_uint3(0, 1, 1), dim)];
// recalculate flag
unsigned int cubeindex;
cubeindex = ((unsigned int)(field[0] < isoValue));
cubeindex += ((unsigned int)(field[1] < isoValue)) * 2;
cubeindex += ((unsigned int)(field[2] < isoValue)) * 4;
cubeindex += ((unsigned int)(field[3] < isoValue)) * 8;
cubeindex += ((unsigned int)(field[4] < isoValue)) * 16;
cubeindex += ((unsigned int)(field[5] < isoValue)) * 32;
cubeindex += ((unsigned int)(field[6] < isoValue)) * 64;
cubeindex += ((unsigned int)(field[7] < isoValue)) * 128;
// find the vertices where the surface intersects the cube
// Note: SIMD marching cubes implementations have no need
// for an edge table, because branch divergence eliminates any
// potential performance gain from only computing the per-edge
// vertices when indicated by the edgeTable.
// Use shared memory to keep register pressure under control.
// No need to call __syncthreads() since each thread uses its own
// private shared memory buffer.
__shared__ float3 vertlist[12 * NBTHREADS];
vertlist[threadIdx.x ] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]);
vertlist[(NBTHREADS * 1) + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]);
vertlist[(NBTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]);
vertlist[(NBTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]);
vertlist[(NBTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]);
vertlist[(NBTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]);
vertlist[(NBTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]);
vertlist[(NBTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]);
vertlist[(NBTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]);
vertlist[(NBTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]);
vertlist[(NBTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]);
vertlist[(NBTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]);
/* World-space shift of this chunk relative to the global grid. */
float3 offsetPos = make_float3(offset.x * dx, offset.y * dx, offset.z * dx);
// output triangle vertices
unsigned int numVerts = nbTriTable[cubeindex];
for (int i = 0; i < numVerts; i += 3) {
/* Scanned output offset for this voxel, plus the local triangle index. */
unsigned int index = numVertsScanned[voxel].x + i;
float3 *vert[3];
int edge;
edge = triTable[cubeindex][i];
vert[0] = &vertlist[(edge * NBTHREADS) + threadIdx.x];
edge = triTable[cubeindex][i + 1];
vert[1] = &vertlist[(edge * NBTHREADS) + threadIdx.x];
edge = triTable[cubeindex][i + 2];
vert[2] = &vertlist[(edge * NBTHREADS) + threadIdx.x];
/* Drop triangles that would overflow the output buffer. */
if (index < maxVertsM3) {
pos[index ] = *vert[0] + offsetPos;
pos[index + 1] = *vert[1] + offsetPos;
pos[index + 2] = *vert[2] + offsetPos;
}
}
}
/* Snap every vertex onto a regular lattice of spacing `tolerance`, so that
 * nearly-coincident vertices produced by neighbouring cells collapse to
 * exactly equal coordinates (enables later welding/deduplication).
 * One thread per vertex.
 * Fix vs. original: roundf keeps the computation in single precision; the
 * double-precision round() forced a float->double->float round trip on
 * every component. */
__global__ void groupVertices(float3 *verts, const unsigned int nbVerts, const float tolerance) {
unsigned int i = (blockIdx.x * blockDim.x + threadIdx.x);
if (i >= nbVerts)
return;
verts[i].x = roundf(verts[i].x / tolerance) * tolerance;
verts[i].y = roundf(verts[i].y / tolerance) * tolerance;
verts[i].z = roundf(verts[i].z / tolerance) * tolerance;
}
#define MAXNEIGHBOR 32
/* Laplacian mesh smoothing: each vertex is repeatedly replaced by the
 * average of the other vertices of its incident triangles, `ite` times.
 * One thread per vertex; `triangles` holds 3 vertex indices per triangle.
 * Neighbour collection is branchless: slot 0 of neighbors[] is a scratch
 * slot - a non-matching triangle is written to neighbors[curId * 0] and
 * curId is not advanced, so real entries occupy indices 1..curId-1.
 * NOTE(review): the neighbour search is O(nbVerts * nbTris) per launch;
 * fine for small meshes, expensive otherwise (the shared-memory TODO
 * below stands).
 * NOTE(review): verts[] is read and written concurrently by different
 * threads across smoothing iterations with no synchronisation, so results
 * depend on scheduling - presumably acceptable for this visual use. */
__global__ void LaplacianSmooth(float3 *verts, int *triangles, const unsigned int nbVerts, const unsigned int nbTris, const unsigned int ite) {
//For each vertex
unsigned int idx = (threadIdx.x + blockIdx.x * blockDim.x);
if (idx >= nbVerts) {
return;
}
/* int neighbors[MAXNEIGHBOR];
int curId = -1;
for (int i = 0; i < MAXNEIGHBOR; i++) {
neighbors[i] = -1;
}
for (int t = 0; t < nbTris && curId < MAXNEIGHBOR; t++) {
if (triangles[t * 3] == idx || triangles[t * 3 + 1] == idx || triangles[t * 3 + 2] == idx)
neighbors[curId++] = t;
// int compar = (triangles[t * 3] == idx || triangles[t * 3 + 1] == idx || triangles[t * 3 + 2] == idx);
// neighbors[curId*compar] = t;
// curId = compar + curId;
}
for (int t = 0; t < ite; ++t) {
float3 curV = make_float3(0,0,0);
for (int i = 0; i < curId; i++) {
int idv1 = triangles[neighbors[i] * 3 + 0];
int idv2 = triangles[neighbors[i] * 3 + 1];
int idv3 = triangles[neighbors[i] * 3 + 2];
if(idv1 != idx)
curV = curV + verts[idv1];
if(idv2 != idx)
curV = curV + verts[idv2];
if(idv3 != idx)
curV = curV + verts[idv3];
}
curV = curV / max(1,curId*2);
verts[idx] = curV;
}*/
int neighbors[MAXNEIGHBOR];//The 0 is not used
int curId = 1;
for (int i = 0; i < MAXNEIGHBOR; i++) {
neighbors[i] = -1;
}
//TODO: Could use shared mem here
//Get 31 neighbors max
for (int t = 0; t < nbTris && curId < MAXNEIGHBOR; t++) {
/* compar is 1 iff triangle t references this vertex; non-matching
 * triangles overwrite scratch slot 0 without advancing curId. */
int compar = (triangles[t * 3] == idx || triangles[t * 3 + 1] == idx || triangles[t * 3 + 2] == idx);
neighbors[curId * compar] = t;
curId = compar + curId;
}
for (int t = 0; t < ite; ++t) {
float3 curV = make_float3(0, 0, 0);
// float3 save = verts[idx];
//For all neighbors of current vertex
for (int i = 1; i < curId ; i++) {
int idv1 = triangles[neighbors[i] * 3 + 0];
int idv2 = triangles[neighbors[i] * 3 + 1];
int idv3 = triangles[neighbors[i] * 3 + 2];
/* Branchless accumulation: (idvN != idx) zeroes out the vertex's own
 * contribution, so each triangle contributes its 2 other vertices. */
curV = curV + (verts[idv1] * (idv1 != idx));
curV = curV + (verts[idv2] * (idv2 != idx));
curV = curV + (verts[idv3] * (idv3 != idx));
// curV = curV - save;
}
if (curId != 1) {
/* (curId-1) incident triangles x 2 contributing vertices each. */
curV = (curV / ((curId - 1) * 2));
verts[idx] = curV;
}
}
}
|
3264834717cc0afde0633e78a6f59559cfc916ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
/* Auto-generated compiler test kernel (hipified): copies one union member
 * over the other and prints the result. Parameter y is unused.
 * NOTE(review): reading obj.sub1.bt1 before any member is written, and
 * passing a struct (big_type) to printf for a "%d" conversion, are both
 * undefined behavior - presumably deliberate for this compiler-fuzzing
 * artifact (see the recorded nvcc/gcc results at the end of the file). */
__global__ void fun(int *y){
struct big_type {
int arr[32];
};
union u {
struct s1 { char c; struct big_type bt1; } sub1;
struct s2 { long long x; struct big_type bt2; } sub2;
} obj;
obj.sub2.bt2 = obj.sub1.bt1;
printf("%d\n", obj.sub2.bt2);
printf("%d\n", obj.sub1.bt1);
}
/* Host driver for the test kernel: allocates one device int, launches the
 * kernel on a single thread, copies the value back and exits.
 * NOTE(review): no hip* return codes are checked and the kernel never
 * writes dev_y, so y is copied back uninitialized - acceptable only
 * because this is a generated compiler test. */
int main(void)
{
int y;
int *dev_y;
hipMalloc((void**)&dev_y, sizeof(int));
hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_y);
hipMemcpy(&y, dev_y, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_y);
return 0;
}
// compiles; nvcc: 0 0;gcc:1526236216 2147483637;
| 3264834717cc0afde0633e78a6f59559cfc916ae.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <stddef.h>
#include <limits.h>
/* Auto-generated compiler test kernel (CUDA original of the hipified copy
 * above): copies one union member over the other and prints the result.
 * Parameter y is unused.
 * NOTE(review): the union is read before any write and a struct is passed
 * to printf for "%d" - both undefined behavior, presumably deliberate for
 * this compiler-fuzzing artifact. */
__global__ void fun(int *y){
struct big_type {
int arr[32];
};
union u {
struct s1 { char c; struct big_type bt1; } sub1;
struct s2 { long long x; struct big_type bt2; } sub2;
} obj;
obj.sub2.bt2 = obj.sub1.bt1;
printf("%d\n", obj.sub2.bt2);
printf("%d\n", obj.sub1.bt1);
}
/* Host driver: allocates one device int, launches the test kernel on a
 * single thread, copies the (never-written) value back and exits.
 * NOTE(review): no cudaError_t return codes are checked - acceptable only
 * because this is a generated compiler test. */
int main(void)
{
int y;
int *dev_y;
cudaMalloc((void**)&dev_y, sizeof(int));
fun<<<1,1>>>(dev_y);
cudaMemcpy(&y, dev_y, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_y);
return 0;
}
// compiles; nvcc: 0 0;gcc:1526236216 2147483637;
|
979128c524464c514279315888bde0f2a3bb1e2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "path_tracer.hpp"
#include <helper_math.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <hiprand/hiprand_kernel.h>
#include "constants.hpp"
#include "types.hpp"
#include "utils.hpp"
namespace cupt {
namespace {
__constant__ Camera camera;
/* Mirror-reflect `incident` about `normal` (both assumed unit length).
 * Refer to "Rui Wang, Lec12 - Ray Tracing, page 11".
 * Fix vs. original: 2.0f keeps the whole expression in single precision;
 * the 2.0 double literal promoted the dot product to double on every call. */
__device__ inline float3 ComputeReflectionDirection(const float3 normal,
const float3 incident) {
return normalize(incident - 2.0f * dot(incident, normal) * normal);
}
/* Sample a random direction in the hemisphere around `normal`, weighted by
 * cos^n with n = shininess (Phong-lobe importance sampling):
 * cos_phi = u1^(1/(n+1)) is the inverse-CDF sample for the polar angle and
 * theta is uniform in [0, 2*pi). `incident` is unused; it is kept for
 * signature symmetry with the other Compute*Direction helpers. */
__device__ float3 ComputeRandomCosineWeightedDirection(
const float3 normal, const float3 incident, const float shininess,
hiprandState_t* curand_state) {
/* Compute a random cosine weighted direction in heimsphere */
float random1 = hiprand_uniform(curand_state);
float cos_phi = powf(random1, 1.0 / (1 + shininess));
float sin_phi = sqrt(1 - square(cos_phi));
float random2 = hiprand_uniform(curand_state);
float theta = kTwoPi * random2;
/* Build an orthonormal basis around `normal`: pick a fixed axis that
 * cannot be (nearly) parallel to it - at least one component of any unit
 * vector has magnitude < 1/sqrt(3). */
float3 not_normal;
if (fabs(normal.x) < kSQRTOfOneThird) {
not_normal = make_float3(1, 0, 0);
} else if (fabs(normal.y) < kSQRTOfOneThird) {
not_normal = make_float3(0, 1, 0);
} else {
not_normal = make_float3(0, 0, 1);
}
float3 x_axis = cross(normal, not_normal);
float3 y_axis = cross(normal, x_axis);
/* Spherical-to-Cartesian in the local basis, then renormalize. */
float3 direction = ((cos(theta) * sin_phi * x_axis) +
(sin(theta) * sin_phi * y_axis) + (cos_phi * normal));
return normalize(direction);
}
/* Refraction direction via Snell's law, eta = n_incident / n_transmitted.
 * Returns the zero vector on total internal reflection so the caller can
 * fall back to pure reflection.
 * Refer to "Rui Wang, Lec12 - Ray Tracing, page 15".
 * The trailing curand parameter is unused; it keeps the signature uniform
 * with the other direction samplers. */
__device__ inline float3 ComputeTransmissionDirection(
const float3 normal, const float3 incident, const float eta,
hiprandState_t* /* ignored */) {
/* Compute refraction direction according to Snell's Law.
 * Refer to "Rui Wang, Lec12 - Ray Tracing, page 15" */
float cos_theta_i = dot(normal, incident);
/* radicand < 0 <=> total internal reflection. */
float radicand = 1 - square(eta) * (1 - square(cos_theta_i));
if (radicand < 0) /* Total Internal Reflection */
return make_float3(0);
float cos_theta_o = sqrt(radicand);
/* sign(cos_theta_i) selects the correct half-space whether the ray is
 * entering or leaving the surface. */
float3 direction =
(sign(cos_theta_i) * cos_theta_o - eta * cos_theta_i) * normal +
eta * incident;
return normalize(direction);
}
/* Sample a glossy (Phong-lobe) direction around the perfect mirror
 * reflection; larger shininess concentrates samples near the mirror ray. */
__device__ float3 ComputeSpecularityDirection(const float3 normal,
const float3 incident,
const float shininess,
hiprandState_t* curand_state) {
const float3 mirror = ComputeReflectionDirection(normal, incident);
return ComputeRandomCosineWeightedDirection(mirror, incident, shininess,
curand_state);
}
/* Sample a cosine-weighted diffuse bounce direction in the hemisphere
 * around `normal` (lobe exponent 1.0 => plain cosine weighting). */
__device__ float3 ComputeDiffusionDirection(const float3 normal,
const float3 incident,
hiprandState_t* curand_state) {
const float cosineExponent = 1.0f;
return ComputeRandomCosineWeightedDirection(normal, incident, cosineExponent,
curand_state);
}
/* Per-pixel setup before each Monte-Carlo pass: reset the ray-compaction
 * index list to the identity mapping and (re)seed one RNG state per pixel.
 * `seed` is the pass counter, so successive passes are decorrelated.
 * (The "Kernal" spelling is kept - callers use this name.) */
__global__ void InitializationKernal(size_t* indices,
hiprandState_t* curand_states,
size_t num_pixels, size_t seed) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_pixels) return;
indices[idx] = idx;
hiprand_init(hash(idx) * hash(seed), 0, 0, &curand_states[idx]);
}
/* Generate one jittered primary ray per pixel from the camera in constant
 * memory. The sub-pixel jitter provides antialiasing across passes; when
 * the aperture radius is non-zero the ray origin is sampled on a disc
 * (thin-lens model) for depth of field. Radiance throughput is initialised
 * to `intensity`. One thread per pixel; pixel y is flipped so row 0 of the
 * output is the top of the image. */
__global__ void RayCastFromCameraKernel(Ray* rays, const size_t num_pixels,
const float intensity,
hiprandState_t* curand_states) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_pixels) return;
size_t x = idx % camera.resolution.x;
size_t y = camera.resolution.y - idx / camera.resolution.x - 1;
hiprandState_t* const curand_state = &curand_states[idx];
/* compute axis direction */
float3 x_axis = normalize(cross(camera.view, camera.up));
float3 y_axis = normalize(cross(x_axis, camera.view));
/* compute image plane ratio and center position */
float ratio = camera.resolution.x * 1.0 / camera.resolution.y;
float3 center = camera.position + camera.view * camera.focal_distance;
/* compute the jittered point position on image plane */
float2 jitter = make_float2(hiprand_uniform(curand_state) - 0.5,
hiprand_uniform(curand_state) - 0.5);
float2 distances = (make_float2(make_uint2(x, y)) + jitter) /
(make_float2(camera.resolution) - 1);
/* Remap to [-1, 1], stretched horizontally by the aspect ratio. */
distances = (2 * distances - 1) * make_float2(ratio, 1);
float3 point = center + distances.x * x_axis + distances.y * y_axis;
/* compute origin of the ray */
float3 origin = camera.position;
if (camera.aperture_radius > kEpsilon) {
/* Uniform sample on the aperture disc (sqrt for uniform area). */
float angle = kTwoPi * hiprand_uniform(curand_state);
float distance =
camera.aperture_radius * sqrt(hiprand_uniform(curand_state));
float2 coord = make_float2(cos(angle) * distance, sin(angle) * distance);
origin += x_axis * coord.x + y_axis * coord.y;
}
rays[idx].origin = origin;
rays[idx].direction = normalize(point - origin);
rays[idx].color = make_float3(intensity);
}
/* One path-tracing bounce for every still-alive ray.
 * `indices` maps compacted thread slots to per-pixel ray ids; a ray that
 * leaves the scene is retired by overwriting its entry with kInvalidIndex
 * so the host-side stream compaction drops it before the next bounce.
 * NOTE(review): the early-out compares against kMaximumSize while dead
 * rays are marked with kInvalidIndex - confirm these two constants are the
 * same sentinel value.
 * NOTE(review): intersection is a brute-force linear scan over all
 * triangles (no acceleration structure). */
__global__ void PathTraceKernel(const Triangle* triangles,
const size_t num_triangles, size_t* indices,
Ray* rays, float3* colors,
const size_t num_pixels,
hiprandState_t* curand_states) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_pixels) return;
size_t& index = indices[idx];
if (index == kMaximumSize) return;
Ray& ray = rays[index];
hiprandState_t* curand_state = &curand_states[index];
/** Get the nearest intersection: weight holds (p, q, t) = barycentric
 * coordinates plus the ray parameter of the closest hit so far. */
size_t intersection_idx = kInvalidIndex;
float3 weight = make_float3(1e10f); /* p, q, t */
for (size_t i = 0; i < num_triangles; i++) {
float3 w = triangles[i].Hit(ray);
if (w.z > 0 && w.z < weight.z) {
weight = w;
intersection_idx = i;
}
}
/** If no intersection, mark as dead ray */
if (intersection_idx == kInvalidIndex) {
index = kInvalidIndex;
} else { /** Else get secondary ray */
/* Transmit ray to the intersection point */
ray.origin += weight.z * ray.direction;
const Triangle triangle = triangles[intersection_idx];
float3 normal = triangle.GetNormal(weight.x, weight.y);
float shininess = triangle.material.shininess;
float eta = 1.0;
/* `into` is true when the ray hits the front face; flip the normal so it
 * always opposes the incoming ray. */
bool into = (dot(normal, ray.direction) < 0);
if (!into) normal *= -1;
/* Specular material by default */
float3 diffusion = triangle.material.diffuse_color;
float3 specularity = triangle.material.specular_color;
float3 transmission = make_float3(0);
/* Transparent material, calculate fresnel cofficient */
if (triangle.material.dissolve < 1) {
float incident_ior = kAirIoR, transmitted_ior = kAirIoR;
if (into) /* Air -> Material */
transmitted_ior = triangle.material.ior;
else /* Material -> ir */
incident_ior = triangle.material.ior;
eta = incident_ior / transmitted_ior;
shininess = 1000; // Mirror reflection
const float3 direction = ComputeTransmissionDirection(
normal, ray.direction, eta, curand_state);
if (iszero(direction)) { /* Total Internal Reflection */
specularity = make_float3(1);
transmission = make_float3(0);
} else { /* Calculate Fresnel Cofficient */
/* rs / rt: s- and p-polarised Fresnel reflectances. */
float cos_theta_i = fabs(dot(normal, ray.direction));
float cos_theta_o = fabs(dot(normal, direction));
float rs = square(
(incident_ior * cos_theta_i - transmitted_ior * cos_theta_o) /
(incident_ior * cos_theta_i + transmitted_ior * cos_theta_o));
float rt = square(
(incident_ior * cos_theta_o - transmitted_ior * cos_theta_i) /
(incident_ior * cos_theta_o + transmitted_ior * cos_theta_i));
/* NOTE(review): r = rs + rt is twice the unpolarised average, and the
 * transmission weight 2 - r makes these weights sum to 2 rather than
 * 1, doubling their importance relative to `diffusion` in the
 * roulette below - confirm this scaling is intended. */
float r = (rs + rt);
specularity = make_float3(r);
transmission = make_float3(2 - r);
}
}
/* Russian Roulette: per-channel random values tested against cumulative
 * thresholds pick diffuse, specular or transmissive continuation.
 * (float3 comparison operators presumably component-wise, defined in
 * utils.hpp - verify.) */
float3 threshold[3];
threshold[0] = diffusion;
threshold[1] = threshold[0] + specularity;
threshold[2] = threshold[1] + transmission;
float3 random = threshold[2] * make_float3(hiprand_uniform(curand_state),
hiprand_uniform(curand_state),
hiprand_uniform(curand_state));
if (random <= threshold[0]) { /* Diffusion */
/* Emitted light is accumulated only on the diffuse branch. */
colors[index] += (ray.color * triangle.material.emitted_color);
ray.color *= triangle.material.diffuse_color;
ray.direction =
ComputeDiffusionDirection(normal, ray.direction, curand_state);
} else if (random <= threshold[1]) { /* Specular */
ray.color *= triangle.material.specular_color;
ray.direction = ComputeSpecularityDirection(normal, ray.direction,
shininess, curand_state);
} else if (random <= threshold[2]) { /* Transmission */
ray.color *= (1 - triangle.material.dissolve);
ray.direction = ComputeTransmissionDirection(normal, ray.direction, eta,
curand_state);
}
/* Nudge the origin along the new direction to avoid self-intersection. */
ray.origin += kRayOriginBias * ray.direction;
}
}
} // namespace
/* Render the scene seen from `host_camera`.
 * For each of mc_sample_times passes: reset per-pixel state, cast one
 * jittered primary ray per pixel, then bounce rays up to max_trace_depth
 * times, stream-compacting dead rays between bounces. Per-pixel radiance
 * accumulates in `colors` and is converted to an Image at the end.
 * Note: indices_ptr stays valid across the resize calls because the vector
 * is only ever shrunk / re-grown within its initial capacity. */
Image PathTracer::Render(const Camera& host_camera) {
/* Upload the camera into __constant__ memory for the kernels. */
checkCudaErrors(hipMemcpyToSymbol(camera, &host_camera, sizeof(Camera)));
const size_t num_pixels = host_camera.resolution.x * host_camera.resolution.y;
thrust::device_vector<Triangle> triangles(m_scene.triangles);
Triangle* const triangles_ptr = thrust::raw_pointer_cast(triangles.data());
thrust::device_vector<float3> colors(num_pixels, make_float3(0));
float3* const colors_ptr = thrust::raw_pointer_cast(colors.data());
thrust::device_vector<Ray> rays(num_pixels);
Ray* const rays_ptr = thrust::raw_pointer_cast(rays.data());
thrust::device_vector<hiprandState_t> curand_curand_states(num_pixels);
hiprandState_t* const curand_curand_states_ptr =
thrust::raw_pointer_cast(curand_curand_states.data());
thrust::device_vector<size_t> indices(num_pixels);
size_t* indices_ptr = thrust::raw_pointer_cast(indices.data());
for (size_t counter = 0; counter < m_parameter.mc_sample_times; counter++) {
/* Initialization: all pixels become alive again for this pass. */
indices.resize(num_pixels);
hipLaunchKernelGGL(( InitializationKernal), dim3(divUp(num_pixels, kThreadsPerBlock)),
dim3(kThreadsPerBlock), 0, 0,
indices_ptr, curand_curand_states_ptr, num_pixels, counter);
/* Create rays from camera */
hipLaunchKernelGGL(( RayCastFromCameraKernel), dim3(divUp(num_pixels, kThreadsPerBlock)),
dim3(kThreadsPerBlock), 0, 0,
rays_ptr, num_pixels, m_scene.intensity, curand_curand_states_ptr);
for (size_t depth = 0; depth < m_parameter.max_trace_depth; depth++) {
/* Step 0. Check if over. */
if (indices.size() == 0) break;
/* Step 1. Trace rays to get secondary rays. */
hipLaunchKernelGGL(( PathTraceKernel), dim3(divUp(indices.size(), kThreadsPerBlock)),
dim3(kThreadsPerBlock), 0, 0,
triangles_ptr, triangles.size(), indices_ptr, rays_ptr, colors_ptr,
indices.size(), curand_curand_states_ptr);
/* Step 2. Compact rays, remove dead rays. */
thrust::device_vector<size_t>::iterator end =
thrust::remove_if(indices.begin(), indices.end(), IsInvalidIndex());
indices.resize(end - indices.begin());
}
}
/* Average the accumulated radiance over the number of passes. */
return Image(host_camera.resolution, colors,
Color2Pixel(m_parameter.mc_sample_times));
}
} // namespace cupt | 979128c524464c514279315888bde0f2a3bb1e2e.cu | #include "path_tracer.hpp"
#include <helper_math.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/sequence.h>
#include <curand_kernel.h>
#include "constants.hpp"
#include "types.hpp"
#include "utils.hpp"
namespace cupt {
namespace {
__constant__ Camera camera;
/* Mirror-reflect `incident` about `normal` (both assumed unit length).
 * Refer to "Rui Wang, Lec12 - Ray Tracing, page 11".
 * Fix vs. original: 2.0f keeps the whole expression in single precision;
 * the 2.0 double literal promoted the dot product to double on every call. */
__device__ inline float3 ComputeReflectionDirection(const float3 normal,
const float3 incident) {
return normalize(incident - 2.0f * dot(incident, normal) * normal);
}
/* Sample a random direction in the hemisphere around `normal`, weighted by
 * cos^n with n = shininess (Phong-lobe importance sampling):
 * cos_phi = u1^(1/(n+1)) is the inverse-CDF polar-angle sample, theta is
 * uniform in [0, 2*pi). `incident` is unused; kept for signature symmetry
 * with the other Compute*Direction helpers. */
__device__ float3 ComputeRandomCosineWeightedDirection(
const float3 normal, const float3 incident, const float shininess,
curandState* curand_state) {
/* Compute a random cosine weighted direction in heimsphere */
float random1 = curand_uniform(curand_state);
float cos_phi = powf(random1, 1.0 / (1 + shininess));
float sin_phi = sqrt(1 - square(cos_phi));
float random2 = curand_uniform(curand_state);
float theta = kTwoPi * random2;
/* Build an orthonormal basis around `normal`: pick a fixed axis that
 * cannot be (nearly) parallel to it - at least one component of any unit
 * vector has magnitude < 1/sqrt(3). */
float3 not_normal;
if (fabs(normal.x) < kSQRTOfOneThird) {
not_normal = make_float3(1, 0, 0);
} else if (fabs(normal.y) < kSQRTOfOneThird) {
not_normal = make_float3(0, 1, 0);
} else {
not_normal = make_float3(0, 0, 1);
}
float3 x_axis = cross(normal, not_normal);
float3 y_axis = cross(normal, x_axis);
/* Spherical-to-Cartesian in the local basis, then renormalize. */
float3 direction = ((cos(theta) * sin_phi * x_axis) +
(sin(theta) * sin_phi * y_axis) + (cos_phi * normal));
return normalize(direction);
}
/* Refraction direction via Snell's law, eta = n_incident / n_transmitted.
 * Returns the zero vector on total internal reflection so the caller can
 * fall back to pure reflection. The trailing curand parameter is unused;
 * it keeps the signature uniform with the other direction samplers. */
__device__ inline float3 ComputeTransmissionDirection(
const float3 normal, const float3 incident, const float eta,
curandState* /* ignored */) {
/* Compute refraction direction according to Snell's Law.
 * Refer to "Rui Wang, Lec12 - Ray Tracing, page 15" */
float cos_theta_i = dot(normal, incident);
/* radicand < 0 <=> total internal reflection. */
float radicand = 1 - square(eta) * (1 - square(cos_theta_i));
if (radicand < 0) /* Total Internal Reflection */
return make_float3(0);
float cos_theta_o = sqrt(radicand);
/* sign(cos_theta_i) selects the correct half-space whether the ray is
 * entering or leaving the surface. */
float3 direction =
(sign(cos_theta_i) * cos_theta_o - eta * cos_theta_i) * normal +
eta * incident;
return normalize(direction);
}
/* Sample a glossy (Phong-lobe) direction around the perfect mirror
 * reflection; larger shininess concentrates samples near the mirror ray. */
__device__ float3 ComputeSpecularityDirection(const float3 normal,
const float3 incident,
const float shininess,
curandState* curand_state) {
const float3 mirror = ComputeReflectionDirection(normal, incident);
return ComputeRandomCosineWeightedDirection(mirror, incident, shininess,
curand_state);
}
/* Sample a cosine-weighted diffuse bounce direction in the hemisphere
 * around `normal` (lobe exponent 1.0 => plain cosine weighting). */
__device__ float3 ComputeDiffusionDirection(const float3 normal,
const float3 incident,
curandState* curand_state) {
const float cosineExponent = 1.0f;
return ComputeRandomCosineWeightedDirection(normal, incident, cosineExponent,
curand_state);
}
// Per-pixel setup run once per Monte Carlo pass: reset the active-ray
// index list to the identity mapping and reseed each pixel's curand
// state.  `seed` is the pass counter; hashing it together with the pixel
// index gives every pixel a fresh sequence each pass.
// NOTE(review): "Kernal" is a typo for "Kernel"; kept because renaming
// would break the launch site in Render().
__global__ void InitializationKernal(size_t* indices,
curandState* curand_states,
size_t num_pixels, size_t seed) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_pixels) return;
indices[idx] = idx;
curand_init(hash(idx) * hash(seed), 0, 0, &curand_states[idx]);
}
// Generate one primary camera ray per pixel.  Reads the camera from the
// __constant__ `camera` symbol (uploaded in Render()).  Each ray gets a
// sub-pixel jitter for anti-aliasing and, when the aperture radius is
// non-zero, a random origin on the lens disk for depth of field.
// `intensity` initializes the ray throughput (ray.color).
__global__ void RayCastFromCameraKernel(Ray* rays, const size_t num_pixels,
const float intensity,
curandState* curand_states) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_pixels) return;
// Pixel coordinates; y is flipped so row 0 of the image is the top.
size_t x = idx % camera.resolution.x;
size_t y = camera.resolution.y - idx / camera.resolution.x - 1;
curandState* const curand_state = &curand_states[idx];
/* compute axis direction */
float3 x_axis = normalize(cross(camera.view, camera.up));
float3 y_axis = normalize(cross(x_axis, camera.view));
/* compute image plane ratio and center position */
float ratio = camera.resolution.x * 1.0 / camera.resolution.y;
float3 center = camera.position + camera.view * camera.focal_distance;
/* compute the jittered point position on image plane */
// Jitter in [-0.5, 0.5) within the pixel for anti-aliasing, then map
// pixel coordinates to [-ratio, ratio] x [-1, 1] on the image plane.
float2 jitter = make_float2(curand_uniform(curand_state) - 0.5,
curand_uniform(curand_state) - 0.5);
float2 distances = (make_float2(make_uint2(x, y)) + jitter) /
(make_float2(camera.resolution) - 1);
distances = (2 * distances - 1) * make_float2(ratio, 1);
float3 point = center + distances.x * x_axis + distances.y * y_axis;
/* compute origin of the ray */
float3 origin = camera.position;
if (camera.aperture_radius > kEpsilon) {
// Depth of field: uniform sample on the lens disk (sqrt gives a
// uniform area density) displaces the ray origin.
float angle = kTwoPi * curand_uniform(curand_state);
float distance =
camera.aperture_radius * sqrt(curand_uniform(curand_state));
float2 coord = make_float2(cos(angle) * distance, sin(angle) * distance);
origin += x_axis * coord.x + y_axis * coord.y;
}
rays[idx].origin = origin;
rays[idx].direction = normalize(point - origin);
rays[idx].color = make_float3(intensity);
}
// One path-tracing bounce for every still-active ray.  `indices` maps
// thread slots to ray/pixel ids (compacted between bounces by the host).
// Each thread finds the nearest triangle hit with a brute-force scan,
// then picks diffuse / specular / transmission via Russian roulette and
// writes the secondary ray in place.  Dead rays (no hit) are marked by
// setting their entry in `indices` to kInvalidIndex.
__global__ void PathTraceKernel(const Triangle* triangles,
const size_t num_triangles, size_t* indices,
Ray* rays, float3* colors,
const size_t num_pixels,
curandState* curand_states) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_pixels) return;
size_t& index = indices[idx];
// NOTE(review): dead rays are marked kInvalidIndex below, but this
// early-out tests kMaximumSize — confirm the two constants share a
// value; otherwise dead rays are re-traced until host-side compaction.
if (index == kMaximumSize) return;
Ray& ray = rays[index];
curandState* curand_state = &curand_states[index];
/** Get the nearest intersection */
size_t intersection_idx = kInvalidIndex;
// Hit() presumably returns (barycentric p, barycentric q, distance t);
// t in .z, initialized to a huge value — TODO confirm against Triangle.
float3 weight = make_float3(1e10f); /* p, q, t */
for (size_t i = 0; i < num_triangles; i++) {
float3 w = triangles[i].Hit(ray);
if (w.z > 0 && w.z < weight.z) {
weight = w;
intersection_idx = i;
}
}
/** If no intersection, mark as dead ray */
if (intersection_idx == kInvalidIndex) {
index = kInvalidIndex;
} else { /** Else get secondary ray */
/* Transmit ray to the intersection point */
ray.origin += weight.z * ray.direction;
const Triangle triangle = triangles[intersection_idx];
float3 normal = triangle.GetNormal(weight.x, weight.y);
float shininess = triangle.material.shininess;
float eta = 1.0;
// Flip the normal so it always opposes the incoming ray; `into`
// records whether we are entering (true) or leaving the medium.
bool into = (dot(normal, ray.direction) < 0);
if (!into) normal *= -1;
/* Specular material by default */
float3 diffusion = triangle.material.diffuse_color;
float3 specularity = triangle.material.specular_color;
float3 transmission = make_float3(0);
/* Transparent material, calculate fresnel cofficient */
if (triangle.material.dissolve < 1) {
float incident_ior = kAirIoR, transmitted_ior = kAirIoR;
if (into) /* Air -> Material */
transmitted_ior = triangle.material.ior;
else /* Material -> ir */
incident_ior = triangle.material.ior;
eta = incident_ior / transmitted_ior;
shininess = 1000; // Mirror reflection
const float3 direction = ComputeTransmissionDirection(
normal, ray.direction, eta, curand_state);
if (iszero(direction)) { /* Total Internal Reflection */
specularity = make_float3(1);
transmission = make_float3(0);
} else { /* Calculate Fresnel Cofficient */
// Fresnel for s- and p-polarization.  r = rs + rt is twice the
// usual unpolarized reflectance (rs+rt)/2, and transmission is
// 2 - r; the factor of 2 cancels in the roulette normalization
// below, so only the specular/transmission ratio matters here.
float cos_theta_i = fabs(dot(normal, ray.direction));
float cos_theta_o = fabs(dot(normal, direction));
float rs = square(
(incident_ior * cos_theta_i - transmitted_ior * cos_theta_o) /
(incident_ior * cos_theta_i + transmitted_ior * cos_theta_o));
float rt = square(
(incident_ior * cos_theta_o - transmitted_ior * cos_theta_i) /
(incident_ior * cos_theta_o + transmitted_ior * cos_theta_i));
float r = (rs + rt);
specularity = make_float3(r);
transmission = make_float3(2 - r);
}
}
/* Russian Roulette */
// Per-channel cumulative thresholds; the float3 comparisons below
// presumably use component-wise operator<= defined elsewhere — the
// roulette decision then requires all three channels to agree.
// TODO(review): confirm operator<=(float3, float3) semantics.
float3 threshold[3];
threshold[0] = diffusion;
threshold[1] = threshold[0] + specularity;
threshold[2] = threshold[1] + transmission;
float3 random = threshold[2] * make_float3(curand_uniform(curand_state),
curand_uniform(curand_state),
curand_uniform(curand_state));
if (random <= threshold[0]) { /* Diffusion */
// Only the diffuse branch accumulates emitted light into the film.
colors[index] += (ray.color * triangle.material.emitted_color);
ray.color *= triangle.material.diffuse_color;
ray.direction =
ComputeDiffusionDirection(normal, ray.direction, curand_state);
} else if (random <= threshold[1]) { /* Specular */
ray.color *= triangle.material.specular_color;
ray.direction = ComputeSpecularityDirection(normal, ray.direction,
shininess, curand_state);
} else if (random <= threshold[2]) { /* Transmission */
ray.color *= (1 - triangle.material.dissolve);
ray.direction = ComputeTransmissionDirection(normal, ray.direction, eta,
curand_state);
}
// Nudge the origin along the new direction to avoid re-hitting the
// same surface due to floating-point error.
ray.origin += kRayOriginBias * ray.direction;
}
}
} // namespace
// Render the scene with iterative (stream-compacted) path tracing.
// For each of mc_sample_times passes: reseed RNG state, shoot one primary
// ray per pixel, then bounce up to max_trace_depth times, compacting away
// dead rays between bounces.  Accumulated radiance per pixel lives in
// `colors` and is normalized into an Image at the end.
Image PathTracer::Render(const Camera& host_camera) {
// Upload the camera into the __constant__ `camera` symbol the kernels read.
checkCudaErrors(cudaMemcpyToSymbol(camera, &host_camera, sizeof(Camera)));
const size_t num_pixels = host_camera.resolution.x * host_camera.resolution.y;
thrust::device_vector<Triangle> triangles(m_scene.triangles);
Triangle* const triangles_ptr = thrust::raw_pointer_cast(triangles.data());
thrust::device_vector<float3> colors(num_pixels, make_float3(0));
float3* const colors_ptr = thrust::raw_pointer_cast(colors.data());
thrust::device_vector<Ray> rays(num_pixels);
Ray* const rays_ptr = thrust::raw_pointer_cast(rays.data());
thrust::device_vector<curandState> curand_curand_states(num_pixels);
curandState* const curand_curand_states_ptr =
thrust::raw_pointer_cast(curand_curand_states.data());
thrust::device_vector<size_t> indices(num_pixels);
// NOTE(review): this raw pointer is reused across the resize calls in the
// loop below.  It stays valid only because resize never exceeds the
// initial num_pixels capacity — confirm thrust does not reallocate here.
size_t* indices_ptr = thrust::raw_pointer_cast(indices.data());
for (size_t counter = 0; counter < m_parameter.mc_sample_times; counter++) {
/* Initialization */
// Grow the (possibly compacted) index list back to full size; the
// value-initialized entries are immediately overwritten by the kernel.
indices.resize(num_pixels);
InitializationKernal<<<divUp(num_pixels, kThreadsPerBlock),
kThreadsPerBlock>>>(
indices_ptr, curand_curand_states_ptr, num_pixels, counter);
/* Create rays from camera */
RayCastFromCameraKernel<<<divUp(num_pixels, kThreadsPerBlock),
kThreadsPerBlock>>>(
rays_ptr, num_pixels, m_scene.intensity, curand_curand_states_ptr);
for (size_t depth = 0; depth < m_parameter.max_trace_depth; depth++) {
/* Step 0. Check if over. */
if (indices.size() == 0) break;
/* Step 1. Trace rays to get secondary rays. */
// NOTE(review): kernel launches are not followed by
// cudaGetLastError(); launch-config failures would go unnoticed.
PathTraceKernel<<<divUp(indices.size(), kThreadsPerBlock),
kThreadsPerBlock>>>(
triangles_ptr, triangles.size(), indices_ptr, rays_ptr, colors_ptr,
indices.size(), curand_curand_states_ptr);
/* Step 2. Compact rays, remove dead rays. */
thrust::device_vector<size_t>::iterator end =
thrust::remove_if(indices.begin(), indices.end(), IsInvalidIndex());
indices.resize(end - indices.begin());
}
}
// Color2Pixel presumably divides by the pass count to average samples.
return Image(host_camera.resolution, colors,
Color2Pixel(m_parameter.mc_sample_times));
}
} // namespace cupt |
5e09a174405e319c1e1f80b8161d9f084de138c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2021 Innovation Academy for Microsatellites of CAS
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Wang Junyong (wangjunyong@microsate.com)
*/
#include "adi-station-kernel.h"
#include "adi-constant.h"
#include "adi-util.h"
namespace adi {
namespace cuda {
namespace station {
// Convert a time stamp (in `ticks`; units defined by ToJulian) to
// Greenwich Mean Sidereal Time in radians, wrapped to [0, 2*pi).
// Uses the standard polynomial in Julian centuries since J2000.0
// (2451545.0) evaluated at the previous UT midnight, plus the rotation
// accrued during the day fraction.
__device__ double ToGreenwichSiderealTime(int64_t ticks)
{
// julian date of previous midnight
double jd0 = floor(ToJulian(ticks) + 0.5) - 0.5;
// julian centuries since epoch
double t = (jd0 - 2451545.0) / 36525.0;
// Fraction of a day elapsed since that midnight.
double jdf = ToJulian(ticks) - jd0;
// GMST at midnight in seconds of time.
double gt = 24110.54841 + t * (8640184.812866 + t * (0.093104 - t * 6.2E-6));
// Add the day fraction scaled by the sidereal/solar day ratio.
gt += jdf * 1.00273790935 * 86400.0;
// Seconds -> degrees (240 s per degree) -> radians, wrapped.
double gst = WrapTwoPI ((gt / 240.0) * M_PI / 180.0);
return gst;
}
// Compute the local sidereal time for every (station, epoch) pair:
// lst = GMST(ticks[epoch]) + station longitude, wrapped to [0, 2*pi).
// Launch layout: threads in x cover epochs (cols), blockIdx.y covers
// stations (rows); output `gst` is rows x cols, row-major.
__global__ void CalcLocalGreenwichSiderealTime (
Gst* gst,
adi::Station::Ele* element,
int64_t* ticks,
size_t rows,
size_t cols)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t sid = blockIdx.y;
if (tid >= cols || sid >= rows)
{
return;
}
uint32_t i = tid + sid * cols;
gst[i] = WrapTwoPI (ToGreenwichSiderealTime (ticks[tid]) + element[sid].lon);
}
// Convert each station's geodetic coordinates (lat, lon folded into the
// precomputed local sidereal time `gst`, alt) to an ECI position for
// every epoch, using an oblate-Earth model (flattening K_F, equatorial
// radius K_RE).  Same rows x cols launch layout as the other kernels.
__global__ void CalcEciPosition (
Vector* position,
adi::Station::Ele* element,
Gst* gst,
size_t rows,
size_t cols)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t sid = blockIdx.y;
if (tid >= cols || sid >= rows)
{
return;
}
uint32_t i = tid + sid * cols;
double sin_theta = sin (gst[i]);
double cos_theta = cos (gst[i]);
double sin_lat = sin (element[sid].lat);
double cos_lat = cos (element[sid].lat);
// Oblateness correction factors for geodetic latitude.
double c = 1 / sqrt (1.0 + K_F * (K_F - 2.0) * sin_lat * sin_lat);
double s = (1 - K_F) * (1 - K_F) * c;
// Projection of the station onto the equatorial plane.
double achcp = (K_RE * c + element[sid].alt) * cos_lat;
position[i].x = achcp * cos_theta;
position[i].y = achcp * sin_theta;
position[i].z = (K_RE * s + element[sid].alt) * sin_lat;
}
/* ECI velocity of an Earth-fixed station: the station is carried around
 * the spin axis, so v = omega x r with omega = (0, 0, K_EAR).  One
 * thread per (station, epoch) pair, same layout as the other kernels. */
__global__ void CalcEciVelocity (
  Vector* velocity,
  Vector* position,
  size_t rows,
  size_t cols)
{
  const uint32_t col = threadIdx.x + blockIdx.x * blockDim.x;
  const uint32_t row = blockIdx.y;
  if (col >= cols || row >= rows)
    {
      return;
    }
  const uint32_t idx = row * cols + col;
  /* Cross product (0,0,K_EAR) x position, written out component-wise. */
  velocity[idx].x = -K_EAR * position[idx].y;
  velocity[idx].y = K_EAR * position[idx].x;
  velocity[idx].z = 0.0;
}
/* Build, for every (station, epoch) pair, the rotation matrix from the
 * ECI frame to the station's topocentric frame.  Rows r1/r2/r3 are the
 * topocentric basis vectors expressed in ECI coordinates.  Launch
 * layout: threads in x cover epochs (cols), blockIdx.y covers stations.
 *
 * Fix: the rotation angle must be this station's *local* sidereal time.
 * The original read gst[tid], i.e. station 0's entry, so every station
 * other than the first was rotated with the wrong angle.  CalcEciPosition
 * indexes the same array with gst[i]; this kernel now does the same. */
__global__ void CalcMatrixFromEciToBody (
  Matrix* matrix,
  adi::Station::Ele* element,
  Gst* gst,
  size_t rows,
  size_t cols)
{
  uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
  uint32_t sid = blockIdx.y;
  if (tid >= cols || sid >= rows)
    {
      return;
    }
  uint32_t i = sid * cols + tid;
  double sin_lat = sin (element[sid].lat);
  double cos_lat = cos (element[sid].lat);
  /* Local sidereal time of station `sid` at epoch `tid`. */
  double sin_theta = sin (gst[i]);
  double cos_theta = cos (gst[i]);
  matrix[i].r1.x = sin_lat * cos_theta;
  matrix[i].r1.y = sin_lat * sin_theta;
  matrix[i].r1.z = -cos_lat;
  matrix[i].r2.x = -sin_theta;
  matrix[i].r2.y = cos_theta;
  matrix[i].r2.z = 0.0;
  matrix[i].r3.x = cos_lat * cos_theta;
  matrix[i].r3.y = cos_lat * sin_theta;
  matrix[i].r3.z = sin_lat;
}
// Time derivative of the ECI->body rotation matrix.  Since only the
// sidereal angle varies with time (rate K_EAR, Earth's rotation rate),
// d/dt of each row is the analytic derivative of the trig terms:
// x' = -y * K_EAR, y' = x * K_EAR, z' = 0 for every row.
// `element` and `gst` are unused here; the derivative is read directly
// from the already-built `matrix`.
__global__ void CalcDerivMatrixFromEciToBody (
Matrix* derivMatrix,
Matrix* matrix,
adi::Station::Ele* element,
Gst* gst,
size_t rows,
size_t cols)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t sid = blockIdx.y;
if (tid >= cols || sid >= rows)
{
return;
}
uint32_t i = sid * cols + tid;
derivMatrix[i].r1.x = -matrix[i].r1.y * K_EAR;
derivMatrix[i].r1.y = matrix[i].r1.x * K_EAR;
derivMatrix[i].r1.z = 0;
derivMatrix[i].r2.x = -matrix[i].r2.y * K_EAR;
derivMatrix[i].r2.y = matrix[i].r2.x * K_EAR;
derivMatrix[i].r2.z = 0.0;
derivMatrix[i].r3.x = -matrix[i].r3.y * K_EAR;
derivMatrix[i].r3.y = matrix[i].r3.x * K_EAR;
derivMatrix[i].r3.z = 0;
}
}
}
} | 5e09a174405e319c1e1f80b8161d9f084de138c8.cu | /* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2021 Innovation Academy for Microsatellites of CAS
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Wang Junyong (wangjunyong@microsate.com)
*/
#include "adi-station-kernel.h"
#include "adi-constant.h"
#include "adi-util.h"
namespace adi {
namespace cuda {
namespace station {
__device__ double ToGreenwichSiderealTime(int64_t ticks)
{
// julian date of previous midnight
double jd0 = floor(ToJulian(ticks) + 0.5) - 0.5;
// julian centuries since epoch
double t = (jd0 - 2451545.0) / 36525.0;
double jdf = ToJulian(ticks) - jd0;
double gt = 24110.54841 + t * (8640184.812866 + t * (0.093104 - t * 6.2E-6));
gt += jdf * 1.00273790935 * 86400.0;
double gst = WrapTwoPI ((gt / 240.0) * M_PI / 180.0);
return gst;
}
__global__ void CalcLocalGreenwichSiderealTime (
Gst* gst,
adi::Station::Ele* element,
int64_t* ticks,
size_t rows,
size_t cols)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t sid = blockIdx.y;
if (tid >= cols || sid >= rows)
{
return;
}
uint32_t i = tid + sid * cols;
gst[i] = WrapTwoPI (ToGreenwichSiderealTime (ticks[tid]) + element[sid].lon);
}
__global__ void CalcEciPosition (
Vector* position,
adi::Station::Ele* element,
Gst* gst,
size_t rows,
size_t cols)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t sid = blockIdx.y;
if (tid >= cols || sid >= rows)
{
return;
}
uint32_t i = tid + sid * cols;
double sin_theta = sin (gst[i]);
double cos_theta = cos (gst[i]);
double sin_lat = sin (element[sid].lat);
double cos_lat = cos (element[sid].lat);
double c = 1 / sqrt (1.0 + K_F * (K_F - 2.0) * sin_lat * sin_lat);
double s = (1 - K_F) * (1 - K_F) * c;
double achcp = (K_RE * c + element[sid].alt) * cos_lat;
position[i].x = achcp * cos_theta;
position[i].y = achcp * sin_theta;
position[i].z = (K_RE * s + element[sid].alt) * sin_lat;
}
__global__ void CalcEciVelocity (
Vector* velocity,
Vector* position,
size_t rows,
size_t cols)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t sid = blockIdx.y;
if (tid >= cols || sid >= rows)
{
return;
}
uint32_t i = tid + sid * cols;
velocity[i].x = -K_EAR * position[i].y;
velocity[i].y = K_EAR * position[i].x;
velocity[i].z = 0.0;
}
/* Build, for every (station, epoch) pair, the rotation matrix from the
 * ECI frame to the station's topocentric frame.  Rows r1/r2/r3 are the
 * topocentric basis vectors expressed in ECI coordinates.  Launch
 * layout: threads in x cover epochs (cols), blockIdx.y covers stations.
 *
 * Fix: the rotation angle must be this station's *local* sidereal time.
 * The original read gst[tid], i.e. station 0's entry, so every station
 * other than the first was rotated with the wrong angle.  CalcEciPosition
 * indexes the same array with gst[i]; this kernel now does the same. */
__global__ void CalcMatrixFromEciToBody (
  Matrix* matrix,
  adi::Station::Ele* element,
  Gst* gst,
  size_t rows,
  size_t cols)
{
  uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
  uint32_t sid = blockIdx.y;
  if (tid >= cols || sid >= rows)
    {
      return;
    }
  uint32_t i = sid * cols + tid;
  double sin_lat = sin (element[sid].lat);
  double cos_lat = cos (element[sid].lat);
  /* Local sidereal time of station `sid` at epoch `tid`. */
  double sin_theta = sin (gst[i]);
  double cos_theta = cos (gst[i]);
  matrix[i].r1.x = sin_lat * cos_theta;
  matrix[i].r1.y = sin_lat * sin_theta;
  matrix[i].r1.z = -cos_lat;
  matrix[i].r2.x = -sin_theta;
  matrix[i].r2.y = cos_theta;
  matrix[i].r2.z = 0.0;
  matrix[i].r3.x = cos_lat * cos_theta;
  matrix[i].r3.y = cos_lat * sin_theta;
  matrix[i].r3.z = sin_lat;
}
__global__ void CalcDerivMatrixFromEciToBody (
Matrix* derivMatrix,
Matrix* matrix,
adi::Station::Ele* element,
Gst* gst,
size_t rows,
size_t cols)
{
uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t sid = blockIdx.y;
if (tid >= cols || sid >= rows)
{
return;
}
uint32_t i = sid * cols + tid;
derivMatrix[i].r1.x = -matrix[i].r1.y * K_EAR;
derivMatrix[i].r1.y = matrix[i].r1.x * K_EAR;
derivMatrix[i].r1.z = 0;
derivMatrix[i].r2.x = -matrix[i].r2.y * K_EAR;
derivMatrix[i].r2.y = matrix[i].r2.x * K_EAR;
derivMatrix[i].r2.z = 0.0;
derivMatrix[i].r3.x = -matrix[i].r3.y * K_EAR;
derivMatrix[i].r3.y = matrix[i].r3.x * K_EAR;
derivMatrix[i].r3.z = 0;
}
}
}
} |
a159283d30d6a4169098dc066fbbbb6d3df965fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file count_sketch.cu
* \brief count_sketch op
* \author Chen Zhu, Yang Shi
*/
#include "./count_sketch-inl.h"
#include <mshadow/tensor.h>
#include <stdio.h>
#include <algorithm>
#define WARPS_PER_BLOCK 1
#define THREADS_PER_BLOCK 512
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
// wrappers to deal with atomic add
// supporting only single precision
// Single-precision overload: hardware atomicAdd exists on all supported
// architectures, so just forward to it.
__device__ void atomic_add(float* dst, float val) {
atomicAdd(dst, val);
}
// for double precision
// Double-precision overload: emulate atomicAdd with a compare-and-swap
// loop on the 64-bit integer representation (needed on architectures
// without native double atomicAdd, i.e. pre-SM60).
__device__ void atomic_add(double* address, double val) {
// code example in the official document at:
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
// #atomic-functions
// NOLINT_NEXT_LINE(runtime/int)
unsigned long long int* address_as_ull = (unsigned long long int*) address; // NOLINT(*)
unsigned long long int old = *address_as_ull, assumed; // NOLINT(*)
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN
// (since NaN != NaN)
} while (assumed != old);
}
// Forward pass of count sketch: each input element in[sample][j] is
// scattered into out[sample][h[j]] with sign/scale s[j].  Multiple input
// columns can hash to the same output bucket, hence the atomic add.
// One thread per input element; nthreads == batchlen * in_dim.
template <typename DType>
__global__ void sketch_forward_kernel(const int nthreads, DType *out, const DType *h,
const DType *s, const DType *in, const int n_smaples,
const int in_dim, const int out_dim) {
// input: n_smaples * in_dim
// output: n_smaples * out_dim
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= nthreads) {
return;
}
// nthreads is the maximum of thread indices, should be equal to in_dim
// index is point index
const int i_indim = index % in_dim;
const int i_sample = index / in_dim;
// get the target location in the output
const int target = i_sample*out_dim + h[i_indim];
atomic_add(out + target, s[i_indim] * in[index]);
}
// Backward pass of count sketch: gather the gradient of each input
// element from its hashed output bucket, in_grad[i] = out_grad[h(i)] * s(i).
// Only the gradient w.r.t. the input is computed (the gradient w.r.t. s
// could be added here if ever needed).  One thread per input element.
//
// Fix: the forward kernel guards against index >= nthreads but this
// kernel did not, so trailing threads of the last block read and wrote
// past the end of the buffers whenever nthreads was not a multiple of
// the block size.  The same bounds check is now applied here.
template <typename DType>
__global__ void sketch_backward_kernel(const int nthreads, DType *in_grad, const DType *h,
                                       const DType *s, const DType *out_grad, const int n_smaples,
                                       const int in_dim, const int out_dim) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= nthreads) {
    return;
  }
  const int i_indim = index % in_dim;
  const int i_sample = index / in_dim;
  const int i_outdim = i_sample*out_dim + h[i_indim];
  in_grad[index] = out_grad[i_outdim] * s[i_indim];
}
} // namespace cuda
// CountSketch Forward
// Host-side driver for the count sketch forward pass.  Processes the
// n_samples rows in chunks of at most processing_batch_size, launching
// one thread per input element of each chunk.
template <typename DType>
inline void CountSketchForward(const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 2, DType> &in,
const Tensor<gpu, 1, DType> &h,
const Tensor<gpu, 1, DType> &s,
const int n_samples,
const int processing_batch_size,
const int in_dim,
const int out_dim) {
DType *out_ptr = out.dptr_;
const DType *in_ptr = in.dptr_;
const DType *h_ptr = h.dptr_;
const DType *s_ptr = s.dptr_;
// upper_bound + 1 iterations == ceil(n_samples / processing_batch_size).
int upper_bound = n_samples/processing_batch_size;
if (n_samples%processing_batch_size == 0) {
upper_bound = upper_bound-1;
}
// guarantee there are at least one iteration
upper_bound = upper_bound > 0? upper_bound:0;
int bstart = 0;
for ( int i = 0; i <= upper_bound; i++ ) {
// batchlen equals processing_batch_size on every iteration except
// possibly the last, so the bstart update below stays consistent.
const int batchlen = min(processing_batch_size, n_samples - bstart);
const int nthreads = batchlen * in_dim;
// to make number of threads the same as input
const int threads_per_block = min(THREADS_PER_BLOCK, nthreads);
int nblocks = (nthreads + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( cuda::sketch_forward_kernel<DType>), dim3(nblocks), dim3(threads_per_block), 0, 0,
nthreads, out_ptr+bstart*out_dim, h_ptr,
s_ptr, in_ptr+bstart*in_dim, batchlen,
in_dim, out_dim);
MSHADOW_CUDA_POST_KERNEL_CHECK(sketch_forward_kernel);
// hipDeviceSynchronize();
bstart = (i+1)*batchlen;
}
}
// Host-side driver for the count sketch backward pass; mirrors
// CountSketchForward's chunking, launching one thread per input-gradient
// element of each chunk.
template<typename DType>
inline void CountSketchBackward(const Tensor<gpu, 2, DType> &in_grad,
const Tensor<gpu, 2, DType> &out_grad,
const Tensor<gpu, 1, DType> &h,
const Tensor<gpu, 1, DType> &s,
const int n_samples,
const int processing_batch_size,
const int in_dim,
const int out_dim) {
DType *in_grad_ptr = in_grad.dptr_;
const DType *out_grad_ptr = out_grad.dptr_;
const DType *h_ptr = h.dptr_;
const DType *s_ptr = s.dptr_;
// upper_bound + 1 iterations == ceil(n_samples / processing_batch_size).
int upper_bound = n_samples/processing_batch_size;
if (n_samples%processing_batch_size == 0) {
upper_bound = upper_bound-1;
}
// guarantee there are at least one iteration
upper_bound = upper_bound > 0? upper_bound:0;
int bstart = 0;
for ( int i = 0; i <= upper_bound; i++ ) {
const int batchlen = min(processing_batch_size, n_samples - bstart);
const int nthreads = batchlen * in_dim;
// to make number of threads the same as input
const int threads_per_block = min(THREADS_PER_BLOCK, nthreads);
int nblocks = (nthreads + threads_per_block - 1) / threads_per_block;
hipLaunchKernelGGL(( cuda::sketch_backward_kernel<DType>), dim3(nblocks), dim3(threads_per_block), 0, 0,
nthreads, in_grad_ptr+bstart*in_dim, h_ptr,
s_ptr, out_grad_ptr+bstart*out_dim, batchlen,
in_dim, out_dim);
MSHADOW_CUDA_POST_KERNEL_CHECK(sketch_backward_kernel);
bstart = (i+1)*batchlen;
}
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory for CountSketch: dispatch on the requested dtype.
// Only float32 and float64 are implemented; float16 is explicitly
// rejected, anything else is a hard error.
template<>
Operator* CreateOp<gpu>(CountSketchParam param, int dtype) {
Operator *op = NULL;
switch (dtype) {
case mshadow::kFloat32:
op = new CountSketchOp<gpu, float>(param);
break;
case mshadow::kFloat64:
op = new CountSketchOp<gpu, double>(param);
break;
case mshadow::kFloat16:
LOG(FATAL) << "float16 count sketch layer is currently"
"not supported.";
break;
default:
LOG(FATAL) << "Unsupported type " << dtype;
}
return op;
}
} // namespace op
} // namespace mxnet
| a159283d30d6a4169098dc066fbbbb6d3df965fc.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file count_sketch.cu
* \brief count_sketch op
* \author Chen Zhu, Yang Shi
*/
#include "./count_sketch-inl.h"
#include <mshadow/tensor.h>
#include <stdio.h>
#include <algorithm>
#define WARPS_PER_BLOCK 1
#define THREADS_PER_BLOCK 512
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
namespace mshadow {
namespace cuda {
// wrappers to deal with atomic add
// supporting only single precision
__device__ void atomic_add(float* dst, float val) {
atomicAdd(dst, val);
}
// for double precision
__device__ void atomic_add(double* address, double val) {
// code example in the official document at:
// http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
// #atomic-functions
// NOLINT_NEXT_LINE(runtime/int)
unsigned long long int* address_as_ull = (unsigned long long int*) address; // NOLINT(*)
unsigned long long int old = *address_as_ull, assumed; // NOLINT(*)
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN
// (since NaN != NaN)
} while (assumed != old);
}
template <typename DType>
__global__ void sketch_forward_kernel(const int nthreads, DType *out, const DType *h,
const DType *s, const DType *in, const int n_smaples,
const int in_dim, const int out_dim) {
// input: n_smaples * in_dim
// output: n_smaples * out_dim
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= nthreads) {
return;
}
// nthreads is the maximum of thread indices, should be equal to in_dim
// index is point index
const int i_indim = index % in_dim;
const int i_sample = index / in_dim;
// get the target location in the output
const int target = i_sample*out_dim + h[i_indim];
atomic_add(out + target, s[i_indim] * in[index]);
}
// Backward pass of count sketch: gather the gradient of each input
// element from its hashed output bucket, in_grad[i] = out_grad[h(i)] * s(i).
// Only the gradient w.r.t. the input is computed (the gradient w.r.t. s
// could be added here if ever needed).  One thread per input element.
//
// Fix: the forward kernel guards against index >= nthreads but this
// kernel did not, so trailing threads of the last block read and wrote
// past the end of the buffers whenever nthreads was not a multiple of
// the block size.  The same bounds check is now applied here.
template <typename DType>
__global__ void sketch_backward_kernel(const int nthreads, DType *in_grad, const DType *h,
                                       const DType *s, const DType *out_grad, const int n_smaples,
                                       const int in_dim, const int out_dim) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= nthreads) {
    return;
  }
  const int i_indim = index % in_dim;
  const int i_sample = index / in_dim;
  const int i_outdim = i_sample*out_dim + h[i_indim];
  in_grad[index] = out_grad[i_outdim] * s[i_indim];
}
} // namespace cuda
// CountSketch Forward
template <typename DType>
inline void CountSketchForward(const Tensor<gpu, 2, DType> &out,
const Tensor<gpu, 2, DType> &in,
const Tensor<gpu, 1, DType> &h,
const Tensor<gpu, 1, DType> &s,
const int n_samples,
const int processing_batch_size,
const int in_dim,
const int out_dim) {
DType *out_ptr = out.dptr_;
const DType *in_ptr = in.dptr_;
const DType *h_ptr = h.dptr_;
const DType *s_ptr = s.dptr_;
int upper_bound = n_samples/processing_batch_size;
if (n_samples%processing_batch_size == 0) {
upper_bound = upper_bound-1;
}
// guarantee there are at least one iteration
upper_bound = upper_bound > 0? upper_bound:0;
int bstart = 0;
for ( int i = 0; i <= upper_bound; i++ ) {
const int batchlen = min(processing_batch_size, n_samples - bstart);
const int nthreads = batchlen * in_dim;
// to make number of threads the same as input
const int threads_per_block = min(THREADS_PER_BLOCK, nthreads);
int nblocks = (nthreads + threads_per_block - 1) / threads_per_block;
cuda::sketch_forward_kernel<DType><<<nblocks, threads_per_block>>>(
nthreads, out_ptr+bstart*out_dim, h_ptr,
s_ptr, in_ptr+bstart*in_dim, batchlen,
in_dim, out_dim);
MSHADOW_CUDA_POST_KERNEL_CHECK(sketch_forward_kernel);
// cudaThreadSynchronize();
bstart = (i+1)*batchlen;
}
}
template<typename DType>
inline void CountSketchBackward(const Tensor<gpu, 2, DType> &in_grad,
const Tensor<gpu, 2, DType> &out_grad,
const Tensor<gpu, 1, DType> &h,
const Tensor<gpu, 1, DType> &s,
const int n_samples,
const int processing_batch_size,
const int in_dim,
const int out_dim) {
DType *in_grad_ptr = in_grad.dptr_;
const DType *out_grad_ptr = out_grad.dptr_;
const DType *h_ptr = h.dptr_;
const DType *s_ptr = s.dptr_;
int upper_bound = n_samples/processing_batch_size;
if (n_samples%processing_batch_size == 0) {
upper_bound = upper_bound-1;
}
// guarantee there are at least one iteration
upper_bound = upper_bound > 0? upper_bound:0;
int bstart = 0;
for ( int i = 0; i <= upper_bound; i++ ) {
const int batchlen = min(processing_batch_size, n_samples - bstart);
const int nthreads = batchlen * in_dim;
// to make number of threads the same as input
const int threads_per_block = min(THREADS_PER_BLOCK, nthreads);
int nblocks = (nthreads + threads_per_block - 1) / threads_per_block;
cuda::sketch_backward_kernel<DType><<<nblocks, threads_per_block>>>(
nthreads, in_grad_ptr+bstart*in_dim, h_ptr,
s_ptr, out_grad_ptr+bstart*out_dim, batchlen,
in_dim, out_dim);
MSHADOW_CUDA_POST_KERNEL_CHECK(sketch_backward_kernel);
bstart = (i+1)*batchlen;
}
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(CountSketchParam param, int dtype) {
Operator *op = NULL;
switch (dtype) {
case mshadow::kFloat32:
op = new CountSketchOp<gpu, float>(param);
break;
case mshadow::kFloat64:
op = new CountSketchOp<gpu, double>(param);
break;
case mshadow::kFloat16:
LOG(FATAL) << "float16 count sketch layer is currently"
"not supported.";
break;
default:
LOG(FATAL) << "Unsupported type " << dtype;
}
return op;
}
} // namespace op
} // namespace mxnet
|
9227f68d36f2df98015feed09938ea76f7bb68b0.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA Graph Coloring kernel
// Topological-Driven: one node per thread, thread_centric
//
// Reference:
// A. Grosset, et al. Evaluating Graph Coloring on GPUs
//=================================================================//
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define SEED 123
// Mark every vertex as uncolored (MY_INFINITY) before the coloring
// iterations begin.  One thread per vertex.
__global__ void initialize(uint32_t * d_vpl, uint64_t num_vertex)
{
    size_t vertex_id = blockIdx.x * blockDim.x + threadIdx.x;
    if ( vertex_id >= num_vertex )
        return;
    d_vpl[vertex_id] = MY_INFINITY;
}
// One round of randomized independent-set coloring (Jones-Plassmann
// style): an uncolored vertex takes the current `color` iff its random
// value is a local maximum among its still-uncolored neighbors; ties on
// the random value are won by the smaller vertex id.  Any vertex that
// must defer clears *device_over so the host runs another round.
__global__
void kernel(uint32_t * vplist,
uint32_t * randlist,
cudaGraph graph,
uint32_t color,
bool * device_over)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= graph.vertex_cnt) return;
if (vplist[tid]==MY_INFINITY)
{
uint64_t vid = tid;
uint64_t start, end;
start = graph.get_firstedge_index(vid);
end = graph.get_edge_index_end(vid);
uint32_t local_rand = randlist[vid];
bool found_larger=false;
for (uint64_t i=start; i<end; i++)
{
uint64_t dest = graph.get_edge_dest(i);
// Neighbors that already received an earlier color are no longer
// competitors for this one.
if (vplist[dest]<color) continue;
if ( (randlist[dest]>local_rand) ||
(randlist[dest]==local_rand && dest<vid))
{
found_larger = true;
break;
}
}
if (found_larger==false)
vplist[vid] = color;
else
*device_over = false;
}
}
// Host driver for GPU graph coloring.  Generates per-vertex random
// priorities on the host (staged through vproplist), copies the CSR
// graph to the device, then iterates the coloring kernel — one color per
// round — until every vertex is colored.  On return, vproplist holds
// each vertex's color.  Timing for copies and kernels is collected with
// CUDA events.
// NOTE(review): several comments below say "BFS traversal" — they look
// copied from a BFS benchmark; this loop iterates colors, not BFS levels.
void cuda_graph_coloring(uint64_t * vertexlist,
uint64_t * edgelist,
uint32_t * vproplist,
uint64_t vertex_cnt,
uint64_t edge_cnt)
{
uint32_t * device_vpl = 0;
uint32_t * device_rand = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
// NOTE(review): the cast truncates vertex_cnt for graphs larger than
// UINT_MAX vertices — harmless only while vertex_cnt fits in 32 bits.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_rand, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// initialization
hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
// gen rand data and temprarily use vproplist to store it
// Per-vertex random priorities; fixed SEED keeps runs reproducible.
srand(SEED);
for (unsigned i=0;i<vertex_cnt;i++)
{
vproplist[i] = rand();
}
cudaErrCheck( hipMemcpy(device_rand, vproplist, vertex_cnt*sizeof(uint32_t),
hipMemcpyHostToDevice) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// BFS traversal
bool stop;
hipEventRecord(start_event, 0);
// `curr` is the color assigned this round; it also counts rounds.
uint32_t curr=0;
do
{
// Each iteration processes
// one level of BFS traversal
// (actually: assigns color `curr` to one maximal independent set)
stop = true;
cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( kernel), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, device_rand, d_graph, curr, device_over);
cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) );
curr++;
}while(!stop);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
cudaErrCheck( hipFree(device_vpl) );
cudaErrCheck( hipFree(device_rand) );
}
| 9227f68d36f2df98015feed09938ea76f7bb68b0.cu | //=================================================================//
// CUDA Graph Coloring kernel
// Topological-Driven: one node per thread, thread_centric
//
// Reference:
// A. Grosset, et al. Evaluating Graph Coloring on GPUs
//=================================================================//
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define SEED 123
// Mark every vertex as uncolored (MY_INFINITY). One thread per vertex.
// Fix: the thread index was computed as a 32-bit product before widening to
// size_t; cast first so grids addressing > 2^32 vertices don't wrap.
__global__ void initialize(uint32_t * d_vpl, uint64_t num_vertex)
{
    size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < num_vertex )
    {
        d_vpl[tid] = MY_INFINITY;
    }
}
// One coloring round, one thread per vertex. An uncolored vertex claims
// `color` iff no still-uncolored neighbor outranks it: a neighbor wins on a
// larger random value, or on a smaller vertex id when the values tie.
// A vertex that must defer clears *device_over so the host runs more rounds.
__global__
void kernel(uint32_t * vplist,
            uint32_t * randlist,
            cudaGraph graph,
            uint32_t color,
            bool * device_over)
{
    uint64_t vid = blockIdx.x * blockDim.x + threadIdx.x;
    if (vid >= graph.vertex_cnt) return;
    if (vplist[vid] != MY_INFINITY) return;   // already colored

    uint64_t edgeBegin = graph.get_firstedge_index(vid);
    uint64_t edgeEnd   = graph.get_edge_index_end(vid);
    uint32_t myRand    = randlist[vid];

    bool outranked = false;
    for (uint64_t e = edgeBegin; e < edgeEnd && !outranked; e++)
    {
        uint64_t nbr = graph.get_edge_dest(e);
        if (vplist[nbr] < color) continue;    // neighbor colored in an earlier round
        outranked = (randlist[nbr] > myRand) ||
                    (randlist[nbr] == myRand && nbr < vid);
    }

    if (outranked)
        *device_over = false;                 // request another round
    else
        vplist[vid] = color;                  // this vertex wins locally
}
// Host driver for GPU greedy graph coloring (Jones–Plassmann style): in each
// round every still-uncolored vertex claims the current color iff it holds
// the locally largest random value among its uncolored neighbors.
//
// vertexlist/edgelist : CSR graph arrays (copied to device via cudaGraph).
// vproplist           : output — final color per vertex. Also reused as
//                       host scratch for the per-vertex random values.
// vertex_cnt/edge_cnt : graph sizes.
//
// Fix: device_over was allocated but never freed (device memory leak).
void cuda_graph_coloring(uint64_t * vertexlist,
                         uint64_t * edgelist,
                         uint32_t * vproplist,
                         uint64_t vertex_cnt,
                         uint64_t edge_cnt)
{
    uint32_t * device_vpl = 0;   // per-vertex color on the device
    uint32_t * device_rand = 0;  // per-vertex random priority on the device
    bool * device_over = 0;      // "another round needed" flag
    float h2d_copy_time = 0; // host to device data transfer time
    float d2h_copy_time = 0; // device to host data transfer time
    float kernel_time = 0;   // kernel execution time
    int device;
    cudaGetDevice(&device);
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp,device);
    // Try to use as many threads as possible so that each thread
    // is processing one vertex. If max thread is reached,
    // split them into multiple blocks.
    unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
    if (num_thread_per_block > devProp.maxThreadsPerBlock)
        num_thread_per_block = devProp.maxThreadsPerBlock;
    unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
    // malloc of gpu side
    cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
    cudaErrCheck( cudaMalloc((void**)&device_rand, vertex_cnt*sizeof(uint32_t)) );
    cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) );
    cudaEvent_t start_event, stop_event;
    cudaErrCheck( cudaEventCreate(&start_event) );
    cudaErrCheck( cudaEventCreate(&stop_event) );
    // mark every vertex as uncolored
    initialize<<<num_block, num_thread_per_block>>>(device_vpl, vertex_cnt);
    // prepare graph struct: one for host side, one for device side
    cudaGraph h_graph, d_graph;
    // here copy only the pointers
    h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
    // memcpy from host to device (timed)
    cudaEventRecord(start_event, 0);
    // copy graph data to device
    h_graph.cudaGraphCopy(&d_graph);
    // gen rand data and temporarily use vproplist to store it
    srand(SEED);
    for (unsigned i=0;i<vertex_cnt;i++)
    {
        vproplist[i] = rand();
    }
    cudaErrCheck( cudaMemcpy(device_rand, vproplist, vertex_cnt*sizeof(uint32_t),
                cudaMemcpyHostToDevice) );
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event);
    cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
    // coloring rounds: iterate until no vertex had to defer
    bool stop;
    cudaEventRecord(start_event, 0);
    uint32_t curr=0;    // current color / round counter
    do
    {
        // each iteration assigns color `curr` to all local winners
        stop = true;
        cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) );
        kernel<<<num_block, num_thread_per_block>>>(device_vpl, device_rand, d_graph, curr, device_over);
        cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) );
        curr++;
    }while(!stop);
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event);
    cudaEventElapsedTime(&kernel_time, start_event, stop_event);
    // copy the final colors back (timed)
    cudaEventRecord(start_event, 0);
    cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
                cudaMemcpyDeviceToHost) );
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event);
    cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
    printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
    printf("== host->device copy time: %f ms\n", h2d_copy_time);
    printf("== device->host copy time: %f ms\n", d2h_copy_time);
    printf("== kernel time: %f ms\n", kernel_time);
#endif
    cudaEventDestroy(start_event);
    cudaEventDestroy(stop_event);
    // free graph struct on device side
    d_graph.cudaGraphFree();
    cudaErrCheck( cudaFree(device_vpl) );
    cudaErrCheck( cudaFree(device_rand) );
    cudaErrCheck( cudaFree(device_over) );   // fix: was leaked before
}
|
bc904a3c46ac5bef04131102234e8b358c1d9ab0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
/* W B Langdon at MUN 10 May 2007
* Program to demonstarte use of OpenGL's glDrawPixels
*/
#ifdef _WIN32
#include <windows.h>
#endif
/*#include <GL/gl.h>
#include <GL/glext.h>
#include <GL/glut.h>*/
#include "GL/glew.h"
#include "GL/freeglut.h"
#include <iostream>
#include <sstream>
#include "math.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <stdarg.h>
#define SIZE_NBOR_LIST 128
#define SIZE_SUBJ_LIST 32
#define NUM_AGENT 1024
#define DOT_R 1
#define NUM_CELL_DIM 8
#define NUM_CELL (NUM_CELL_DIM * NUM_CELL_DIM)
#define SIZE_BLOCK SIZE_NBOR_LIST
#define SIZE_NBOR_DATA 18
#define SIZE_SUBJ_DATA 16
#define NUM_BLOCK_PER_BATCH 16
#define NUM_BLOCK NUM_BLOCK_PER_BATCH
#define NUM_BATCH 2
#define RANGE 0.05 //environment dim ranging from 0 ~ 1
// Plain-old-data state for one agent. The same eight fields are mirrored,
// field by field, into the packed subject/neighbor staging buffers
// (see subjectList / neighborList).
class AgentData {
public:
    double x;        // position x, normalized to [0,1]
    double y;        // position y, normalized to [0,1]
    double velX;     // velocity x
    double velY;     // velocity y
    double goalX;    // goal position x
    double goalY;    // goal position y
    double v0;       // preferred speed
    double mass;     // agent mass
};
// An agent owning its AgentData record.
// Fixes: calDist accumulated the squared distance of doubles in a float,
// silently discarding precision; `data` was never deleted (leak) — Agent is
// only ever held by pointer in this file, so adding a destructor is safe.
class Agent {
public:
    AgentData *data;    // owned; released in the destructor
    // Place the agent at a uniformly random position in [0,1]x[0,1].
    Agent() {
        data = new AgentData();
        data->x = (double)rand() / RAND_MAX;
        data->y = (double)rand() / RAND_MAX;
    }
    ~Agent() {
        delete data;
    }
    // Euclidean distance to another agent, computed entirely in double.
    double calDist(Agent *other) {
        double dx = data->x - other->data->x;
        double dy = data->y - other->data->y;
        return sqrt(dx * dx + dy * dy);
    }
};
// Structure-of-arrays view over one block's slice of the packed, int-typed
// subject staging buffer. Each double-valued list occupies SIZE_SUBJ_LIST
// doubles = SIZE_SUBJ_LIST * 2 ints, hence the stride-2 spacing below.
struct subjectList {
    double *xList;
    double *yList;
    double *velXList;
    double *velYList;
    double *goalXList;
    double *goalYList;
    double *v0List;
    double *massList;
    // Bind all eight lists to block `inBatchBlockId` of `subjData`.
    void setBlock(int inBatchBlockId, int *subjData) {
        int *base = subjData + inBatchBlockId * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA;
        xList     = (double*)(base);
        yList     = (double*)(base + SIZE_SUBJ_LIST * 2);
        velXList  = (double*)(base + SIZE_SUBJ_LIST * 4);
        velYList  = (double*)(base + SIZE_SUBJ_LIST * 6);
        goalXList = (double*)(base + SIZE_SUBJ_LIST * 8);
        goalYList = (double*)(base + SIZE_SUBJ_LIST * 10);
        v0List    = (double*)(base + SIZE_SUBJ_LIST * 12);
        massList  = (double*)(base + SIZE_SUBJ_LIST * 14);
    }
};
// Structure-of-arrays view over one block's slice of the packed, int-typed
// neighbor staging buffer. Double-valued lists take SIZE_NBOR_LIST * 2 ints
// each; the two trailing id lists are plain int arrays (one int each).
struct neighborList {
    double *xList;
    double *yList;
    double *velXList;
    double *velYList;
    double *goalXList;
    double *goalYList;
    double *v0List;
    double *massList;
    int *nborIdList;    // global agent id of each neighbor entry
    int *subjIdList;    // sorted index of the subject each entry belongs to
    // Bind all ten lists to block `inBatchBlockId` of `nborData`.
    void setBatch(int inBatchBlockId, int *nborData) {
        int *base = nborData + inBatchBlockId * SIZE_NBOR_LIST * SIZE_NBOR_DATA;
        xList      = (double*)(base);
        yList      = (double*)(base + SIZE_NBOR_LIST * 2);
        velXList   = (double*)(base + SIZE_NBOR_LIST * 4);
        velYList   = (double*)(base + SIZE_NBOR_LIST * 6);
        goalXList  = (double*)(base + SIZE_NBOR_LIST * 8);
        goalYList  = (double*)(base + SIZE_NBOR_LIST * 10);
        v0List     = (double*)(base + SIZE_NBOR_LIST * 12);
        massList   = (double*)(base + SIZE_NBOR_LIST * 14);
        nborIdList = base + SIZE_NBOR_LIST * 16;
        subjIdList = base + SIZE_NBOR_LIST * 17;
    }
};
// Per-thread-block metadata describing one neighbor-list block.
struct blockIndices {
    int numNbor;         // number of valid entries in the neighbor list
    int numSubj;         // number of subject agents covered by this block
    int firstSubj;       // sorted index of this block's first subject agent
    int blockInBatchId;  // position of this block inside its batch
    int batchId;         // which GPUBatch this block belongs to
    int iteration;       // NOTE(review): never written in this file — purpose unconfirmed
};
// One double-buffering unit: a stream plus pinned host staging buffers
// (allocated with hipHostMalloc in main) and their device mirrors.
struct GPUBatch {
    hipStream_t stream;   // stream carrying this batch's copies and kernel
    int *nborData;        // pinned host buffer: packed neighbor lists
    int *nborDataDev;     // device mirror of nborData
    int *subjData;        // pinned host buffer: packed subject lists
    int *subjDataDev;     // device mirror of subjData
    blockIndices *bi;     // pinned host buffer: per-block metadata
    blockIndices *biDev;  // device mirror of bi
};
// Window / framebuffer dimensions for the glDrawPixels visualization.
unsigned int window_width = 512, window_height = 512;
const int size = window_width*window_height;  // pixels per frame
Agent **agentList;                            // all NUM_AGENT simulated agents
float* pixels = new float[size * 3];          // host RGB framebuffer (3 floats/pixel)
int* cidStart = new int[NUM_CELL];            // first sorted-agent index per cell
int* cidEnd = new int[NUM_CELL];              // one-past-last sorted-agent index per cell
int* agentCids = new int[NUM_AGENT];          // Z-order cell id per agent
int* agentIds = new int[NUM_AGENT];           // agent ids, sorted by cell id
GPUBatch batches[NUM_BATCH];                  // double-buffered GPU batches
int batchId = 0;                              // batch currently being filled
//FILE *fpOut;
// One thread block processes one packed neighbor-list block; one thread per
// neighbor entry (the host launches with SIZE_BLOCK == SIZE_NBOR_LIST).
// Dynamic shared memory layout, in ints:
//   [0, SIZE_SUBJ_LIST*SIZE_SUBJ_DATA)  staged subject data
//   next SIZE_SUBJ_LIST ints            fSumX accumulators
//   next SIZE_SUBJ_LIST ints            fSumY accumulators
// NOTE(review): fSumX/fSumY are accumulated but never written back to global
// memory — this looks like placeholder work; confirm before relying on it.
__global__ void agentExecKernel(int *neighborDataDev, int *subjDataDev, blockIndices *biDev) {
    extern __shared__ int smem[];
    // load this block's neighbor and subject indices
    int numNbor = biDev[blockIdx.x].numNbor;
    int numSubj = biDev[blockIdx.x].numSubj;
    int batchId = biDev[blockIdx.x].batchId;
    int blockIdInBatch = biDev[blockIdx.x].blockInBatchId;
    int firstSubj = biDev[blockIdx.x].firstSubj;
    // cooperatively stage this block's subject data into shared memory
    int offset = 0;
    while (offset + threadIdx.x < SIZE_SUBJ_LIST * SIZE_SUBJ_DATA) {
        smem[offset + threadIdx.x] = subjDataDev[offset + threadIdx.x + blockIdx.x * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA];
        offset += blockDim.x;
    }
    __syncthreads();
    // set subject agent update zone in smem (two int accumulators per subject)
    int *fSumX = &smem[SIZE_SUBJ_LIST * SIZE_SUBJ_DATA]; //update zone for prop 1 follows reading zone
    int *fSumY = &smem[SIZE_SUBJ_LIST * SIZE_SUBJ_DATA + SIZE_SUBJ_LIST]; //update zone for prop 1 follows reading zone
    offset = 0;
    while (offset + threadIdx.x < SIZE_SUBJ_LIST) {
        fSumX[offset + threadIdx.x] = 10;   // arbitrary seed values
        fSumY[offset + threadIdx.x] = 20;
        offset += blockDim.x;
    }
    __syncthreads();
    // obtain head addresses of this block's packed neighbor lists
    // (layout matches neighborList::setBatch on the host)
    double *xList = (double*)&neighborDataDev[0 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *yList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 2 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *velXList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 4 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *velYList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 6 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *goalXList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 8 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *goalYList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 10 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *v0List = (double*)&neighborDataDev[SIZE_NBOR_LIST * 12 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *massList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 14 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    int *nborList = &neighborDataDev[SIZE_NBOR_LIST * 16 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    int *subjList = &neighborDataDev[SIZE_NBOR_LIST * 17 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    // load this thread's neighbor record into registers
    // NOTE(review): lanes >= numNbor read stale entries; the buffer is fully
    // allocated so the loads are safe, and those lanes are masked out below.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    double x = xList[threadIdx.x];
    double y = yList[threadIdx.x];
    double velX = velXList[threadIdx.x];
    double velY = velYList[threadIdx.x];
    double goalX = goalXList[threadIdx.x];
    double goalY = goalYList[threadIdx.x];
    double v0 = v0List[threadIdx.x];
    double mass = massList[threadIdx.x];
    int nbor = nborList[threadIdx.x];
    int subj = subjList[threadIdx.x];
    // do something with neighbor data: fold all fields into one value and
    // write it back over every field of this entry
    double res = x + y + velX + velY + goalX + goalY + v0 + mass;
    xList[threadIdx.x] = yList[threadIdx.x] = velXList[threadIdx.x] = velYList[threadIdx.x] =
        goalXList[threadIdx.x] = goalYList[threadIdx.x] = v0List[threadIdx.x] = massList[threadIdx.x] = res;
    // repeatedly bump the owning subject's shared-memory accumulators
    // (placeholder workload; only lanes holding a valid neighbor take part)
    int smemIdx = subj - firstSubj;
    int temp = 0;
    while (++temp < 100 && threadIdx.x < numNbor) {
        atomicInc((unsigned int*)&fSumX[smemIdx], NUM_AGENT);
        atomicInc((unsigned int*)&fSumY[smemIdx], NUM_AGENT);
    }
    __syncthreads();
    //printf("%d, %d %f, %f\n", threadIdx.x, subj, fSumX, fSumY);
}
namespace util {
    // Interleave the low 16 bits of x (even bit positions) and y (odd bit
    // positions) into a 32-bit Morton / Z-order code.
    int zcode(int x, int y)
    {
        x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210
        y &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210
        x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210
        y = (y ^ (y << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210
        y = (y ^ (y << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210
        x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210
        y = (y ^ (y << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10
        x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10
        y = (y ^ (y << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
        x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
        return x | (y << 1);
    }
    // Swap elements a and b of val.
    //template<class T>
    void swap(int *val, int a, int b) {
        int temp = val[a];
        val[a] = val[b];
        val[b] = temp;
    }
    // Randomized in-place quicksort of key[l, r) (r is exclusive); value is
    // permuted in lockstep so value[i] stays paired with key[i].
    // NOTE(review): recursion depth is O(n) in the worst case.
    //template<class T>
    void quickSort(int *key, int l, int r, int *value) {
        if (l == r)   // empty range — nothing to sort
            return;
        // random pivot, moved to the front
        int randIdx = l + rand() % (r - l);
        swap(key, l, randIdx);
        swap(value, l, randIdx);
        int pivot = key[l];
        // Lomuto-style partition: [l+1, i) holds keys < pivot
        int i = l + 1, j = l + 1;
        for (; j < r; j++) {
            if (key[j] < pivot) {
                swap(key, i, j);
                swap(value, i, j);
                i++;
            }
        }
        // place pivot between the partitions, then recurse on both sides
        swap(key, l, i - 1);
        swap(value, l, i - 1);
        quickSort(key, l, i - 1, value);
        quickSort(key, i, r, value);
    }
};
void neighborSearching() {
// create batch data structure alias
int inBatchBlockId = 0;
GPUBatch batch = batches[batchId];
int *nborData = batch.nborData;
int *nborDataDev = batch.nborDataDev;
int *subjData = batch.subjData;
int *subjDataDev = batch.subjDataDev;
// manipulate nborData with nborList structure;
neighborList nborList;
nborList.setBatch(inBatchBlockId, nborData);
subjectList subjList;
subjList.setBlock(inBatchBlockId, nborData);
// simulate agent moving
for (int i = 0; i < NUM_AGENT; i++) {
agentIds[i] = i;
int cidX = agentList[i]->data->x * NUM_CELL_DIM;
int cidY = agentList[i]->data->y * NUM_CELL_DIM;
agentCids[i] = util::zcode(cidX, cidY);
}
// sorting agent ptr based on cell id
util::quickSort(agentCids, 0, NUM_AGENT, agentIds);
// construct cidStart and cidEnd
for (int i = 0; i < NUM_AGENT; i++) {
int cidPrev = agentCids[i];
int cidNext = agentCids[(i + 1) % NUM_AGENT];
if (cidPrev != cidNext) {
cidEnd[cidPrev] = i + 1;
cidStart[cidNext] = (i + 1) % NUM_AGENT;
}
}
// simulating generate neighbor list
int nborCount = 0;
int nborCountSettled = 0;
int firstSubj = 0;
bool interrupted = false;
bool batchPrepared = false;
for (int i = 0; i < NUM_AGENT; i++) {
// pick agent based on sorted order, and get its bounding box
int agentId = agentIds[i];
Agent *subj = agentList[agentId];
double posX = subj->data->x;
double posY = subj->data->y;
int j = i - firstSubj;
subjList.xList[j] = subj->data->x;
subjList.yList[j] = subj->data->y;
subjList.velXList[j] = subj->data->velX;
subjList.velYList[j] = subj->data->velY;
subjList.goalXList[j] = subj->data->goalX;
subjList.goalYList[j] = subj->data->goalY;
subjList.v0List[j] = subj->data->v0;
subjList.massList[j] = subj->data->mass;
int cellXMin = (posX - RANGE) * NUM_CELL_DIM;
int cellXMax = (posX + RANGE) * NUM_CELL_DIM;
int cellYMin = (posY - RANGE) * NUM_CELL_DIM;
int cellYMax = (posY + RANGE) * NUM_CELL_DIM;
cellXMin = cellXMin < 0 ? 0 : cellXMin;
cellXMax = cellXMax >= NUM_CELL_DIM ? NUM_CELL_DIM - 1 : cellXMax;
cellYMin = cellYMin < 0 ? 0 : cellYMin;
cellYMax = cellYMax >= NUM_CELL_DIM ? NUM_CELL_DIM - 1 : cellYMax;
//fprintf(fpOut, "%d, %d, (%f, %f), (%d, %d, %d, %d)\n", i, agentId, posX, posY, cellXMin, cellXMax, cellYMin, cellYMax);
// iterate bounding box
for (int cidY = cellYMin; cidY <= cellYMax; cidY++) {
for (int cidX = cellXMin; cidX <= cellXMax; cidX++) {
int cellId = util::zcode(cidX, cidY);
for (int k = cidStart[cellId]; k < cidEnd[cellId]; k++) {
// fill neighbor list
Agent *nbor = agentList[agentIds[k]];
double dist = subj->calDist(nbor);
if (dist < RANGE) {
if (nborCount == SIZE_NBOR_LIST) {
interrupted = true;
break;
}
nborList.xList[nborCount] = nbor->data->x;
nborList.yList[nborCount] = nbor->data->y;
nborList.velXList[nborCount] = nbor->data->velX;
nborList.velYList[nborCount] = nbor->data->velY;
nborList.goalXList[nborCount] = nbor->data->goalX;
nborList.goalYList[nborCount] = nbor->data->goalY;
nborList.v0List[nborCount] = nbor->data->v0;
nborList.massList[nborCount] = nbor->data->mass;
nborList.nborIdList[nborCount] = agentIds[k];
nborList.subjIdList[nborCount] = i;
nborCount++;
}
}
}
}
// nborCount is temporary count, nborCountSettled is the number to be processed
if (!interrupted) {
nborCountSettled = nborCount;
// prepare for current block
batch.bi[inBatchBlockId].numNbor = nborCountSettled;
batch.bi[inBatchBlockId].numSubj = i - firstSubj;
batch.bi[inBatchBlockId].blockInBatchId = inBatchBlockId;
batch.bi[inBatchBlockId].batchId = batchId;
batch.bi[inBatchBlockId].firstSubj = firstSubj;
} else {
// prepare for next block
//debug
for (int jj = 0; jj < 128; jj++) {
int subjtemp = nborList.subjIdList[jj];
int firstSubjTemp = nborList.subjIdList[0];
//fprintf(fpOut, "%d, %d\n", subjtemp, firstSubj);
//fflush(fpOut);
}
interrupted = false;
nborCount = 0;
inBatchBlockId = (++inBatchBlockId) % NUM_BLOCK_PER_BATCH;
firstSubj = i;
i--;
nborList.setBatch(inBatchBlockId, nborData);
subjList.setBlock(inBatchBlockId, subjData);
if (inBatchBlockId == 0)
batchPrepared = true;
}
if (i == NUM_AGENT - 1)
batchPrepared = true;
if (firstSubj == 1020)
printf("Hello world");
// perform GPU processing
if (batchPrepared) {
batchPrepared = false;
hipMemcpyAsync(nborDataDev, nborData, sizeof(int) * SIZE_NBOR_LIST * SIZE_NBOR_DATA * NUM_BLOCK_PER_BATCH, hipMemcpyHostToDevice, batch.stream);
hipMemcpyAsync(subjDataDev, subjData, sizeof(int) * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA * NUM_BLOCK_PER_BATCH, hipMemcpyHostToDevice, batch.stream);
hipMemcpyAsync(batch.biDev, batch.bi, sizeof(blockIndices) * NUM_BLOCK_PER_BATCH, hipMemcpyHostToDevice, batch.stream);
//fprintf(fpOut, "\n one batch");
//fflush(fpOut);
error = hipGetLastError();
printf("Copy Error: %s\n", hipGetErrorString(error));
size_t modZoneSize = sizeof(int) * SIZE_SUBJ_LIST * 2;
size_t smemSize = sizeof(int) * SIZE_SUBJ_DATA * SIZE_SUBJ_LIST + modZoneSize;
agentExecKernel << <NUM_BLOCK, SIZE_BLOCK, smemSize, batch.stream >> >(nborDataDev, subjDataDev, batch.biDev);
error = hipGetLastError();
printf("Exec Error: %s\n", hipGetErrorString(error));
// create batch data structure alias
batchId = ++batchId % NUM_BATCH;
batch = batches[batchId];
nborData = batch.nborData;
nborDataDev = batch.nborDataDev;
subjData = batch.subjData;
subjDataDev = batch.subjDataDev;
nborList.setBatch(inBatchBlockId, nborData);
subjList.setBlock(inBatchBlockId, subjData);
// wait for next stream's previous work to be done
hipStreamSynchronize(batch.stream);
hipError_t error = hipGetLastError();
printf("Sync Error: %s\n", hipGetErrorString(error));
}
}
}
// Rasterize every agent as a small white square into the host pixel buffer,
// blit it with glDrawPixels, then schedule the next frame.
void display()
{
    // clear the RGB framebuffer to black
    memset(pixels, 0, size * 3 * sizeof(float));
    for (int a = 0; a < NUM_AGENT; a++) {
        int canvasX = agentList[a]->data->x * window_width;
        int canvasY = agentList[a]->data->y * window_height;
        // paint a (2*DOT_R)^2 square centered on the agent, clipped to window
        for (int px = canvasX - DOT_R; px < canvasX + DOT_R; px++) {
            for (int py = canvasY - DOT_R; py < canvasY + DOT_R; py++) {
                if (px < 0 || px >= window_width || py < 0 || py >= window_height)
                    continue;
                int idx = py * window_width + px;
                pixels[idx * 3] = 1;
                pixels[idx * 3 + 1] = 1;
                pixels[idx * 3 + 2] = 1;
            }
        }
    }
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    // glDrawPixels writes the whole pixel block to the framebuffer
    glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels);
    glutSwapBuffers();
    glutPostRedisplay();
}
// Entry point: allocates the agents and, per batch, a stream plus pinned
// host staging buffers with device mirrors; then drives 1000 ticks of
// randomized agent positions through neighborSearching().
// NOTE(review): the HIP allocation calls are unchecked and no resources are
// released at exit — acceptable for a demo, worth fixing for reuse.
int main(int argc, char** argv) {
    //Initialization
    srand(0);   // fixed seed for a reproducible run
    agentList = (Agent**)malloc(sizeof(Agent*) * NUM_AGENT);
    for (int i = 0; i < NUM_AGENT; i++) {
        agentList[i] = new Agent();
    }
    // per-batch GPU resources: stream + pinned host buffers + device mirrors
    for (int i = 0; i < NUM_BATCH; i++) {
        hipStreamCreate(&batches[i].stream);
        //batches[i].stream = 0;
        hipHostMalloc((void**)&batches[i].nborData, sizeof(int) * SIZE_NBOR_LIST * SIZE_NBOR_DATA * NUM_BLOCK_PER_BATCH);
        hipHostMalloc((void**)&batches[i].subjData, sizeof(int) * SIZE_SUBJ_DATA * SIZE_SUBJ_LIST * NUM_BLOCK_PER_BATCH);
        hipMalloc((void**)&batches[i].nborDataDev, sizeof(int) * SIZE_NBOR_LIST * SIZE_NBOR_DATA * NUM_BLOCK_PER_BATCH);
        hipMalloc((void**)&batches[i].subjDataDev, sizeof(int) * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA * NUM_BLOCK_PER_BATCH);
        hipHostMalloc((void**)&batches[i].bi, sizeof(blockIndices) * NUM_BLOCK_PER_BATCH);
        hipMalloc((void**)&batches[i].biDev, sizeof(blockIndices) * NUM_BLOCK_PER_BATCH);
    }
    //fpOut = fopen("output.txt", "w");
    // Visualization (disabled)
    /*
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(window_width, window_height);
    glutCreateWindow("OpenGL glDrawPixels demo");
    glutDisplayFunc(display);
    //glutReshapeFunc(reshape);
    //glutMouseFunc(mouse_button);
    //glutMotionFunc(mouse_motion);
    //glutKeyboardFunc(keyboard);
    //glutIdleFunc(idle);
    glEnable(GL_DEPTH_TEST);
    glClearColor(0.0, 0.0, 0.0, 1.0);
    //glPointSize(2);
    */
    // main loop: re-randomize every agent each tick, then run a full
    // neighbor-search + GPU-processing pass
    int tick = 0;
    while (tick++ < 1000) {
        for (int i = 0; i < NUM_AGENT; i++) {
            agentList[i]->data->x = (double)rand() / RAND_MAX;
            agentList[i]->data->y = (double)rand() / RAND_MAX;
            agentList[i]->data->velX = -1;
            agentList[i]->data->velY = -1;
            agentList[i]->data->goalX = -1;
            agentList[i]->data->goalY = -1;
            agentList[i]->data->v0 = -1;
            agentList[i]->data->mass = -1;
        }
        neighborSearching();
        //glutMainLoopEvent();
    }
    //glutMainLoop();
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/* W B Langdon at MUN 10 May 2007
* Program to demonstarte use of OpenGL's glDrawPixels
*/
#ifdef _WIN32
#include <windows.h>
#endif
/*#include <GL/gl.h>
#include <GL/glext.h>
#include <GL/glut.h>*/
#include "GL/glew.h"
#include "GL/freeglut.h"
#include <iostream>
#include <sstream>
#include "math.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <stdarg.h>
#define SIZE_NBOR_LIST 128
#define SIZE_SUBJ_LIST 32
#define NUM_AGENT 1024
#define DOT_R 1
#define NUM_CELL_DIM 8
#define NUM_CELL (NUM_CELL_DIM * NUM_CELL_DIM)
#define SIZE_BLOCK SIZE_NBOR_LIST
#define SIZE_NBOR_DATA 18
#define SIZE_SUBJ_DATA 16
#define NUM_BLOCK_PER_BATCH 16
#define NUM_BLOCK NUM_BLOCK_PER_BATCH
#define NUM_BATCH 2
#define RANGE 0.05 //environment dim ranging from 0 ~ 1
// Plain-old-data state for one agent. The same eight fields are mirrored,
// field by field, into the packed subject/neighbor staging buffers
// (see subjectList / neighborList).
class AgentData {
public:
    double x;        // position x, normalized to [0,1]
    double y;        // position y, normalized to [0,1]
    double velX;     // velocity x
    double velY;     // velocity y
    double goalX;    // goal position x
    double goalY;    // goal position y
    double v0;       // preferred speed
    double mass;     // agent mass
};
// An agent owning its AgentData record.
// Fixes: calDist accumulated the squared distance of doubles in a float,
// silently discarding precision; `data` was never deleted (leak) — Agent is
// only ever held by pointer in this file, so adding a destructor is safe.
class Agent {
public:
    AgentData *data;    // owned; released in the destructor
    // Place the agent at a uniformly random position in [0,1]x[0,1].
    Agent() {
        data = new AgentData();
        data->x = (double)rand() / RAND_MAX;
        data->y = (double)rand() / RAND_MAX;
    }
    ~Agent() {
        delete data;
    }
    // Euclidean distance to another agent, computed entirely in double.
    double calDist(Agent *other) {
        double dx = data->x - other->data->x;
        double dy = data->y - other->data->y;
        return sqrt(dx * dx + dy * dy);
    }
};
// Structure-of-arrays view over one block's slice of the packed, int-typed
// subject staging buffer. Each double-valued list occupies SIZE_SUBJ_LIST
// doubles = SIZE_SUBJ_LIST * 2 ints, hence the stride-2 spacing below.
struct subjectList {
    double *xList;
    double *yList;
    double *velXList;
    double *velYList;
    double *goalXList;
    double *goalYList;
    double *v0List;
    double *massList;
    // Bind all eight lists to block `inBatchBlockId` of `subjData`.
    void setBlock(int inBatchBlockId, int *subjData) {
        int *base = subjData + inBatchBlockId * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA;
        xList     = (double*)(base);
        yList     = (double*)(base + SIZE_SUBJ_LIST * 2);
        velXList  = (double*)(base + SIZE_SUBJ_LIST * 4);
        velYList  = (double*)(base + SIZE_SUBJ_LIST * 6);
        goalXList = (double*)(base + SIZE_SUBJ_LIST * 8);
        goalYList = (double*)(base + SIZE_SUBJ_LIST * 10);
        v0List    = (double*)(base + SIZE_SUBJ_LIST * 12);
        massList  = (double*)(base + SIZE_SUBJ_LIST * 14);
    }
};
// Structure-of-arrays view over one block's slice of the packed, int-typed
// neighbor staging buffer. Double-valued lists take SIZE_NBOR_LIST * 2 ints
// each; the two trailing id lists are plain int arrays (one int each).
struct neighborList {
    double *xList;
    double *yList;
    double *velXList;
    double *velYList;
    double *goalXList;
    double *goalYList;
    double *v0List;
    double *massList;
    int *nborIdList;    // global agent id of each neighbor entry
    int *subjIdList;    // sorted index of the subject each entry belongs to
    // Bind all ten lists to block `inBatchBlockId` of `nborData`.
    void setBatch(int inBatchBlockId, int *nborData) {
        int *base = nborData + inBatchBlockId * SIZE_NBOR_LIST * SIZE_NBOR_DATA;
        xList      = (double*)(base);
        yList      = (double*)(base + SIZE_NBOR_LIST * 2);
        velXList   = (double*)(base + SIZE_NBOR_LIST * 4);
        velYList   = (double*)(base + SIZE_NBOR_LIST * 6);
        goalXList  = (double*)(base + SIZE_NBOR_LIST * 8);
        goalYList  = (double*)(base + SIZE_NBOR_LIST * 10);
        v0List     = (double*)(base + SIZE_NBOR_LIST * 12);
        massList   = (double*)(base + SIZE_NBOR_LIST * 14);
        nborIdList = base + SIZE_NBOR_LIST * 16;
        subjIdList = base + SIZE_NBOR_LIST * 17;
    }
};
// Per-thread-block metadata describing one neighbor-list block.
struct blockIndices {
    int numNbor;         // number of valid entries in the neighbor list
    int numSubj;         // number of subject agents covered by this block
    int firstSubj;       // sorted index of this block's first subject agent
    int blockInBatchId;  // position of this block inside its batch
    int batchId;         // which GPUBatch this block belongs to
    int iteration;       // NOTE(review): never written in this file — purpose unconfirmed
};
// One double-buffering unit: a stream plus pinned host staging buffers
// (allocated with cudaMallocHost in main) and their device mirrors.
struct GPUBatch {
    cudaStream_t stream;  // stream carrying this batch's copies and kernel
    int *nborData;        // pinned host buffer: packed neighbor lists
    int *nborDataDev;     // device mirror of nborData
    int *subjData;        // pinned host buffer: packed subject lists
    int *subjDataDev;     // device mirror of subjData
    blockIndices *bi;     // pinned host buffer: per-block metadata
    blockIndices *biDev;  // device mirror of bi
};
// Window / framebuffer dimensions for the glDrawPixels visualization.
unsigned int window_width = 512, window_height = 512;
const int size = window_width*window_height;  // pixels per frame
Agent **agentList;                            // all NUM_AGENT simulated agents
float* pixels = new float[size * 3];          // host RGB framebuffer (3 floats/pixel)
int* cidStart = new int[NUM_CELL];            // first sorted-agent index per cell
int* cidEnd = new int[NUM_CELL];              // one-past-last sorted-agent index per cell
int* agentCids = new int[NUM_AGENT];          // Z-order cell id per agent
int* agentIds = new int[NUM_AGENT];           // agent ids, sorted by cell id
GPUBatch batches[NUM_BATCH];                  // double-buffered GPU batches
int batchId = 0;                              // batch currently being filled
//FILE *fpOut;
// One thread block processes one packed neighbor-list block; one thread per
// neighbor entry (the host launches with SIZE_BLOCK == SIZE_NBOR_LIST).
// Dynamic shared memory layout, in ints:
//   [0, SIZE_SUBJ_LIST*SIZE_SUBJ_DATA)  staged subject data
//   next SIZE_SUBJ_LIST ints            fSumX accumulators
//   next SIZE_SUBJ_LIST ints            fSumY accumulators
// NOTE(review): fSumX/fSumY are accumulated but never written back to global
// memory — this looks like placeholder work; confirm before relying on it.
__global__ void agentExecKernel(int *neighborDataDev, int *subjDataDev, blockIndices *biDev) {
    extern __shared__ int smem[];
    // load this block's neighbor and subject indices
    int numNbor = biDev[blockIdx.x].numNbor;
    int numSubj = biDev[blockIdx.x].numSubj;
    int batchId = biDev[blockIdx.x].batchId;
    int blockIdInBatch = biDev[blockIdx.x].blockInBatchId;
    int firstSubj = biDev[blockIdx.x].firstSubj;
    // cooperatively stage this block's subject data into shared memory
    int offset = 0;
    while (offset + threadIdx.x < SIZE_SUBJ_LIST * SIZE_SUBJ_DATA) {
        smem[offset + threadIdx.x] = subjDataDev[offset + threadIdx.x + blockIdx.x * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA];
        offset += blockDim.x;
    }
    __syncthreads();
    // set subject agent update zone in smem (two int accumulators per subject)
    int *fSumX = &smem[SIZE_SUBJ_LIST * SIZE_SUBJ_DATA]; //update zone for prop 1 follows reading zone
    int *fSumY = &smem[SIZE_SUBJ_LIST * SIZE_SUBJ_DATA + SIZE_SUBJ_LIST]; //update zone for prop 1 follows reading zone
    offset = 0;
    while (offset + threadIdx.x < SIZE_SUBJ_LIST) {
        fSumX[offset + threadIdx.x] = 10;   // arbitrary seed values
        fSumY[offset + threadIdx.x] = 20;
        offset += blockDim.x;
    }
    __syncthreads();
    // obtain head addresses of this block's packed neighbor lists
    // (layout matches neighborList::setBatch on the host)
    double *xList = (double*)&neighborDataDev[0 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *yList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 2 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *velXList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 4 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *velYList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 6 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *goalXList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 8 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *goalYList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 10 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *v0List = (double*)&neighborDataDev[SIZE_NBOR_LIST * 12 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    double *massList = (double*)&neighborDataDev[SIZE_NBOR_LIST * 14 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    int *nborList = &neighborDataDev[SIZE_NBOR_LIST * 16 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    int *subjList = &neighborDataDev[SIZE_NBOR_LIST * 17 + blockIdx.x * SIZE_NBOR_LIST * SIZE_NBOR_DATA];
    // load this thread's neighbor record into registers
    // NOTE(review): lanes >= numNbor read stale entries; the buffer is fully
    // allocated so the loads are safe, and those lanes are masked out below.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    double x = xList[threadIdx.x];
    double y = yList[threadIdx.x];
    double velX = velXList[threadIdx.x];
    double velY = velYList[threadIdx.x];
    double goalX = goalXList[threadIdx.x];
    double goalY = goalYList[threadIdx.x];
    double v0 = v0List[threadIdx.x];
    double mass = massList[threadIdx.x];
    int nbor = nborList[threadIdx.x];
    int subj = subjList[threadIdx.x];
    // do something with neighbor data: fold all fields into one value and
    // write it back over every field of this entry
    double res = x + y + velX + velY + goalX + goalY + v0 + mass;
    xList[threadIdx.x] = yList[threadIdx.x] = velXList[threadIdx.x] = velYList[threadIdx.x] =
        goalXList[threadIdx.x] = goalYList[threadIdx.x] = v0List[threadIdx.x] = massList[threadIdx.x] = res;
    // repeatedly bump the owning subject's shared-memory accumulators
    // (placeholder workload; only lanes holding a valid neighbor take part)
    int smemIdx = subj - firstSubj;
    int temp = 0;
    while (++temp < 100 && threadIdx.x < numNbor) {
        atomicInc((unsigned int*)&fSumX[smemIdx], NUM_AGENT);
        atomicInc((unsigned int*)&fSumY[smemIdx], NUM_AGENT);
    }
    __syncthreads();
    //printf("%d, %d %f, %f\n", threadIdx.x, subj, fSumX, fSumY);
}
namespace util {
    // Interleave the low 16 bits of x (even bit positions) and y (odd bit
    // positions) into a 32-bit Morton / Z-order code.
    int zcode(int x, int y)
    {
        x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210
        y &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210
        x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210
        y = (y ^ (y << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210
        y = (y ^ (y << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210
        x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210
        y = (y ^ (y << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10
        x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10
        y = (y ^ (y << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
        x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
        return x | (y << 1);
    }
    // Swap elements a and b of val.
    //template<class T>
    void swap(int *val, int a, int b) {
        int temp = val[a];
        val[a] = val[b];
        val[b] = temp;
    }
    // Randomized in-place quicksort of key[l, r) (r is exclusive); value is
    // permuted in lockstep so value[i] stays paired with key[i].
    // NOTE(review): recursion depth is O(n) in the worst case.
    //template<class T>
    void quickSort(int *key, int l, int r, int *value) {
        if (l == r)   // empty range — nothing to sort
            return;
        // random pivot, moved to the front
        int randIdx = l + rand() % (r - l);
        swap(key, l, randIdx);
        swap(value, l, randIdx);
        int pivot = key[l];
        // Lomuto-style partition: [l+1, i) holds keys < pivot
        int i = l + 1, j = l + 1;
        for (; j < r; j++) {
            if (key[j] < pivot) {
                swap(key, i, j);
                swap(value, i, j);
                i++;
            }
        }
        // place pivot between the partitions, then recurse on both sides
        swap(key, l, i - 1);
        swap(value, l, i - 1);
        quickSort(key, l, i - 1, value);
        quickSort(key, i, r, value);
    }
};
void neighborSearching() {
// create batch data structure alias
int inBatchBlockId = 0;
GPUBatch batch = batches[batchId];
int *nborData = batch.nborData;
int *nborDataDev = batch.nborDataDev;
int *subjData = batch.subjData;
int *subjDataDev = batch.subjDataDev;
// manipulate nborData with nborList structure;
neighborList nborList;
nborList.setBatch(inBatchBlockId, nborData);
subjectList subjList;
subjList.setBlock(inBatchBlockId, nborData);
// simulate agent moving
for (int i = 0; i < NUM_AGENT; i++) {
agentIds[i] = i;
int cidX = agentList[i]->data->x * NUM_CELL_DIM;
int cidY = agentList[i]->data->y * NUM_CELL_DIM;
agentCids[i] = util::zcode(cidX, cidY);
}
// sorting agent ptr based on cell id
util::quickSort(agentCids, 0, NUM_AGENT, agentIds);
// construct cidStart and cidEnd
for (int i = 0; i < NUM_AGENT; i++) {
int cidPrev = agentCids[i];
int cidNext = agentCids[(i + 1) % NUM_AGENT];
if (cidPrev != cidNext) {
cidEnd[cidPrev] = i + 1;
cidStart[cidNext] = (i + 1) % NUM_AGENT;
}
}
// simulating generate neighbor list
int nborCount = 0;
int nborCountSettled = 0;
int firstSubj = 0;
bool interrupted = false;
bool batchPrepared = false;
for (int i = 0; i < NUM_AGENT; i++) {
// pick agent based on sorted order, and get its bounding box
int agentId = agentIds[i];
Agent *subj = agentList[agentId];
double posX = subj->data->x;
double posY = subj->data->y;
int j = i - firstSubj;
subjList.xList[j] = subj->data->x;
subjList.yList[j] = subj->data->y;
subjList.velXList[j] = subj->data->velX;
subjList.velYList[j] = subj->data->velY;
subjList.goalXList[j] = subj->data->goalX;
subjList.goalYList[j] = subj->data->goalY;
subjList.v0List[j] = subj->data->v0;
subjList.massList[j] = subj->data->mass;
int cellXMin = (posX - RANGE) * NUM_CELL_DIM;
int cellXMax = (posX + RANGE) * NUM_CELL_DIM;
int cellYMin = (posY - RANGE) * NUM_CELL_DIM;
int cellYMax = (posY + RANGE) * NUM_CELL_DIM;
cellXMin = cellXMin < 0 ? 0 : cellXMin;
cellXMax = cellXMax >= NUM_CELL_DIM ? NUM_CELL_DIM - 1 : cellXMax;
cellYMin = cellYMin < 0 ? 0 : cellYMin;
cellYMax = cellYMax >= NUM_CELL_DIM ? NUM_CELL_DIM - 1 : cellYMax;
//fprintf(fpOut, "%d, %d, (%f, %f), (%d, %d, %d, %d)\n", i, agentId, posX, posY, cellXMin, cellXMax, cellYMin, cellYMax);
// iterate bounding box
for (int cidY = cellYMin; cidY <= cellYMax; cidY++) {
for (int cidX = cellXMin; cidX <= cellXMax; cidX++) {
int cellId = util::zcode(cidX, cidY);
for (int k = cidStart[cellId]; k < cidEnd[cellId]; k++) {
// fill neighbor list
Agent *nbor = agentList[agentIds[k]];
double dist = subj->calDist(nbor);
if (dist < RANGE) {
if (nborCount == SIZE_NBOR_LIST) {
interrupted = true;
break;
}
nborList.xList[nborCount] = nbor->data->x;
nborList.yList[nborCount] = nbor->data->y;
nborList.velXList[nborCount] = nbor->data->velX;
nborList.velYList[nborCount] = nbor->data->velY;
nborList.goalXList[nborCount] = nbor->data->goalX;
nborList.goalYList[nborCount] = nbor->data->goalY;
nborList.v0List[nborCount] = nbor->data->v0;
nborList.massList[nborCount] = nbor->data->mass;
nborList.nborIdList[nborCount] = agentIds[k];
nborList.subjIdList[nborCount] = i;
nborCount++;
}
}
}
}
// nborCount is temporary count, nborCountSettled is the number to be processed
if (!interrupted) {
nborCountSettled = nborCount;
// prepare for current block
batch.bi[inBatchBlockId].numNbor = nborCountSettled;
batch.bi[inBatchBlockId].numSubj = i - firstSubj;
batch.bi[inBatchBlockId].blockInBatchId = inBatchBlockId;
batch.bi[inBatchBlockId].batchId = batchId;
batch.bi[inBatchBlockId].firstSubj = firstSubj;
} else {
// prepare for next block
//debug
for (int jj = 0; jj < 128; jj++) {
int subjtemp = nborList.subjIdList[jj];
int firstSubjTemp = nborList.subjIdList[0];
//fprintf(fpOut, "%d, %d\n", subjtemp, firstSubj);
//fflush(fpOut);
}
interrupted = false;
nborCount = 0;
inBatchBlockId = (++inBatchBlockId) % NUM_BLOCK_PER_BATCH;
firstSubj = i;
i--;
nborList.setBatch(inBatchBlockId, nborData);
subjList.setBlock(inBatchBlockId, subjData);
if (inBatchBlockId == 0)
batchPrepared = true;
}
if (i == NUM_AGENT - 1)
batchPrepared = true;
if (firstSubj == 1020)
printf("Hello world");
// perform GPU processing
if (batchPrepared) {
batchPrepared = false;
cudaMemcpyAsync(nborDataDev, nborData, sizeof(int) * SIZE_NBOR_LIST * SIZE_NBOR_DATA * NUM_BLOCK_PER_BATCH, cudaMemcpyHostToDevice, batch.stream);
cudaMemcpyAsync(subjDataDev, subjData, sizeof(int) * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA * NUM_BLOCK_PER_BATCH, cudaMemcpyHostToDevice, batch.stream);
cudaMemcpyAsync(batch.biDev, batch.bi, sizeof(blockIndices) * NUM_BLOCK_PER_BATCH, cudaMemcpyHostToDevice, batch.stream);
//fprintf(fpOut, "\n one batch");
//fflush(fpOut);
error = cudaGetLastError();
printf("Copy Error: %s\n", cudaGetErrorString(error));
size_t modZoneSize = sizeof(int) * SIZE_SUBJ_LIST * 2;
size_t smemSize = sizeof(int) * SIZE_SUBJ_DATA * SIZE_SUBJ_LIST + modZoneSize;
agentExecKernel << <NUM_BLOCK, SIZE_BLOCK, smemSize, batch.stream >> >(nborDataDev, subjDataDev, batch.biDev);
error = cudaGetLastError();
printf("Exec Error: %s\n", cudaGetErrorString(error));
// create batch data structure alias
batchId = ++batchId % NUM_BATCH;
batch = batches[batchId];
nborData = batch.nborData;
nborDataDev = batch.nborDataDev;
subjData = batch.subjData;
subjDataDev = batch.subjDataDev;
nborList.setBatch(inBatchBlockId, nborData);
subjList.setBlock(inBatchBlockId, subjData);
// wait for next stream's previous work to be done
cudaStreamSynchronize(batch.stream);
cudaError_t error = cudaGetLastError();
printf("Sync Error: %s\n", cudaGetErrorString(error));
}
}
}
// GLUT display callback: paint every agent as a (2*DOT_R)x(2*DOT_R) white
// square into the RGB float pixel buffer, then blit it with glDrawPixels.
void display()
{
	// Start from a black canvas (3 floats per pixel).
	memset(pixels, 0, size * 3 * sizeof(float));
	for (int a = 0; a < NUM_AGENT; a++) {
		int canvasX = agentList[a]->data->x * window_width;
		int canvasY = agentList[a]->data->y * window_height;
		// Paint the dot, clipping against the window bounds.
		for (int px = canvasX - DOT_R; px < canvasX + DOT_R; px++) {
			for (int py = canvasY - DOT_R; py < canvasY + DOT_R; py++) {
				bool inside = px >= 0 && px < window_width &&
				              py >= 0 && py < window_height;
				if (!inside)
					continue;
				int idx = py * window_width + px;
				pixels[idx * 3] = 1;
				pixels[idx * 3 + 1] = 1;
				pixels[idx * 3 + 2] = 1;
			}
		}
	}
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
	// glDrawPixels writes a block of pixels to the framebuffer.
	glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels);
	glutSwapBuffers();
	glutPostRedisplay();
}
int main(int argc, char** argv) {
//Initialization
srand(0);
agentList = (Agent**)malloc(sizeof(Agent*) * NUM_AGENT);
for (int i = 0; i < NUM_AGENT; i++) {
agentList[i] = new Agent();
}
for (int i = 0; i < NUM_BATCH; i++) {
cudaStreamCreate(&batches[i].stream);
//batches[i].stream = 0;
cudaMallocHost((void**)&batches[i].nborData, sizeof(int) * SIZE_NBOR_LIST * SIZE_NBOR_DATA * NUM_BLOCK_PER_BATCH);
cudaMallocHost((void**)&batches[i].subjData, sizeof(int) * SIZE_SUBJ_DATA * SIZE_SUBJ_LIST * NUM_BLOCK_PER_BATCH);
cudaMalloc((void**)&batches[i].nborDataDev, sizeof(int) * SIZE_NBOR_LIST * SIZE_NBOR_DATA * NUM_BLOCK_PER_BATCH);
cudaMalloc((void**)&batches[i].subjDataDev, sizeof(int) * SIZE_SUBJ_LIST * SIZE_SUBJ_DATA * NUM_BLOCK_PER_BATCH);
cudaMallocHost((void**)&batches[i].bi, sizeof(blockIndices) * NUM_BLOCK_PER_BATCH);
cudaMalloc((void**)&batches[i].biDev, sizeof(blockIndices) * NUM_BLOCK_PER_BATCH);
}
//fpOut = fopen("output.txt", "w");
// Visualization
/*
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("OpenGL glDrawPixels demo");
glutDisplayFunc(display);
//glutReshapeFunc(reshape);
//glutMouseFunc(mouse_button);
//glutMotionFunc(mouse_motion);
//glutKeyboardFunc(keyboard);
//glutIdleFunc(idle);
glEnable(GL_DEPTH_TEST);
glClearColor(0.0, 0.0, 0.0, 1.0);
//glPointSize(2);
*/
int tick = 0;
while (tick++ < 1000) {
for (int i = 0; i < NUM_AGENT; i++) {
agentList[i]->data->x = (double)rand() / RAND_MAX;
agentList[i]->data->y = (double)rand() / RAND_MAX;
agentList[i]->data->velX = -1;
agentList[i]->data->velY = -1;
agentList[i]->data->goalX = -1;
agentList[i]->data->goalY = -1;
agentList[i]->data->v0 = -1;
agentList[i]->data->mass = -1;
}
neighborSearching();
//glutMainLoopEvent();
}
//glutMainLoop();
} |
ed748c1181a9cc451e5bccaaa6c2400ce283ca0c.hip | // !!! This is a file automatically generated by hipify!!!
// fnv.cuh
#define FNV_PRIME 0x01000193
#define fnv(x,y) ((x) * FNV_PRIME ^(y))
// Component-wise FNV-1-style mix of two uint4 vectors (ethash inner mix).
__device__ uint4 fnv4(uint4 a, uint4 b)
{
	uint4 r;
	r.x = fnv(a.x, b.x);
	r.y = fnv(a.y, b.y);
	r.z = fnv(a.z, b.z);
	r.w = fnv(a.w, b.w);
	return r;
}
// Fold the four lanes of v into one 32-bit word by chained FNV mixing.
__device__ uint32_t fnv_reduce(uint4 v)
{
	uint32_t r = fnv(v.x, v.y);
	r = fnv(r, v.z);
	return fnv(r, v.w);
}
// keccak.cuh
// Iota-step round constants for the 24 rounds of keccak-f[1600]
// (see FIPS 202 / the Keccak reference).
__device__ __constant__ uint64_t const keccak_round_constants[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
// XOR of five 64-bit lanes (held as uint2 halves); computes the keccak theta
// column parity. Relies on an operator^ overload for uint2 defined elsewhere
// in this project.
__device__ __forceinline__
uint2 xor5(const uint2 a, const uint2 b, const uint2 c, const uint2 d, const uint2 e) {
return a ^ b ^ c ^ d ^ e;
}
// XOR of three 64-bit lanes (uint2 halves); used throughout the keccak theta
// update a ^= d.
__device__ __forceinline__
uint2 xor3(const uint2 a, const uint2 b, const uint2 c) {
return a ^ b ^ c;
}
// Keccak chi nonlinearity: a ^ (~b & c).
__device__ __forceinline__
uint2 chi(const uint2 a, const uint2 b, const uint2 c) {
return a ^ (~b) & c;
}
// keccak-f[1600] absorb of the (header ++ nonce) block.
// Loads the 32-byte search header from constant memory plus the nonce lane
// in state[4], applies keccak padding (0x01 in lane 5, final 0x80 bit at the
// top of lane 8 -- i.e. a 9-lane / 72-byte rate), runs all 24 rounds, and
// writes the first 12 lanes of the permuted state back to `state`.
// Round 0 is hand-specialized to exploit the mostly-zero initial state, and
// round 23 computes only the lanes that feed the 12-lane output.
__device__ __forceinline__ void keccak_f1600_init(uint2* state)
{
uint2 s[25];
uint2 t[5], u, v;
const uint2 u2zero = make_uint2(0, 0);
// absorb: 32-byte header (constant memory) + nonce lane state[4]
devectorize2(d_header.uint4s[0], s[0], s[1]);
devectorize2(d_header.uint4s[1], s[2], s[3]);
s[4] = state[4];
// keccak padding: 0x01 after the 40-byte message, end-of-rate bit in lane 8
s[5] = make_uint2(1, 0);
s[6] = u2zero;
s[7] = u2zero;
s[8] = make_uint2(0, 0x80000000);
for (uint32_t i = 9; i < 25; i++)
s[i] = u2zero;
// round 0, specialized: most lanes are zero so the column parities collapse
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0].x = s[0].x ^ s[5].x;
t[0].y = s[0].y;
t[1] = s[1];
t[2] = s[2];
t[3].x = s[3].x;
t[3].y = s[3].y ^ s[8].y;
t[4] = s[4];
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[0]);
// rounds 1..22: the generic full round
for (int i = 1; i < 23; i++)
{
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0] , s[5] , s[10] , s[15] , s[20]);
t[1] = xor5(s[1] , s[6] , s[11] , s[16] , s[21]);
t[2] = xor5(s[2] , s[7] , s[12] , s[17] , s[22]);
t[3] = xor5(s[3] , s[8] , s[13] , s[18] , s[23]);
t[4] = xor5(s[4] , s[9] , s[14] , s[19] , s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[i]);
}
// round 23: only the lanes feeding the 12-lane output are computed
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[10] = xor3(s[10], t[4], u);
u = ROL2(t[2], 1);
s[6] = xor3(s[6], t[0], u);
s[16] = xor3(s[16], t[0], u);
u = ROL2(t[3], 1);
s[12] = xor3(s[12], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[18] = xor3(s[18], t[2], u);
u = ROL2(t[0], 1);
s[9] = xor3(s[9], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[2] = ROL2(s[12], 43);
s[4] = ROL2(s[24], 14);
s[8] = ROL2(s[16], 45);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[7] = ROL2(s[10], 3);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[23]);
// expose the first 12 lanes (96 bytes) of the permuted state
for(int i = 0; i < 12; ++i)
state[i] = s[i];
}
// keccak-f[1600] finalization: absorbs the 96-byte message held in
// state[0..11], applies keccak padding (0x01 in lane 12, final 0x80 bit at
// the top of lane 16 -- i.e. a 17-lane / 136-byte rate), runs all 24 rounds
// (round 0's theta is specialized for the zero upper lanes; round 23 only
// computes lane 0), and returns lane 0 with the last iota constant folded in.
// The caller presumably compares this 64-bit value against the search target.
__device__ __forceinline__ uint64_t keccak_f1600_final(uint2* state)
{
uint2 s[25];
uint2 t[5], u, v;
const uint2 u2zero = make_uint2(0, 0);
for (int i = 0; i < 12; ++i)
s[i] = state[i];
// keccak padding for the 96-byte message within the 136-byte rate
s[12] = make_uint2(1, 0);
s[13] = u2zero;
s[14] = u2zero;
s[15] = u2zero;
s[16] = make_uint2(0, 0x80000000);
for (uint32_t i = 17; i < 25; i++)
s[i] = u2zero;
// round 0, specialized: lanes 17..24 are zero so the parities collapse
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor3(s[0], s[5], s[10]);
t[1] = xor3(s[1], s[6], s[11]) ^ s[16];
t[2] = xor3(s[2], s[7], s[12]);
t[3] = s[3] ^ s[8];
t[4] = s[4] ^ s[9];
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[0]);
// rounds 1..22: the generic full round
for (int i = 1; i < 23; i++)
{
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[i]);
}
// round 23: only lane 0 of the output is needed
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
s[0] = xor3(s[0], t[4], ROL2(t[1], 1));
s[6] = xor3(s[6], t[0], ROL2(t[2], 1));
s[12] = xor3(s[12], t[1], ROL2(t[3], 1));
s[1] = ROL2(s[6], 44);
s[2] = ROL2(s[12], 43);
s[0] = chi(s[0], s[1], s[2]);
/* iota: a[0,0] ^= round constant */
//s[0] ^= vectorize(keccak_round_constants[23]);
// fold the final iota constant into the scalar result instead
return devectorize(s[0]) ^ keccak_round_constants[23];
}
// In-place keccak-f[1600] over s[0..24]: lanes 8..24 are zeroed and lane 8
// receives both the 0x01 pad byte and the final 0x80 bit (an 8-lane /
// 64-byte rate, i.e. the Keccak-512-style hash used for 64-byte items).
// Rounds 0..22 run in the loop; round 23 is specialized to produce only the
// first 8 output lanes, which is all the caller consumes.
__device__ __forceinline__ void SHA3_512(uint2* s) {
uint2 t[5], u, v;
for (uint32_t i = 8; i < 25; i++)
{
s[i] = make_uint2(0, 0);
}
// keccak padding: 0x01 and the end-of-rate bit both land in lane 8
s[8].x = 1;
s[8].y = 0x80000000;
for (int i = 0; i < 23; i++)
{
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[i]);
}
// round 23: only the lanes feeding s[0..7] are computed
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[10] = xor3(s[10], t[4], u);
u = ROL2(t[2], 1);
s[6] = xor3(s[6], t[0], u);
s[16] = xor3(s[16], t[0], u);
u = ROL2(t[3], 1);
s[12] = xor3(s[12], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[18] = xor3(s[18], t[2], u);
u = ROL2(t[0], 1);
s[9] = xor3(s[9], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[2] = ROL2(s[12], 43);
s[4] = ROL2(s[24], 14);
s[8] = ROL2(s[16], 45);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[7] = ROL2(s[10], 3);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[23]);
}
// globals.h
__constant__ uint32_t d_dag_size;
__constant__ hash128_t* d_dag;
__constant__ uint32_t d_light_size;
__constant__ hash64_t* d_light;
__constant__ hash32_t d_header;
__constant__ uint64_t d_target;
// kernel.h
#include <stdexcept>
#include <string>
#include <sstream>
#include <stdint.h>
#include <hip/hip_runtime.h>
// It is virtually impossible to get more than
// one solution per stream hash calculation
// Leave room for up to 4 results. A power
// of 2 here will yield better CUDA optimization
#define SEARCH_RESULTS 4
// Solution buffer shared between device and host.
typedef struct {
// Number of solutions recorded (writer semantics live in the search
// kernel, not visible here -- readers should clamp to SEARCH_RESULTS).
uint32_t count;
struct {
// One word for gid and 8 for mix hash
uint32_t gid;
uint32_t mix[8];
uint32_t pad[7]; // pad to size power of 2
} result[SEARCH_RESULTS];
} search_results;
#define ACCESSES 64
#define THREADS_PER_HASH (128 / 16)
typedef struct
{
uint4 uint4s[32 / sizeof(uint4)];
} hash32_t;
typedef struct
{
uint4 uint4s[128 / sizeof(uint4)];
} hash128_t;
typedef union {
uint32_t words[64 / sizeof(uint32_t)];
uint2 uint2s[64 / sizeof(uint2)];
uint4 uint4s[64 / sizeof(uint4)];
} hash64_t;
typedef union {
uint32_t words[200 / sizeof(uint32_t)];
uint2 uint2s[200 / sizeof(uint2)];
uint4 uint4s[200 / sizeof(uint4)];
} hash200_t;
void set_constants(
hash128_t* _dag,
uint32_t _dag_size,
hash64_t * _light,
uint32_t _light_size
);
void set_header(
hash32_t _header
);
void set_target(
uint64_t _target
);
void run_ethash_search(
uint32_t gridSize,
uint32_t blockSize,
hipStream_t stream,
volatile search_results* g_output,
uint64_t start_nonce,
uint32_t parallelHash
);
void ethash_generate_dag(
uint64_t dag_size,
uint32_t blocks,
uint32_t threads,
hipStream_t stream
);
// Exception type thrown by CUDA_SAFE_CALL below, carrying the formatted
// CUDA error message (function, line, and error string).
struct cuda_runtime_error : public virtual std::runtime_error
{
cuda_runtime_error( const std::string &msg ) : std::runtime_error(msg) {}
};
#define CUDA_SAFE_CALL(call) \
do { \
hipError_t err = call; \
if (hipSuccess != err) { \
std::stringstream ss; \
ss << "CUDA error in func " \
<< __FUNCTION__ \
<< " at line " \
<< __LINE__ \
<< ' ' \
<< hipGetErrorString(err); \
throw cuda_runtime_error(ss.str()); \
} \
} while (0) | ed748c1181a9cc451e5bccaaa6c2400ce283ca0c.cu | // fnv.cuh
#define FNV_PRIME 0x01000193
#define fnv(x,y) ((x) * FNV_PRIME ^(y))
// Component-wise FNV-1-style mix of two uint4 vectors (ethash inner mix).
__device__ uint4 fnv4(uint4 a, uint4 b)
{
	uint4 r;
	r.x = fnv(a.x, b.x);
	r.y = fnv(a.y, b.y);
	r.z = fnv(a.z, b.z);
	r.w = fnv(a.w, b.w);
	return r;
}
// Fold the four lanes of v into one 32-bit word by chained FNV mixing.
__device__ uint32_t fnv_reduce(uint4 v)
{
	uint32_t r = fnv(v.x, v.y);
	r = fnv(r, v.z);
	return fnv(r, v.w);
}
// keccak.cuh
// Iota-step round constants for the 24 rounds of keccak-f[1600]
// (see FIPS 202 / the Keccak reference).
__device__ __constant__ uint64_t const keccak_round_constants[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808AULL,
0x8000000080008000ULL, 0x000000000000808BULL, 0x0000000080000001ULL,
0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008AULL,
0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000AULL,
0x000000008000808BULL, 0x800000000000008BULL, 0x8000000000008089ULL,
0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
0x000000000000800AULL, 0x800000008000000AULL, 0x8000000080008081ULL,
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
// XOR of five 64-bit lanes (held as uint2 halves); computes the keccak theta
// column parity. Relies on an operator^ overload for uint2 defined elsewhere
// in this project.
__device__ __forceinline__
uint2 xor5(const uint2 a, const uint2 b, const uint2 c, const uint2 d, const uint2 e) {
return a ^ b ^ c ^ d ^ e;
}
// XOR of three 64-bit lanes (uint2 halves); used throughout the keccak theta
// update a ^= d.
__device__ __forceinline__
uint2 xor3(const uint2 a, const uint2 b, const uint2 c) {
return a ^ b ^ c;
}
// Keccak chi nonlinearity: a ^ (~b & c).
__device__ __forceinline__
uint2 chi(const uint2 a, const uint2 b, const uint2 c) {
return a ^ (~b) & c;
}
__device__ __forceinline__ void keccak_f1600_init(uint2* state)
{
uint2 s[25];
uint2 t[5], u, v;
const uint2 u2zero = make_uint2(0, 0);
devectorize2(d_header.uint4s[0], s[0], s[1]);
devectorize2(d_header.uint4s[1], s[2], s[3]);
s[4] = state[4];
s[5] = make_uint2(1, 0);
s[6] = u2zero;
s[7] = u2zero;
s[8] = make_uint2(0, 0x80000000);
for (uint32_t i = 9; i < 25; i++)
s[i] = u2zero;
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0].x = s[0].x ^ s[5].x;
t[0].y = s[0].y;
t[1] = s[1];
t[2] = s[2];
t[3].x = s[3].x;
t[3].y = s[3].y ^ s[8].y;
t[4] = s[4];
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[0]);
for (int i = 1; i < 23; i++)
{
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0] , s[5] , s[10] , s[15] , s[20]);
t[1] = xor5(s[1] , s[6] , s[11] , s[16] , s[21]);
t[2] = xor5(s[2] , s[7] , s[12] , s[17] , s[22]);
t[3] = xor5(s[3] , s[8] , s[13] , s[18] , s[23]);
t[4] = xor5(s[4] , s[9] , s[14] , s[19] , s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[i]);
}
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[10] = xor3(s[10], t[4], u);
u = ROL2(t[2], 1);
s[6] = xor3(s[6], t[0], u);
s[16] = xor3(s[16], t[0], u);
u = ROL2(t[3], 1);
s[12] = xor3(s[12], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[18] = xor3(s[18], t[2], u);
u = ROL2(t[0], 1);
s[9] = xor3(s[9], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[2] = ROL2(s[12], 43);
s[4] = ROL2(s[24], 14);
s[8] = ROL2(s[16], 45);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[7] = ROL2(s[10], 3);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[23]);
for(int i = 0; i < 12; ++i)
state[i] = s[i];
}
__device__ __forceinline__ uint64_t keccak_f1600_final(uint2* state)
{
uint2 s[25];
uint2 t[5], u, v;
const uint2 u2zero = make_uint2(0, 0);
for (int i = 0; i < 12; ++i)
s[i] = state[i];
s[12] = make_uint2(1, 0);
s[13] = u2zero;
s[14] = u2zero;
s[15] = u2zero;
s[16] = make_uint2(0, 0x80000000);
for (uint32_t i = 17; i < 25; i++)
s[i] = u2zero;
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor3(s[0], s[5], s[10]);
t[1] = xor3(s[1], s[6], s[11]) ^ s[16];
t[2] = xor3(s[2], s[7], s[12]);
t[3] = s[3] ^ s[8];
t[4] = s[4] ^ s[9];
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[0]);
for (int i = 1; i < 23; i++)
{
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[i]);
}
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
s[0] = xor3(s[0], t[4], ROL2(t[1], 1));
s[6] = xor3(s[6], t[0], ROL2(t[2], 1));
s[12] = xor3(s[12], t[1], ROL2(t[3], 1));
s[1] = ROL2(s[6], 44);
s[2] = ROL2(s[12], 43);
s[0] = chi(s[0], s[1], s[2]);
/* iota: a[0,0] ^= round constant */
//s[0] ^= vectorize(keccak_round_constants[23]);
return devectorize(s[0]) ^ keccak_round_constants[23];
}
__device__ __forceinline__ void SHA3_512(uint2* s) {
uint2 t[5], u, v;
for (uint32_t i = 8; i < 25; i++)
{
s[i] = make_uint2(0, 0);
}
s[8].x = 1;
s[8].y = 0x80000000;
for (int i = 0; i < 23; i++)
{
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[5] = xor3(s[5], t[4], u);
s[10] = xor3(s[10], t[4], u);
s[15] = xor3(s[15], t[4], u);
s[20] = xor3(s[20], t[4], u);
u = ROL2(t[2], 1);
s[1] = xor3(s[1], t[0], u);
s[6] = xor3(s[6], t[0], u);
s[11] = xor3(s[11], t[0], u);
s[16] = xor3(s[16], t[0], u);
s[21] = xor3(s[21], t[0], u);
u = ROL2(t[3], 1);
s[2] = xor3(s[2], t[1], u);
s[7] = xor3(s[7], t[1], u);
s[12] = xor3(s[12], t[1], u);
s[17] = xor3(s[17], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[8] = xor3(s[8], t[2], u);
s[13] = xor3(s[13], t[2], u);
s[18] = xor3(s[18], t[2], u);
s[23] = xor3(s[23], t[2], u);
u = ROL2(t[0], 1);
s[4] = xor3(s[4], t[3], u);
s[9] = xor3(s[9], t[3], u);
s[14] = xor3(s[14], t[3], u);
s[19] = xor3(s[19], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[22] = ROL2(s[14], 39);
s[14] = ROL2(s[20], 18);
s[20] = ROL2(s[2], 62);
s[2] = ROL2(s[12], 43);
s[12] = ROL2(s[13], 25);
s[13] = ROL2(s[19], 8);
s[19] = ROL2(s[23], 56);
s[23] = ROL2(s[15], 41);
s[15] = ROL2(s[4], 27);
s[4] = ROL2(s[24], 14);
s[24] = ROL2(s[21], 2);
s[21] = ROL2(s[8], 55);
s[8] = ROL2(s[16], 45);
s[16] = ROL2(s[5], 36);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[18] = ROL2(s[17], 15);
s[17] = ROL2(s[11], 10);
s[11] = ROL2(s[7], 6);
s[7] = ROL2(s[10], 3);
s[10] = ROL2(u, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
u = s[5]; v = s[6];
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
s[8] = chi(s[8], s[9], u);
s[9] = chi(s[9], u, v);
u = s[10]; v = s[11];
s[10] = chi(s[10], s[11], s[12]);
s[11] = chi(s[11], s[12], s[13]);
s[12] = chi(s[12], s[13], s[14]);
s[13] = chi(s[13], s[14], u);
s[14] = chi(s[14], u, v);
u = s[15]; v = s[16];
s[15] = chi(s[15], s[16], s[17]);
s[16] = chi(s[16], s[17], s[18]);
s[17] = chi(s[17], s[18], s[19]);
s[18] = chi(s[18], s[19], u);
s[19] = chi(s[19], u, v);
u = s[20]; v = s[21];
s[20] = chi(s[20], s[21], s[22]);
s[21] = chi(s[21], s[22], s[23]);
s[22] = chi(s[22], s[23], s[24]);
s[23] = chi(s[23], s[24], u);
s[24] = chi(s[24], u, v);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[i]);
}
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
t[0] = xor5(s[0], s[5], s[10], s[15], s[20]);
t[1] = xor5(s[1], s[6], s[11], s[16], s[21]);
t[2] = xor5(s[2], s[7], s[12], s[17], s[22]);
t[3] = xor5(s[3], s[8], s[13], s[18], s[23]);
t[4] = xor5(s[4], s[9], s[14], s[19], s[24]);
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
/* theta: a[0,i], a[1,i], .. a[4,i] ^= d[i] */
u = ROL2(t[1], 1);
s[0] = xor3(s[0], t[4], u);
s[10] = xor3(s[10], t[4], u);
u = ROL2(t[2], 1);
s[6] = xor3(s[6], t[0], u);
s[16] = xor3(s[16], t[0], u);
u = ROL2(t[3], 1);
s[12] = xor3(s[12], t[1], u);
s[22] = xor3(s[22], t[1], u);
u = ROL2(t[4], 1);
s[3] = xor3(s[3], t[2], u);
s[18] = xor3(s[18], t[2], u);
u = ROL2(t[0], 1);
s[9] = xor3(s[9], t[3], u);
s[24] = xor3(s[24], t[3], u);
/* rho pi: b[..] = rotl(a[..], ..) */
u = s[1];
s[1] = ROL2(s[6], 44);
s[6] = ROL2(s[9], 20);
s[9] = ROL2(s[22], 61);
s[2] = ROL2(s[12], 43);
s[4] = ROL2(s[24], 14);
s[8] = ROL2(s[16], 45);
s[5] = ROL2(s[3], 28);
s[3] = ROL2(s[18], 21);
s[7] = ROL2(s[10], 3);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
u = s[0]; v = s[1];
s[0] = chi(s[0], s[1], s[2]);
s[1] = chi(s[1], s[2], s[3]);
s[2] = chi(s[2], s[3], s[4]);
s[3] = chi(s[3], s[4], u);
s[4] = chi(s[4], u, v);
s[5] = chi(s[5], s[6], s[7]);
s[6] = chi(s[6], s[7], s[8]);
s[7] = chi(s[7], s[8], s[9]);
/* iota: a[0,0] ^= round constant */
s[0] ^= vectorize(keccak_round_constants[23]);
}
// globals.h
__constant__ uint32_t d_dag_size;
__constant__ hash128_t* d_dag;
__constant__ uint32_t d_light_size;
__constant__ hash64_t* d_light;
__constant__ hash32_t d_header;
__constant__ uint64_t d_target;
// kernel.h
#include <stdexcept>
#include <string>
#include <sstream>
#include <stdint.h>
#include <cuda_runtime.h>
// It is virtually impossible to get more than
// one solution per stream hash calculation
// Leave room for up to 4 results. A power
// of 2 here will yield better CUDA optimization
#define SEARCH_RESULTS 4
typedef struct {
uint32_t count;
struct {
// One word for gid and 8 for mix hash
uint32_t gid;
uint32_t mix[8];
uint32_t pad[7]; // pad to size power of 2
} result[SEARCH_RESULTS];
} search_results;
#define ACCESSES 64
#define THREADS_PER_HASH (128 / 16)
typedef struct
{
uint4 uint4s[32 / sizeof(uint4)];
} hash32_t;
typedef struct
{
uint4 uint4s[128 / sizeof(uint4)];
} hash128_t;
typedef union {
uint32_t words[64 / sizeof(uint32_t)];
uint2 uint2s[64 / sizeof(uint2)];
uint4 uint4s[64 / sizeof(uint4)];
} hash64_t;
typedef union {
uint32_t words[200 / sizeof(uint32_t)];
uint2 uint2s[200 / sizeof(uint2)];
uint4 uint4s[200 / sizeof(uint4)];
} hash200_t;
void set_constants(
hash128_t* _dag,
uint32_t _dag_size,
hash64_t * _light,
uint32_t _light_size
);
void set_header(
hash32_t _header
);
void set_target(
uint64_t _target
);
void run_ethash_search(
uint32_t gridSize,
uint32_t blockSize,
cudaStream_t stream,
volatile search_results* g_output,
uint64_t start_nonce,
uint32_t parallelHash
);
void ethash_generate_dag(
uint64_t dag_size,
uint32_t blocks,
uint32_t threads,
cudaStream_t stream
);
struct cuda_runtime_error : public virtual std::runtime_error
{
cuda_runtime_error( const std::string &msg ) : std::runtime_error(msg) {}
};
#define CUDA_SAFE_CALL(call) \
do { \
cudaError_t err = call; \
if (cudaSuccess != err) { \
std::stringstream ss; \
ss << "CUDA error in func " \
<< __FUNCTION__ \
<< " at line " \
<< __LINE__ \
<< ' ' \
<< cudaGetErrorString(err); \
throw cuda_runtime_error(ss.str()); \
} \
} while (0) |
73d5f55106e7ddccef6cbe451879115ef67d148d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ClusterLocalGray.cu
//
#include "ClusterLocalGray.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// DEF_BLOCK_X DEF_BLOCK_Y
//
#define DEF_BLOCK_X 16
#define DEF_BLOCK_Y 16
// MAX_NB_SIDE_SPAN
//
#define MAX_NB_SIDE_SPAN 16
// IMG_SMEM_SPAN
// block
#define IMG_SMEM_SPAN 48
// GRAY_RESULTION
// bin
#define GRAY_RESULTION 4
// GRAYBIN_NUM
// 256 4 64 bin
#define GRAYBIN_NUM 64
// CHECK_SHARED_MOMORY_DEBUG
//
// #define CHECK_SHARED_MOMORY_DEBUG
// __device__ checkSharedMemory
// block
// 0 1
__device__ int checkSharedMemory(
unsigned char *imgSharedMem, //
ImageCuda inimg, //
int imgX, // block x
int imgY, // block x
int sharedMenSpan //
);
// __device__ checkSharedMemory
__device__ int checkSharedMemory(unsigned char *imgSharedMem, ImageCuda inimg,
int imgX, int imgY, int sharedMenSpan)
{
for (int y = 0; y < sharedMenSpan; y++) {
for (int x = 0; x < sharedMenSpan; x++) {
//
if (imgSharedMem[y * sharedMenSpan + x] !=
inimg.imgMeta.imgData[(imgY + y) *
inimg.pitchBytes + imgX + x]) {
//
printf("%d, %d ", imgSharedMem[y * sharedMenSpan + x],
inimg.imgMeta.imgData[(imgY + y) * inimg.pitchBytes +
imgX + x]);
printf("%d, %d %d, %d \n", x, y,
imgX + x, (imgY + y));
// return 1;
}
}
}
return 0;
}
// Kernel _clusterLocalGrayKer
// block 32 * 32 block 64 * 64
// 4 neighborsSideSpan
// 64 bin hGrayPercentTh
// lGrayPercentTh
// grayGapTh
static __global__ void _clusterLocalGrayKer(
ImageCuda inimg, //
ImageCuda outimg, //
unsigned char neighborsSideSpan, //
unsigned char hGrayPercentTh, //
unsigned char lGrayPercentTh, //
unsigned char grayGapTh //
);
// Kernel _clusterLocalGrayKer
static __global__ void _clusterLocalGrayKer(ImageCuda inimg, ImageCuda outimg,
unsigned char neighborsSideSpan,
unsigned char hGrayPercentTh,
unsigned char lGrayPercentTh,
unsigned char grayGapTh)
{
// dstc dstr x y
// c column r row
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = blockIdx.y * blockDim.y + threadIdx.y;
//
int curpos = (dstr + MAX_NB_SIDE_SPAN) * inimg.pitchBytes + dstc +
MAX_NB_SIDE_SPAN;
// block 48 * 48
__shared__ unsigned char imgSharedMem[2304];
// block
//
//
unsigned char *tempImgData = inimg.imgMeta.imgData;
int temp1 = threadIdx.y * IMG_SMEM_SPAN + threadIdx.x;
int temp2 = dstr * inimg.pitchBytes + dstc;
imgSharedMem[temp1] = tempImgData[temp2];
imgSharedMem[temp1 + MAX_NB_SIDE_SPAN] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN];
imgSharedMem[temp1 + MAX_NB_SIDE_SPAN * 2] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN * 2];
temp1 = (threadIdx.y + MAX_NB_SIDE_SPAN) * IMG_SMEM_SPAN;
temp2 = (dstr + MAX_NB_SIDE_SPAN) * inimg.pitchBytes + dstc;
imgSharedMem[temp1 + threadIdx.x] = tempImgData[temp2];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN * 2] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN * 2];
temp1 = (threadIdx.y + MAX_NB_SIDE_SPAN * 2) * IMG_SMEM_SPAN;
temp2 = (dstr + MAX_NB_SIDE_SPAN * 2) * inimg.pitchBytes + dstc;
imgSharedMem[temp1 + threadIdx.x] = tempImgData[temp2];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN * 2] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN * 2];
__syncthreads();
#ifdef CHECK_SHARED_MOMORY_DEBUG
//
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (checkSharedMemory(imgSharedMem, inimg, dstc, dstr, IMG_SMEM_SPAN) == 0) {
printf("blockIdx.x = %d, blockIdx.y = %d, copy sharedMen successful!\n",
blockIdx.x, blockIdx.y);
} else {
printf("blockIdx.x = %d, blockIdx.y = %d, copy sharedMen failed!\n",
blockIdx.x, blockIdx.y);
}
}
#endif
// bin
unsigned short cp[GRAYBIN_NUM];
//
for (int i = 0; i < GRAYBIN_NUM; ++i)
cp[i] = 0;
//
unsigned short neighborsSpan = neighborsSideSpan * 2 + 1;
//
unsigned short neighborsArea = neighborsSpan * neighborsSpan;
// bin
for (int i = 0; i < neighborsArea; ++i)
{
cp[imgSharedMem[threadIdx.x + MAX_NB_SIDE_SPAN - neighborsSideSpan + i %
neighborsSpan +
(threadIdx.y + MAX_NB_SIDE_SPAN - neighborsSideSpan + i /
neighborsSpan)
* IMG_SMEM_SPAN] >> 2] += 1;
}
//
int m = 0;
//
int gSum = 0;
//
int lNumTh = lGrayPercentTh * neighborsArea / 100;
for ( int n = 0; m < lNumTh && n < GRAYBIN_NUM; n++) {
//
m += cp[n];
//
gSum += n * cp[n];
}
//
unsigned char aveLg = (unsigned char)(GRAY_RESULTION * gSum / m + 2 + 0.5f);
//
m = 0;
gSum = 0;
//
int hNumTh = hGrayPercentTh * neighborsArea / 100;
for (int n= 64 - 1; m < hNumTh && n >= 0; n--) {
//
m += cp[n];
//
gSum += n * cp[n];
}
//
unsigned char aveHg = (unsigned char)(GRAY_RESULTION * gSum / m + 2 + 0.5f);
// pixel gray
unsigned char gc = imgSharedMem[IMG_SMEM_SPAN * (threadIdx.y + MAX_NB_SIDE_SPAN) + threadIdx.x + MAX_NB_SIDE_SPAN];
//
unsigned char ag = (aveHg + aveLg) >> 1;
// grayGapTh
if ((aveHg - aveLg) < grayGapTh)
outimg.imgMeta.imgData[curpos] = ag;
else if (gc >= aveHg )
outimg.imgMeta.imgData[curpos] = fmin(aveHg * 1.03f, 255); // Enhancing high gray.
else if ( gc <= aveLg )
outimg.imgMeta.imgData[curpos] = aveLg * 0.98f; // Depressing low gray.
else if ( gc >= ag )
outimg.imgMeta.imgData[curpos] = (aveHg + ag) >> 1;
else
outimg.imgMeta.imgData[curpos] = (aveLg + ag) >> 1;
}
// clusterLocalGray
__host__ int ClusterLocalGray::clusterLocalGray(Image *inimg, Image *outimg)
{
//
int errcode;
//
if (inimg == NULL || outimg == NULL)
return NULL_POINTER;
// device
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// ROI
ImageCuda insubimgCud;
errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
if (errcode != NO_ERROR)
return errcode;
// inimg outimg outimg
// device
errcode = ImageBasicOp::copyToCurrentDevice(inimg, outimg);
if (errcode != NO_ERROR)
return errcode;
//
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Kernel
dim3 gridsize, blocksize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
//
gridsize.x = (outsubimgCud.imgMeta.width - MAX_NB_SIDE_SPAN * 2 + blocksize.x - 1) /
blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height - MAX_NB_SIDE_SPAN * 2 + blocksize.y - 1) /
blocksize.y;
//
hipLaunchKernelGGL(( _clusterLocalGrayKer), dim3(gridsize), dim3(blocksize), 0, 0, insubimgCud, outsubimgCud,
this->getNeighborsSideSpan(),
this->getHGrayPercentTh(),
this->getLGrayPercentTh(),
this->getGrayGapTh());
if (hipGetLastError() != hipSuccess) {
//
return CUDA_ERROR;
}
return NO_ERROR;
}
| 73d5f55106e7ddccef6cbe451879115ef67d148d.cu | // ClusterLocalGray.cu
// 实现图像的分类降噪操作
#include "ClusterLocalGray.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y
// 定义了默认的线程块的尺寸。
#define DEF_BLOCK_X 16
#define DEF_BLOCK_Y 16
// 宏:MAX_NB_SIDE_SPAN
// 像素点最大处理范围。
#define MAX_NB_SIDE_SPAN 16
// 宏:IMG_SMEM_SPAN
// 一个 block 对应的有效共享内存大小。
#define IMG_SMEM_SPAN 48
// 宏:GRAY_RESULTION
// 每一个 bin 的大小。
#define GRAY_RESULTION 4
// 宏:GRAYBIN_NUM
// 将 256 个灰度值按 4 个为一组进行划分,划分成 64 个 bin。
#define GRAYBIN_NUM 64
// 宏:CHECK_SHARED_MOMORY_DEBUG
// 条件编译开关。
// #define CHECK_SHARED_MOMORY_DEBUG
// __device__ 函数:checkSharedMemory(判断内存拷贝是否成功)
// 条件编译执行函数。判断 block 内是否正确地将图像相应位置拷贝到共享内存中。
// 成功返回 0,不成功则返回 1。
__device__ int checkSharedMemory(
unsigned char *imgSharedMem, // 共享内存数组
ImageCuda inimg, // 输入图像
int imgX, // block 对应的图像范围的开始 x 坐标
int imgY, // block 对应的图像范围的开始 x 坐标
int sharedMenSpan // 共享内存的大小
);
// __device__ 函数:checkSharedMemory(判断内存拷贝是否成功)
__device__ int checkSharedMemory(unsigned char *imgSharedMem, ImageCuda inimg,
int imgX, int imgY, int sharedMenSpan)
{
for (int y = 0; y < sharedMenSpan; y++) {
for (int x = 0; x < sharedMenSpan; x++) {
// 如果共享内存与所对应的图像上区域有像素点值不同,则认为拷贝失败。
if (imgSharedMem[y * sharedMenSpan + x] !=
inimg.imgMeta.imgData[(imgY + y) *
inimg.pitchBytes + imgX + x]) {
// 打印错误点的坐标。
printf("%d, %d ", imgSharedMem[y * sharedMenSpan + x],
inimg.imgMeta.imgData[(imgY + y) * inimg.pitchBytes +
imgX + x]);
printf("%d, %d %d, %d \n", x, y,
imgX + x, (imgY + y));
// return 1;
}
}
}
return 0;
}
// Kernel 函数:_clusterLocalGrayKer(图像的分类降噪)
// 每一个 block 的大小为 32 * 32, 在一个 block 内需要将 64 * 64 大小的图像
// 拷贝到共享内存中,即一个线程拷贝 4 个像素点。再根据输入参数 neighborsSideSpan
// 统计像素点领域内像素点的个数,将其分成 64 个 bin,然后再根据 hGrayPercentTh
// 和 lGrayPercentTh 计算出当前像素点高像素比例和低像素比例之间的差值,与
// grayGapTh 进行对比从而选择对该点进行增强、降低、中庸操作。
static __global__ void _clusterLocalGrayKer(
ImageCuda inimg, // 输入图像
ImageCuda outimg, // 输出图像
unsigned char neighborsSideSpan, // 领域大小
unsigned char hGrayPercentTh, // 高像素比例
unsigned char lGrayPercentTh, // 低像素比例
unsigned char grayGapTh // 外部参数
);
// Kernel 函数:_clusterLocalGrayKer(图像的分类降噪)
static __global__ void _clusterLocalGrayKer(ImageCuda inimg, ImageCuda outimg,
unsigned char neighborsSideSpan,
unsigned char hGrayPercentTh,
unsigned char lGrayPercentTh,
unsigned char grayGapTh)
{
// dstc 和 dstr 分别表示线程处理的像素点的坐标的 x 和 y 分量(其中,
// c 表示 column, r 表示 row)。
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = blockIdx.y * blockDim.y + threadIdx.y;
// 获取当前像素点在暂存图像中的相对位置。
int curpos = (dstr + MAX_NB_SIDE_SPAN) * inimg.pitchBytes + dstc +
MAX_NB_SIDE_SPAN;
// 为 block 内的线程需要处理的图像开辟共享内存,大小为 48 * 48。
__shared__ unsigned char imgSharedMem[2304];
// 将本 block 对应在图像上的区域拷贝到共享内存中,考虑合并访问的问题,
// 不是拷贝一个方块,而是尽量同一行。同时将一些变量先计算出来以便减少
// 计算量。
unsigned char *tempImgData = inimg.imgMeta.imgData;
int temp1 = threadIdx.y * IMG_SMEM_SPAN + threadIdx.x;
int temp2 = dstr * inimg.pitchBytes + dstc;
imgSharedMem[temp1] = tempImgData[temp2];
imgSharedMem[temp1 + MAX_NB_SIDE_SPAN] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN];
imgSharedMem[temp1 + MAX_NB_SIDE_SPAN * 2] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN * 2];
temp1 = (threadIdx.y + MAX_NB_SIDE_SPAN) * IMG_SMEM_SPAN;
temp2 = (dstr + MAX_NB_SIDE_SPAN) * inimg.pitchBytes + dstc;
imgSharedMem[temp1 + threadIdx.x] = tempImgData[temp2];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN * 2] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN * 2];
temp1 = (threadIdx.y + MAX_NB_SIDE_SPAN * 2) * IMG_SMEM_SPAN;
temp2 = (dstr + MAX_NB_SIDE_SPAN * 2) * inimg.pitchBytes + dstc;
imgSharedMem[temp1 + threadIdx.x] = tempImgData[temp2];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN];
imgSharedMem[temp1 + threadIdx.x + MAX_NB_SIDE_SPAN * 2] =
tempImgData[temp2 + MAX_NB_SIDE_SPAN * 2];
__syncthreads();
#ifdef CHECK_SHARED_MOMORY_DEBUG
// 在每一个块的第一个线程内判断内存拷贝是否成功。
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (checkSharedMemory(imgSharedMem, inimg, dstc, dstr, IMG_SMEM_SPAN) == 0) {
printf("blockIdx.x = %d, blockIdx.y = %d, copy sharedMen successful!\n",
blockIdx.x, blockIdx.y);
} else {
printf("blockIdx.x = %d, blockIdx.y = %d, copy sharedMen failed!\n",
blockIdx.x, blockIdx.y);
}
}
#endif
// 存放该点处理区域范围内像素值的分布,根据 bin 进行划分。
unsigned short cp[GRAYBIN_NUM];
// 共享内存初始化
for (int i = 0; i < GRAYBIN_NUM; ++i)
cp[i] = 0;
// 获得该像素点处理范围的边长。
unsigned short neighborsSpan = neighborsSideSpan * 2 + 1;
// 获得该像素点处理范围的面积。
unsigned short neighborsArea = neighborsSpan * neighborsSpan;
// 对该像素点处理范围内的点根据 bin 进行基数排序。
for (int i = 0; i < neighborsArea; ++i)
{
cp[imgSharedMem[threadIdx.x + MAX_NB_SIDE_SPAN - neighborsSideSpan + i %
neighborsSpan +
(threadIdx.y + MAX_NB_SIDE_SPAN - neighborsSideSpan + i /
neighborsSpan)
* IMG_SMEM_SPAN] >> 2] += 1;
}
// 比例范围内点的总数。
int m = 0;
// 比例范围内像素点的灰度累计值。
int gSum = 0;
// 根据处理范围和低处理比例获得低处理点的数量。
int lNumTh = lGrayPercentTh * neighborsArea / 100;
for ( int n = 0; m < lNumTh && n < GRAYBIN_NUM; n++) {
// 点数累积。
m += cp[n];
// 灰度累积。
gSum += n * cp[n];
}
// 计算低处理比例内的平均灰度值。
unsigned char aveLg = (unsigned char)(GRAY_RESULTION * gSum / m + 2 + 0.5f);
// 重新赋值为初值。
m = 0;
gSum = 0;
// 根据处理范围和高处理比例获得高处理点的数量。
int hNumTh = hGrayPercentTh * neighborsArea / 100;
for (int n= 64 - 1; m < hNumTh && n >= 0; n--) {
// 点数累积。
m += cp[n];
// 灰度累积。
gSum += n * cp[n];
}
// 计算高处理比例内的平均灰度值。
unsigned char aveHg = (unsigned char)(GRAY_RESULTION * gSum / m + 2 + 0.5f);
// 当前 pixel 的 gray 值。
unsigned char gc = imgSharedMem[IMG_SMEM_SPAN * (threadIdx.y + MAX_NB_SIDE_SPAN) + threadIdx.x + MAX_NB_SIDE_SPAN];
// 计算平庸值。
unsigned char ag = (aveHg + aveLg) >> 1;
// 根据 grayGapTh 外部参数决定对当前点进行何种处理。
if ((aveHg - aveLg) < grayGapTh)
outimg.imgMeta.imgData[curpos] = ag;
else if (gc >= aveHg )
outimg.imgMeta.imgData[curpos] = fmin(aveHg * 1.03f, 255); // Enhancing high gray.
else if ( gc <= aveLg )
outimg.imgMeta.imgData[curpos] = aveLg * 0.98f; // Depressing low gray.
else if ( gc >= ag )
outimg.imgMeta.imgData[curpos] = (aveHg + ag) >> 1;
else
outimg.imgMeta.imgData[curpos] = (aveLg + ag) >> 1;
}
// Member method: clusterLocalGray (cluster-based local gray-level processing).
// Copies inimg to the device, mirrors it into outimg, extracts both ROI
// sub-images, and launches _clusterLocalGrayKer over the image interior
// (the MAX_NB_SIDE_SPAN-wide border is excluded from the grid).
// Returns NO_ERROR on success, or the first error code encountered.
__host__ int ClusterLocalGray::clusterLocalGray(Image *inimg, Image *outimg)
{
    // Reject null image pointers up front.
    if (inimg == NULL || outimg == NULL)
        return NULL_POINTER;

    // Push the input image to the current device.
    int err = ImageBasicOp::copyToCurrentDevice(inimg);
    if (err != NO_ERROR)
        return err;

    // Extract the ROI sub-image of the input.
    ImageCuda insubimgCud;
    if ((err = ImageBasicOp::roiSubImage(inimg, &insubimgCud)) != NO_ERROR)
        return err;

    // Copy the input image wholesale into the output image, placing the
    // output on the current device as well.
    if ((err = ImageBasicOp::copyToCurrentDevice(inimg, outimg)) != NO_ERROR)
        return err;

    // Extract the ROI sub-image of the output.
    ImageCuda outsubimgCud;
    if ((err = ImageBasicOp::roiSubImage(outimg, &outsubimgCud)) != NO_ERROR)
        return err;

    // Launch configuration: subtract the border width on each side so the
    // grid covers only the pixels that can be processed.
    dim3 blocksize(DEF_BLOCK_X, DEF_BLOCK_Y);
    dim3 gridsize(
        (outsubimgCud.imgMeta.width - MAX_NB_SIDE_SPAN * 2 + blocksize.x - 1) /
            blocksize.x,
        (outsubimgCud.imgMeta.height - MAX_NB_SIDE_SPAN * 2 + blocksize.y - 1) /
            blocksize.y);

    // Launch the clustering kernel.
    _clusterLocalGrayKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud,
                                                  this->getNeighborsSideSpan(),
                                                  this->getHGrayPercentTh(),
                                                  this->getLGrayPercentTh(),
                                                  this->getGrayGapTh());

    // cudaGetLastError only reports launch-configuration failures here; the
    // kernel itself executes asynchronously.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;

    return NO_ERROR;
}
|
2740df565727cdbef8831571cdaa0d7e6f872110.hip | // !!! This is a file automatically generated by hipify!!!
// based on https://github.com/NVIDIA/cuda-samples
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B
 * wA is A's width and wB is B's width
 *
 * Classic tiled algorithm: each thread block computes one
 * BLOCK_SIZE x BLOCK_SIZE tile of C, streaming matching tiles of A and B
 * through shared memory; each thread produces exactly one element of C.
 *
 * NOTE(review): there are no bounds guards, so wA, wB and the launch grid
 * must be exact multiples of BLOCK_SIZE (the host code sizes the grid by
 * exact division) — confirm before reusing with arbitrary sizes.
 */
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
                                                        float *B, int wA,
                                                        int wB) {
  // Block index
  int bx = blockIdx.x;
  int by = blockIdx.y;

  // Thread index
  int tx = threadIdx.x;
  int ty = threadIdx.y;

  // Index of the first sub-matrix of A processed by the block
  int aBegin = wA * BLOCK_SIZE * by;

  // Index of the last sub-matrix of A processed by the block
  int aEnd   = aBegin + wA - 1;

  // Step size used to iterate through the sub-matrices of A
  // (A tiles advance along a row, so the step is BLOCK_SIZE columns)
  int aStep  = BLOCK_SIZE;

  // Index of the first sub-matrix of B processed by the block
  int bBegin = BLOCK_SIZE * bx;

  // Step size used to iterate through the sub-matrices of B
  // (B tiles advance down a column, so the step is BLOCK_SIZE rows)
  int bStep  = BLOCK_SIZE * wB;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  float Csub = 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int a = aBegin, b = bBegin;
       a <= aEnd;
       a += aStep, b += bStep) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];

    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    // Load the matrices from device memory
    // to shared memory; each thread loads
    // one element of each matrix
    As[ty][tx] = A[a + wA * ty + tx];
    Bs[ty][tx] = B[b + wB * ty + tx];

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * Bs[k][tx];
    }

    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Write the block sub-matrix to device memory;
  // each thread writes one element
  int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
  C[c + wB * ty + tx] = Csub;
}
// Fill the first `size` elements of `data` with the constant `val`.
void ConstantInit(float *data, int size, float val) {
  float *end = data + size;
  for (float *p = data; p != end; ++p) {
    *p = val;
  }
}
/**
 * Run a simple test of matrix multiplication using CUDA.
 *
 * Allocates host/device buffers, initializes A to 1.0 and B to valB, times
 * nIter kernel launches with CUDA events, and verifies the result against
 * the analytically known answer (every element of C equals dimsA.x * valB,
 * since A is all ones). dimsB.x and dimsA.y must be multiples of block_size:
 * the grid is sized by exact division and the kernel has no bounds guards.
 * Returns EXIT_SUCCESS on a verified result, EXIT_FAILURE otherwise.
 */
int MatrixMultiply(int argc, char **argv,
                   int block_size, const dim3 &dimsA,
                   const dim3 &dimsB) {
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));

    // BUG FIX: the original only checked h_C for allocation failure; a
    // failed h_A/h_B allocation would have crashed in ConstantInit below.
    if (h_A == NULL || h_B == NULL) {
        fprintf(stderr, "Failed to allocate host matrix A or B!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize host memory
    const float valB = 0.01f;
    ConstantInit(h_A, size_A, 1.0f);
    ConstantInit(h_B, size_B, valB);

    // Allocate device memory
    float *d_A, *d_B, *d_C;

    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));

    if (h_C == NULL) {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }

    checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
    checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
    checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));

    // copy host memory to device
    checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice));

    // Setup execution parameters (one thread per output element, one block
    // per BLOCK_SIZE x BLOCK_SIZE tile of C)
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);

    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");

    // Performs warmup operation using matrixMul CUDA kernel
    if (block_size == 16) {
        hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
                                                dimsA.x, dimsB.x);
    } else {
        hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
                                                dimsA.x, dimsB.x);
    }

    printf("done\n");
    // BUG FIX (minor): the synchronize result was previously ignored; an
    // asynchronous error from the warmup launch would have surfaced
    // confusingly during the timed section.
    checkCudaErrors(hipDeviceSynchronize());

    // Allocate CUDA events that we'll use for timing
    hipEvent_t start;
    checkCudaErrors(hipEventCreate(&start));
    hipEvent_t stop;
    checkCudaErrors(hipEventCreate(&stop));

    // Record the start event
    checkCudaErrors(hipEventRecord(start, NULL));

    // Execute the kernel
    int nIter = 300;

    for (int j = 0; j < nIter; j++) {
        if (block_size == 16) {
            hipLaunchKernelGGL(( MatrixMulCUDA<16>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
                                                    dimsA.x, dimsB.x);
        } else {
            hipLaunchKernelGGL(( MatrixMulCUDA<32>) , dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B,
                                                    dimsA.x, dimsB.x);
        }
    }

    // Record the stop event
    checkCudaErrors(hipEventRecord(stop, NULL));

    // Wait for the stop event to complete
    checkCudaErrors(hipEventSynchronize(stop));

    float msecTotal = 0.0f;
    checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));

    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
                               static_cast<double>(dimsA.y) *
                               static_cast<double>(dimsB.x);
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
                       (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
        " WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);

    // Copy result from device to host
    checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost));
    printf("Checking computed result for correctness: ");
    bool correct = true;

    // test relative error by the formula
    //     |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|>  < eps
    double eps = 1.e-6;  // machine zero

    for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err / abs_val / dot_length;

        if (rel_err > eps) {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
                   i, h_C[i], dimsA.x * valB, eps);
            correct = false;
        }
    }

    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");

    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    checkCudaErrors(hipFree(d_A));
    checkCudaErrors(hipFree(d_B));
    checkCudaErrors(hipFree(d_C));
    // BUG FIX: the timing events were previously leaked.
    checkCudaErrors(hipEventDestroy(start));
    checkCudaErrors(hipEventDestroy(stop));

    printf("\nNOTE: The CUDA Samples are not meant for performance"\
           "measurements. Results may vary when GPU Boost is enabled.\n");

    if (correct) {
        return EXIT_SUCCESS;
    } else {
        return EXIT_FAILURE;
    }
}
/**
 * Program main.
 *
 * Parses optional -wA/-hA/-wB/-hB (and -device) command-line overrides,
 * checks that A and B are multiplication-compatible (A's width equals
 * B's height), then runs the benchmark and exits with its status.
 */
int main(int argc, char **argv) {
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?")) {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf(" Note: Outer matrix dimensions of A & B matrices" \
               " must be equal.\n");
        exit(EXIT_SUCCESS);
    }

    // This will pick the best possible CUDA capable device, otherwise
    // override the device ID based on input provided at the command line.
    // FIX: the returned id was previously stored in an unused variable;
    // the call is kept purely for its device-selection side effect.
    findCudaDevice(argc, (const char **)argv);

    int block_size = 32;

    dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
    dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);

    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }

    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }

    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }

    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }

    if (dimsA.x != dimsB.y) {
        printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
               dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }

    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
           dimsB.x, dimsB.y);

    int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);

    exit(matrix_result);
}
| 2740df565727cdbef8831571cdaa0d7e6f872110.cu | // based on https://github.com/NVIDIA/cuda-samples
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication which makes use of shared memory
* to ensure data reuse, the matrix multiplication is done using tiling approach.
* It has been written for clarity of exposition to illustrate various CUDA programming
* principles, not with the goal of providing the most performant generic kernel for matrix multiplication.
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B
 * wA is A's width and wB is B's width
 *
 * Classic tiled algorithm: each thread block computes one
 * BLOCK_SIZE x BLOCK_SIZE tile of C, streaming matching tiles of A and B
 * through shared memory; each thread produces exactly one element of C.
 *
 * NOTE(review): there are no bounds guards, so wA, wB and the launch grid
 * must be exact multiples of BLOCK_SIZE (the host code sizes the grid by
 * exact division) — confirm before reusing with arbitrary sizes.
 */
template <int BLOCK_SIZE> __global__ void MatrixMulCUDA(float *C, float *A,
                                                        float *B, int wA,
                                                        int wB) {
  // Block index
  int bx = blockIdx.x;
  int by = blockIdx.y;

  // Thread index
  int tx = threadIdx.x;
  int ty = threadIdx.y;

  // Index of the first sub-matrix of A processed by the block
  int aBegin = wA * BLOCK_SIZE * by;

  // Index of the last sub-matrix of A processed by the block
  int aEnd   = aBegin + wA - 1;

  // Step size used to iterate through the sub-matrices of A
  // (A tiles advance along a row, so the step is BLOCK_SIZE columns)
  int aStep  = BLOCK_SIZE;

  // Index of the first sub-matrix of B processed by the block
  int bBegin = BLOCK_SIZE * bx;

  // Step size used to iterate through the sub-matrices of B
  // (B tiles advance down a column, so the step is BLOCK_SIZE rows)
  int bStep  = BLOCK_SIZE * wB;

  // Csub is used to store the element of the block sub-matrix
  // that is computed by the thread
  float Csub = 0;

  // Loop over all the sub-matrices of A and B
  // required to compute the block sub-matrix
  for (int a = aBegin, b = bBegin;
       a <= aEnd;
       a += aStep, b += bStep) {
    // Declaration of the shared memory array As used to
    // store the sub-matrix of A
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];

    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    // Load the matrices from device memory
    // to shared memory; each thread loads
    // one element of each matrix
    As[ty][tx] = A[a + wA * ty + tx];
    Bs[ty][tx] = B[b + wB * ty + tx];

    // Synchronize to make sure the matrices are loaded
    __syncthreads();

    // Multiply the two matrices together;
    // each thread computes one element
    // of the block sub-matrix
#pragma unroll
    for (int k = 0; k < BLOCK_SIZE; ++k) {
      Csub += As[ty][k] * Bs[k][tx];
    }

    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration
    __syncthreads();
  }

  // Write the block sub-matrix to device memory;
  // each thread writes one element
  int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
  C[c + wB * ty + tx] = Csub;
}
// Fill the first `size` elements of `data` with the constant `val`.
void ConstantInit(float *data, int size, float val) {
  float *end = data + size;
  for (float *p = data; p != end; ++p) {
    *p = val;
  }
}
/**
 * Run a simple test of matrix multiplication using CUDA.
 *
 * Allocates host/device buffers, initializes A to 1.0 and B to valB, times
 * nIter kernel launches with CUDA events, and verifies the result against
 * the analytically known answer (every element of C equals dimsA.x * valB,
 * since A is all ones). dimsB.x and dimsA.y must be multiples of block_size:
 * the grid is sized by exact division and the kernel has no bounds guards.
 * Returns EXIT_SUCCESS on a verified result, EXIT_FAILURE otherwise.
 */
int MatrixMultiply(int argc, char **argv,
                   int block_size, const dim3 &dimsA,
                   const dim3 &dimsB) {
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = reinterpret_cast<float *>(malloc(mem_size_A));
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = reinterpret_cast<float *>(malloc(mem_size_B));

    // BUG FIX: the original only checked h_C for allocation failure; a
    // failed h_A/h_B allocation would have crashed in ConstantInit below.
    if (h_A == NULL || h_B == NULL) {
        fprintf(stderr, "Failed to allocate host matrix A or B!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize host memory
    const float valB = 0.01f;
    ConstantInit(h_A, size_A, 1.0f);
    ConstantInit(h_B, size_B, valB);

    // Allocate device memory
    float *d_A, *d_B, *d_C;

    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = reinterpret_cast<float *>(malloc(mem_size_C));

    if (h_C == NULL) {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }

    checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_A), mem_size_A));
    checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_B), mem_size_B));
    checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_C), mem_size_C));

    // copy host memory to device
    checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));

    // Setup execution parameters (one thread per output element, one block
    // per BLOCK_SIZE x BLOCK_SIZE tile of C)
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);

    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");

    // Performs warmup operation using matrixMul CUDA kernel
    if (block_size == 16) {
        MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
                                                dimsA.x, dimsB.x);
    } else {
        MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
                                                dimsA.x, dimsB.x);
    }

    printf("done\n");
    // BUG FIX (minor): the synchronize result was previously ignored; an
    // asynchronous error from the warmup launch would have surfaced
    // confusingly during the timed section.
    checkCudaErrors(cudaDeviceSynchronize());

    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    checkCudaErrors(cudaEventCreate(&start));
    cudaEvent_t stop;
    checkCudaErrors(cudaEventCreate(&stop));

    // Record the start event
    checkCudaErrors(cudaEventRecord(start, NULL));

    // Execute the kernel
    int nIter = 300;

    for (int j = 0; j < nIter; j++) {
        if (block_size == 16) {
            MatrixMulCUDA<16> <<< grid, threads >>>(d_C, d_A, d_B,
                                                    dimsA.x, dimsB.x);
        } else {
            MatrixMulCUDA<32> <<< grid, threads >>>(d_C, d_A, d_B,
                                                    dimsA.x, dimsB.x);
        }
    }

    // Record the stop event
    checkCudaErrors(cudaEventRecord(stop, NULL));

    // Wait for the stop event to complete
    checkCudaErrors(cudaEventSynchronize(stop));

    float msecTotal = 0.0f;
    checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
                               static_cast<double>(dimsA.y) *
                               static_cast<double>(dimsB.x);
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
                       (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
        " WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);

    // Copy result from device to host
    checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost));
    printf("Checking computed result for correctness: ");
    bool correct = true;

    // test relative error by the formula
    //     |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|>  < eps
    double eps = 1.e-6;  // machine zero

    for (int i = 0; i < static_cast<int>(dimsC.x * dimsC.y); i++) {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err / abs_val / dot_length;

        if (rel_err > eps) {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
                   i, h_C[i], dimsA.x * valB, eps);
            correct = false;
        }
    }

    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");

    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    checkCudaErrors(cudaFree(d_A));
    checkCudaErrors(cudaFree(d_B));
    checkCudaErrors(cudaFree(d_C));
    // BUG FIX: the timing events were previously leaked.
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));

    printf("\nNOTE: The CUDA Samples are not meant for performance"\
           "measurements. Results may vary when GPU Boost is enabled.\n");

    if (correct) {
        return EXIT_SUCCESS;
    } else {
        return EXIT_FAILURE;
    }
}
/**
 * Program main.
 *
 * Parses optional -wA/-hA/-wB/-hB (and -device) command-line overrides,
 * checks that A and B are multiplication-compatible (A's width equals
 * B's height), then runs the benchmark and exits with its status.
 */
int main(int argc, char **argv) {
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?")) {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf(" Note: Outer matrix dimensions of A & B matrices" \
               " must be equal.\n");
        exit(EXIT_SUCCESS);
    }

    // This will pick the best possible CUDA capable device, otherwise
    // override the device ID based on input provided at the command line.
    // FIX: the returned id was previously stored in an unused variable;
    // the call is kept purely for its device-selection side effect.
    findCudaDevice(argc, (const char **)argv);

    int block_size = 32;

    dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1);
    dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1);

    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA")) {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }

    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA")) {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }

    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB")) {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }

    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB")) {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }

    if (dimsA.x != dimsB.y) {
        printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
               dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }

    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y,
           dimsB.x, dimsB.y);

    int matrix_result = MatrixMultiply(argc, argv, block_size, dimsA, dimsB);

    exit(matrix_result);
}
|
ada4bd2485843740a803ac3189fbf1cad9a97a48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// CSR SpMM kernel: one thread per matrix row; computes
// y_i = alpha * A * x_i + beta * y_i for num_vecs dense vectors at once,
// reading each nonzero of A only once.
__global__ void
zmgecsrmv_kernel(
    int num_rows,
    int num_cols,
    int num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    magmaDoubleComplex * dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex * dy)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;
    // Dynamic shared memory: num_vecs partial sums per thread, laid out as
    // dot[threadIdx.x + i*blockDim.x] for vector i (sized by the launcher as
    // num_vecs * BLOCK_SIZE * sizeof(magmaDoubleComplex)).
    extern __shared__ magmaDoubleComplex dot[];
    if( row<num_rows ){
        // Clear this thread's accumulators.
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
        // Extent of this row's nonzeros in CSR storage.
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        // Walk the row once, updating all num_vecs accumulators per nonzero
        // so dval/dcolind are loaded only once per entry.
        for( j=start; j<end; j++ ){
            int col = dcolind [ j ];
            magmaDoubleComplex val = dval[ j ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] +=
                                    val * dx[ col + i*num_cols ];
        }
        // Apply alpha/beta scaling and write back. Each thread touches only
        // its own shared-memory slots, so no __syncthreads() is required.
        for( int i=0; i<num_vecs; i++ )
            dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
                                            + beta * dy[ row + i*num_cols ];
    }
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs mama_int_t
number of vectors
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgecsrmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr dy,
    magma_queue_t queue )
{
    // One thread per matrix row, BLOCK_SIZE threads per block.
    dim3 block( BLOCK_SIZE, 1, 1 );
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1 );
    // Dynamic shared memory: num_vecs partial-sum slots per thread.
    unsigned int shmemBytes = num_vecs * BLOCK_SIZE
                              * sizeof( magmaDoubleComplex );
    hipLaunchKernelGGL(( zmgecsrmv_kernel), dim3(grid), dim3(block), shmemBytes, 0,
        m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
    return MAGMA_SUCCESS;
}
| ada4bd2485843740a803ac3189fbf1cad9a97a48.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// CSR SpMM kernel: one thread per matrix row; computes
// y_i = alpha * A * x_i + beta * y_i for num_vecs dense vectors at once,
// reading each nonzero of A only once.
__global__ void
zmgecsrmv_kernel(
    int num_rows,
    int num_cols,
    int num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex * dval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    magmaDoubleComplex * dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex * dy)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;
    // Dynamic shared memory: num_vecs partial sums per thread, laid out as
    // dot[threadIdx.x + i*blockDim.x] for vector i (sized by the launcher as
    // num_vecs * BLOCK_SIZE * sizeof(magmaDoubleComplex)).
    extern __shared__ magmaDoubleComplex dot[];
    if( row<num_rows ){
        // Clear this thread's accumulators.
        for( int i=0; i<num_vecs; i++ )
            dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
        // Extent of this row's nonzeros in CSR storage.
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        // Walk the row once, updating all num_vecs accumulators per nonzero
        // so dval/dcolind are loaded only once per entry.
        for( j=start; j<end; j++ ){
            int col = dcolind [ j ];
            magmaDoubleComplex val = dval[ j ];
            for( int i=0; i<num_vecs; i++ )
                dot[ threadIdx.x + i*blockDim.x ] +=
                                    val * dx[ col + i*num_cols ];
        }
        // Apply alpha/beta scaling and write back. Each thread touches only
        // its own shared-memory slots, so no __syncthreads() is required.
        for( int i=0; i<num_vecs; i++ )
            dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
                                            + beta * dy[ row + i*num_cols ];
    }
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs mama_int_t
number of vectors
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zmgecsrmv(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magma_int_t num_vecs,
    magmaDoubleComplex alpha,
    magmaDoubleComplex_ptr dval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr dy,
    magma_queue_t queue )
{
    // One thread per matrix row, BLOCK_SIZE threads per block.
    dim3 block( BLOCK_SIZE, 1, 1 );
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1 );
    // Dynamic shared memory: num_vecs partial-sum slots per thread.
    unsigned int shmemBytes = num_vecs * BLOCK_SIZE
                              * sizeof( magmaDoubleComplex );
    zmgecsrmv_kernel<<< grid, block, shmemBytes >>>
        (m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
    return MAGMA_SUCCESS;
}
|
7abfc5fb31092dd5e43116f6c3698adbacb50c63.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
// Pull in the kernel/template implementations; this translation unit only
// emits explicit instantiations for one boolean configuration
// (<..., true, false> — the comment below identifies this as the
// sign-write mode; the meaning of the second flag is defined in
// filtered_lrelu.cu).
#include "filtered_lrelu.cu"

// Template/kernel specializations for sign write mode.

// Full op, 32-bit indexing.
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);

// Full op, 64-bit indexing.
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);

// Activation/signs only for generic variant. 64-bit indexing.
template void* choose_filtered_lrelu_act_kernel<c10::Half, true, false>(void);
template void* choose_filtered_lrelu_act_kernel<float, true, false>(void);
template void* choose_filtered_lrelu_act_kernel<double, true, false>(void);

// Copy filters to constant memory.
template hipError_t copy_filters<true, false>(hipStream_t stream);
| 7abfc5fb31092dd5e43116f6c3698adbacb50c63.cu | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
// Pull in the kernel/template implementations; this translation unit only
// emits explicit instantiations for one boolean configuration
// (<..., true, false> — the comment below identifies this as the
// sign-write mode; the meaning of the second flag is defined in
// filtered_lrelu.cu).
#include "filtered_lrelu.cu"

// Template/kernel specializations for sign write mode.

// Full op, 32-bit indexing.
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int32_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);

// Full op, 64-bit indexing.
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<c10::Half, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);
template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel<float, int64_t, true, false>(const filtered_lrelu_kernel_params& p, int sharedKB);

// Activation/signs only for generic variant. 64-bit indexing.
template void* choose_filtered_lrelu_act_kernel<c10::Half, true, false>(void);
template void* choose_filtered_lrelu_act_kernel<float, true, false>(void);
template void* choose_filtered_lrelu_act_kernel<double, true, false>(void);

// Copy filters to constant memory.
template cudaError_t copy_filters<true, false>(cudaStream_t stream);
|
cfb0d23e77564e48c79df650907965d76b5e5f55.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
// Include extra helper functions for CUDA - handles image load/save
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
const int k = 128;                         // number of k-means clusters
const char *imageFilename = "suburb.ppm";  // input image, loaded 4 bytes/pixel via sdkLoadPPM4ub
// Per-cluster accumulators updated with atomicAdd in the 'function' kernel
// and read back each iteration via hipMemcpyFromSymbol in main().
__device__ float cTemp[k*3];  // running RGB sums, 3 floats per cluster
__device__ float count[k];    // number of pixels assigned to each cluster
__device__ bool stop=false;   // NOTE(review): never referenced on the device; main() uses its own local 'stop'
__global__ void function(unsigned char *in_data,unsigned char *o_data, unsigned int width, unsigned int height,float * clus){
  // One thread per pixel: find the nearest of the k cluster centres
  // (L1 distance over RGB), store the winning cluster index in the output
  // image, and accumulate the pixel into the per-cluster sums (cTemp) and
  // counts (count) that the host uses to recompute the centres.
  int i = threadIdx.y + blockDim.y*blockIdx.y;  // pixel row
  int j = threadIdx.x + blockDim.x*blockIdx.x;  // pixel column
  if (i >= height || j >= width) return;        // guard for non-divisible sizes
  float r = 0.0f, g = 0.0f, b = 0.0f;           // colour of this pixel (set on first win)
  float sum = 99999.0f;                         // best (smallest) distance so far
  int cVal = 0;                                 // index of the best cluster
  int idx = i*width*4 + j*4;                    // 4 bytes per pixel (RGBA layout)
  for (int a = 0; a < k; a++){
    float newsum = fabsf(clus[a*3]   - in_data[idx])
                 + fabsf(clus[a*3+1] - in_data[idx+1])
                 + fabsf(clus[a*3+2] - in_data[idx+2]);
    if (sum > newsum){
      cVal = a;
      r = in_data[idx];
      g = in_data[idx+1];
      b = in_data[idx+2];
      // Store the winning index in all three channels; a second kernel
      // later replaces each index with the cluster's colour component.
      o_data[idx]   = a;
      o_data[idx+1] = a;
      o_data[idx+2] = a;
      sum = newsum;
    }
  }
  // BUG FIX: the sums are laid out 3 floats per cluster (the host reads
  // clusterT[a*3 .. a*3+2]), so the accumulators for cluster cVal live at
  // cVal*3 .. cVal*3+2 -- the original wrote to cVal, cVal+1, cVal+2,
  // corrupting neighbouring clusters' red sums. Also use a float literal
  // for the count increment.
  atomicAdd(&count[cVal], 1.0f);
  atomicAdd(&cTemp[cVal*3],   r);
  atomicAdd(&cTemp[cVal*3+1], g);
  atomicAdd(&cTemp[cVal*3+2], b);
}
__global__ void function2(unsigned char *in_data,unsigned char *o_data, unsigned int width, unsigned int height,float * clus){
  // Second pass: each output byte currently holds a cluster index written
  // by the assignment kernel; replace it with the matching component of
  // that cluster's centre colour.
  const int row = threadIdx.y + blockDim.y*blockIdx.y;
  const int col = threadIdx.x + blockDim.x*blockIdx.x;
  const int base = (row*width + col)*4;       // 4 bytes per pixel
  const int idxR = o_data[base];              // read all indices before any write
  const int idxG = o_data[base+1];
  const int idxB = o_data[base+2];
  o_data[base]   = clus[idxR*3];
  o_data[base+1] = clus[idxG*3 + 1];
  o_data[base+2] = clus[idxB*3 + 2];
}
int main(int argc, char **argv){
    // Overall timer: covers image I/O, setup and the clustering itself.
    StopWatchInterface *Ov_timer = NULL;
    sdkCreateTimer(&Ov_timer);
    sdkStartTimer(&Ov_timer);

    // Locate and load the input image (4 bytes per pixel).
    char *imagePath = sdkFindFilePath(imageFilename, "");
    if (imagePath == NULL){
        printf("Unable to source image file: %s\n", imageFilename);
        exit(EXIT_FAILURE);
    }
    unsigned int width, height;
    unsigned char *hData = NULL;
    unsigned char *dData = NULL;
    unsigned char *dOut = NULL;
    float *dClus = NULL;
    char outputFilename[1024];
    sdkLoadPPM4ub(imagePath, &hData, &width, &height);

    int csize = k;
    unsigned int size = width * height * sizeof(unsigned char) * 4;
    unsigned char *hOutputData = (unsigned char *)malloc(size);
    // Host copies of the cluster centres, per-iteration sums and counts.
    float *clusterH = (float *)malloc(csize * 3 * sizeof(float));
    float *clusterT = (float *)malloc(csize * 3 * sizeof(float));
    float *counter  = (float *)malloc(csize * sizeof(float));

    hipMalloc((void **)&dData, size);
    hipMalloc((void **)&dOut, size);
    hipMalloc((void **)&dClus, csize * 3 * sizeof(float));

    // Seed each centre with the colour of a randomly chosen pixel.
    for (int i = 0; i < csize; i++){
        // BUG FIX: '%' binds tighter than '*', so the original
        // "rand() % width*height" computed (rand()%width)*height and only
        // ever sampled a thin subset of pixel indices.
        int randomnum = rand() % (width * height);
        clusterH[i*3]   = hData[randomnum*4];
        clusterH[i*3+1] = hData[randomnum*4 + 1];
        clusterH[i*3+2] = hData[randomnum*4 + 2];
    }

    hipMemcpy(dData, hData, size, hipMemcpyHostToDevice);
    hipMemcpy(dClus, clusterH, csize * 3 * sizeof(float), hipMemcpyHostToDevice);

    // 32x32 thread blocks; the grid assumes width and height are
    // multiples of 32 -- TODO confirm for all input images.
    dim3 dimBlock(32, 32, 1);
    dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);

    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);

    // Lloyd iterations: assign pixels on the GPU, average the per-cluster
    // sums on the host, and stop when the mean L1 movement of the centres
    // falls below 10.
    bool stop = false;
    while (!stop){
        hipLaunchKernelGGL(function, dimGrid, dimBlock, 0, 0, dData, dOut, width, height, dClus);
        hipDeviceSynchronize();
        float error = 0.0f;
        hipMemcpyFromSymbol(clusterT, cTemp, csize * sizeof(float) * 3, 0, hipMemcpyDeviceToHost);
        hipMemcpyFromSymbol(counter, count, csize * sizeof(float), 0, hipMemcpyDeviceToHost);
        for (int a = 0; a < csize; a++){
            if (counter[a] != 0){
                error += fabsf(clusterH[a*3]   - clusterT[a*3]  /counter[a])
                       + fabsf(clusterH[a*3+1] - clusterT[a*3+1]/counter[a])
                       + fabsf(clusterH[a*3+2] - clusterT[a*3+2]/counter[a]);
            }
        }
        if (error / csize < 10){
            stop = true;
        }
        // New centre = mean of the pixels assigned to it; then reset the
        // host-side accumulators for the next iteration.
        for (int s = 0; s < csize; s++){
            if (counter[s] != 0){
                clusterH[s*3]   = clusterT[s*3]  /counter[s];
                clusterH[s*3+1] = clusterT[s*3+1]/counter[s];
                clusterH[s*3+2] = clusterT[s*3+2]/counter[s];
            }
            clusterT[s*3]   = 0.0f;
            clusterT[s*3+1] = 0.0f;
            clusterT[s*3+2] = 0.0f;
            counter[s] = 0;
        }
        // BUG FIX: dClus is an ordinary hipMalloc'd pointer, not a
        // __device__ symbol, so it must be updated with hipMemcpy --
        // hipMemcpyToSymbol on it is invalid (the pre-loop upload above
        // already used plain hipMemcpy). cTemp and count *are* __device__
        // symbols, so ToSymbol is correct for them.
        hipMemcpy(dClus, clusterH, csize * 3 * sizeof(float), hipMemcpyHostToDevice);
        hipMemcpyToSymbol(cTemp, clusterT, csize * 3 * sizeof(float), 0, hipMemcpyHostToDevice);
        hipMemcpyToSymbol(count, counter, csize * sizeof(float), 0, hipMemcpyHostToDevice);
        hipDeviceSynchronize();
    }

    // Replace each stored cluster index with the cluster's colour.
    hipLaunchKernelGGL(function2, dimGrid, dimBlock, 0, 0, dData, dOut, width, height, dClus);
    hipDeviceSynchronize();
    sdkStopTimer(&timer);

    hipMemcpy(hOutputData, dOut, size, hipMemcpyDeviceToHost);
    strcpy(outputFilename, imagePath);
    strcpy(outputFilename + strlen(imagePath) - 4, "_out_cuda.ppm");
    sdkSavePPM4ub(outputFilename, hOutputData, width, height);
    sdkStopTimer(&Ov_timer);

    printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
    printf("%.2f Mpixels/sec\n", (width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
    printf("Overhead time: %f (ms)\n",(sdkGetTimerValue(&Ov_timer) - sdkGetTimerValue(&timer)));
    printf("Total time: %f (ms)\n", sdkGetTimerValue(&Ov_timer));

    // Release device and host resources (the original leaked all of these).
    hipFree(dData);
    hipFree(dOut);
    hipFree(dClus);
    free(hOutputData);
    free(clusterH);
    free(clusterT);
    free(counter);
    free(imagePath);
    sdkDeleteTimer(&timer);
    sdkDeleteTimer(&Ov_timer);
    return 0;
}
| cfb0d23e77564e48c79df650907965d76b5e5f55.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
// Include extra helper functions for CUDA - handles image load/save
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
const int k = 128;                         // number of k-means clusters
const char *imageFilename = "suburb.ppm";  // input image, loaded 4 bytes/pixel via sdkLoadPPM4ub
// Per-cluster accumulators updated with atomicAdd in the 'function' kernel
// and read back each iteration via cudaMemcpyFromSymbol in main().
__device__ float cTemp[k*3];  // running RGB sums, 3 floats per cluster
__device__ float count[k];    // number of pixels assigned to each cluster
__device__ bool stop=false;   // NOTE(review): never referenced on the device; main() uses its own local 'stop'
__global__ void function(unsigned char *in_data,unsigned char *o_data, unsigned int width, unsigned int height,float * clus){
  // One thread per pixel: find the nearest of the k cluster centres
  // (L1 distance over RGB), store the winning cluster index in the output
  // image, and accumulate the pixel into the per-cluster sums (cTemp) and
  // counts (count) that the host uses to recompute the centres.
  int i = threadIdx.y + blockDim.y*blockIdx.y;  // pixel row
  int j = threadIdx.x + blockDim.x*blockIdx.x;  // pixel column
  if (i >= height || j >= width) return;        // guard for non-divisible sizes
  float r = 0.0f, g = 0.0f, b = 0.0f;           // colour of this pixel (set on first win)
  float sum = 99999.0f;                         // best (smallest) distance so far
  int cVal = 0;                                 // index of the best cluster
  int idx = i*width*4 + j*4;                    // 4 bytes per pixel (RGBA layout)
  for (int a = 0; a < k; a++){
    float newsum = fabsf(clus[a*3]   - in_data[idx])
                 + fabsf(clus[a*3+1] - in_data[idx+1])
                 + fabsf(clus[a*3+2] - in_data[idx+2]);
    if (sum > newsum){
      cVal = a;
      r = in_data[idx];
      g = in_data[idx+1];
      b = in_data[idx+2];
      // Store the winning index in all three channels; a second kernel
      // later replaces each index with the cluster's colour component.
      o_data[idx]   = a;
      o_data[idx+1] = a;
      o_data[idx+2] = a;
      sum = newsum;
    }
  }
  // BUG FIX: the sums are laid out 3 floats per cluster (the host reads
  // clusterT[a*3 .. a*3+2]), so the accumulators for cluster cVal live at
  // cVal*3 .. cVal*3+2 -- the original wrote to cVal, cVal+1, cVal+2,
  // corrupting neighbouring clusters' red sums. Also use a float literal
  // for the count increment.
  atomicAdd(&count[cVal], 1.0f);
  atomicAdd(&cTemp[cVal*3],   r);
  atomicAdd(&cTemp[cVal*3+1], g);
  atomicAdd(&cTemp[cVal*3+2], b);
}
__global__ void function2(unsigned char *in_data,unsigned char *o_data, unsigned int width, unsigned int height,float * clus){
  // Second pass: each output byte currently holds a cluster index written
  // by the assignment kernel; replace it with the matching component of
  // that cluster's centre colour.
  const int row = threadIdx.y + blockDim.y*blockIdx.y;
  const int col = threadIdx.x + blockDim.x*blockIdx.x;
  const int base = (row*width + col)*4;       // 4 bytes per pixel
  const int idxR = o_data[base];              // read all indices before any write
  const int idxG = o_data[base+1];
  const int idxB = o_data[base+2];
  o_data[base]   = clus[idxR*3];
  o_data[base+1] = clus[idxG*3 + 1];
  o_data[base+2] = clus[idxB*3 + 2];
}
int main(int argc, char **argv){
    // Overall timer: covers image I/O, setup and the clustering itself.
    StopWatchInterface *Ov_timer = NULL;
    sdkCreateTimer(&Ov_timer);
    sdkStartTimer(&Ov_timer);

    // Locate and load the input image (4 bytes per pixel).
    char *imagePath = sdkFindFilePath(imageFilename, "");
    if (imagePath == NULL){
        printf("Unable to source image file: %s\n", imageFilename);
        exit(EXIT_FAILURE);
    }
    unsigned int width, height;
    unsigned char *hData = NULL;
    unsigned char *dData = NULL;
    unsigned char *dOut = NULL;
    float *dClus = NULL;
    char outputFilename[1024];
    sdkLoadPPM4ub(imagePath, &hData, &width, &height);

    int csize = k;
    unsigned int size = width * height * sizeof(unsigned char) * 4;
    unsigned char *hOutputData = (unsigned char *)malloc(size);
    // Host copies of the cluster centres, per-iteration sums and counts.
    float *clusterH = (float *)malloc(csize * 3 * sizeof(float));
    float *clusterT = (float *)malloc(csize * 3 * sizeof(float));
    float *counter  = (float *)malloc(csize * sizeof(float));

    cudaMalloc((void **)&dData, size);
    cudaMalloc((void **)&dOut, size);
    cudaMalloc((void **)&dClus, csize * 3 * sizeof(float));

    // Seed each centre with the colour of a randomly chosen pixel.
    for (int i = 0; i < csize; i++){
        // BUG FIX: '%' binds tighter than '*', so the original
        // "rand() % width*height" computed (rand()%width)*height and only
        // ever sampled a thin subset of pixel indices.
        int randomnum = rand() % (width * height);
        clusterH[i*3]   = hData[randomnum*4];
        clusterH[i*3+1] = hData[randomnum*4 + 1];
        clusterH[i*3+2] = hData[randomnum*4 + 2];
    }

    cudaMemcpy(dData, hData, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dClus, clusterH, csize * 3 * sizeof(float), cudaMemcpyHostToDevice);

    // 32x32 thread blocks; the grid assumes width and height are
    // multiples of 32 -- TODO confirm for all input images.
    dim3 dimBlock(32, 32, 1);
    dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);

    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);

    // Lloyd iterations: assign pixels on the GPU, average the per-cluster
    // sums on the host, and stop when the mean L1 movement of the centres
    // falls below 10.
    bool stop = false;
    while (!stop){
        function<<<dimGrid, dimBlock>>>(dData, dOut, width, height, dClus);
        cudaDeviceSynchronize();
        float error = 0.0f;
        cudaMemcpyFromSymbol(clusterT, cTemp, csize * sizeof(float) * 3, 0, cudaMemcpyDeviceToHost);
        cudaMemcpyFromSymbol(counter, count, csize * sizeof(float), 0, cudaMemcpyDeviceToHost);
        for (int a = 0; a < csize; a++){
            if (counter[a] != 0){
                error += fabsf(clusterH[a*3]   - clusterT[a*3]  /counter[a])
                       + fabsf(clusterH[a*3+1] - clusterT[a*3+1]/counter[a])
                       + fabsf(clusterH[a*3+2] - clusterT[a*3+2]/counter[a]);
            }
        }
        if (error / csize < 10){
            stop = true;
        }
        // New centre = mean of the pixels assigned to it; then reset the
        // host-side accumulators for the next iteration.
        for (int s = 0; s < csize; s++){
            if (counter[s] != 0){
                clusterH[s*3]   = clusterT[s*3]  /counter[s];
                clusterH[s*3+1] = clusterT[s*3+1]/counter[s];
                clusterH[s*3+2] = clusterT[s*3+2]/counter[s];
            }
            clusterT[s*3]   = 0.0f;
            clusterT[s*3+1] = 0.0f;
            clusterT[s*3+2] = 0.0f;
            counter[s] = 0;
        }
        // BUG FIX: dClus is an ordinary cudaMalloc'd pointer, not a
        // __device__ symbol, so it must be updated with cudaMemcpy --
        // cudaMemcpyToSymbol on it is invalid (the pre-loop upload above
        // already used plain cudaMemcpy). cTemp and count *are* __device__
        // symbols, so ToSymbol is correct for them.
        cudaMemcpy(dClus, clusterH, csize * 3 * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpyToSymbol(cTemp, clusterT, csize * 3 * sizeof(float), 0, cudaMemcpyHostToDevice);
        cudaMemcpyToSymbol(count, counter, csize * sizeof(float), 0, cudaMemcpyHostToDevice);
        cudaDeviceSynchronize();
    }

    // Replace each stored cluster index with the cluster's colour.
    function2<<<dimGrid, dimBlock>>>(dData, dOut, width, height, dClus);
    cudaDeviceSynchronize();
    sdkStopTimer(&timer);

    cudaMemcpy(hOutputData, dOut, size, cudaMemcpyDeviceToHost);
    strcpy(outputFilename, imagePath);
    strcpy(outputFilename + strlen(imagePath) - 4, "_out_cuda.ppm");
    sdkSavePPM4ub(outputFilename, hOutputData, width, height);
    sdkStopTimer(&Ov_timer);

    printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
    printf("%.2f Mpixels/sec\n", (width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6);
    printf("Overhead time: %f (ms)\n",(sdkGetTimerValue(&Ov_timer) - sdkGetTimerValue(&timer)));
    printf("Total time: %f (ms)\n", sdkGetTimerValue(&Ov_timer));

    // Release device and host resources (the original leaked all of these).
    cudaFree(dData);
    cudaFree(dOut);
    cudaFree(dClus);
    free(hOutputData);
    free(clusterH);
    free(clusterT);
    free(counter);
    free(imagePath);
    sdkDeleteTimer(&timer);
    sdkDeleteTimer(&Ov_timer);
    return 0;
}
|
b894f6da20b3afdfc1a4eafef3bd97a9eab59636.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MWElementwiseAffineLayerImpl.hpp"
#include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include "MWKernelHeaders.hpp"
// NOTE(review): this file is machine-generated (GPU Coder style) with
// obfuscated identifiers and statements wrapped mid-token. From the
// constructor's initializer list:
//   rZyMIPooLjRiXLgSWDuw / rwPhFWHcKnJsClVtebGW / qquNiJHQtfSLDMNCPIBJ = scale H/W/C
//   hqVFaqkobRNLQNgtbaai / ikTyjLTPRBkBRlLSyxXG / hpOzCTZasBMYKoXVxMDZ = offset H/W/C
//   ZqQxEyCjEixByRZYMkbj = isClipped, crKSAZwnyiinNFYODxoN = lowerbound,
//   vmBqKEmdajzGggqevoGl = upperbound,
//   pxmnUEWGnfCxJNuDkXAo / hYTzvgWajqchLzrmxjqn = device scale / offset buffers,
//   sCDdEyIOjXBVHhcakBhd / jLmklYtHcmTxayQTpmRw = host scale / offset buffers.
// Constructor: loads the scale/offset weights, caches the input geometry,
// then either maps the affine op onto a TensorRT IScaleLayer
// (uniform / channel / elementwise ScaleMode) or falls back to a custom
// plugin for the clipped / 2-D-matrix / sequence-network cases.
MWElementwiseAffineLayerImpl::MWElementwiseAffineLayerImpl(MWCNNLayer* layer,
MWTargetNetworkImpl* ntwk_impl, int scale_H, int scale_W, int scale_C, int
offset_H, int offset_W, int offset_C, bool isClipped, int lowerbound, int
upperbound, const char* scale_file, const char* offset_file, int ) :
MWCNNLayerImpl(layer, ntwk_impl), pxmnUEWGnfCxJNuDkXAo(NULL),
hYTzvgWajqchLzrmxjqn(NULL), rZyMIPooLjRiXLgSWDuw(scale_H),
rwPhFWHcKnJsClVtebGW(scale_W), qquNiJHQtfSLDMNCPIBJ(scale_C),
hqVFaqkobRNLQNgtbaai(offset_H), ikTyjLTPRBkBRlLSyxXG(offset_W),
hpOzCTZasBMYKoXVxMDZ(offset_C), ZqQxEyCjEixByRZYMkbj(isClipped),
crKSAZwnyiinNFYODxoN(lowerbound), vmBqKEmdajzGggqevoGl(upperbound),
sCDdEyIOjXBVHhcakBhd(nullptr), jLmklYtHcmTxayQTpmRw(nullptr),
qJWXFXvcpbSwehmlTNru(0), GrowsTaKrpHVUZdgZeJW(0), mJnXzwDFPTieqFtWcZIG(0) {
loadScaleAndOffset(scale_file, offset_file); setLayerProperties(); bool
isMatrix2d = (rZyMIPooLjRiXLgSWDuw > 1) && (rwPhFWHcKnJsClVtebGW > 1) &&
(qquNiJHQtfSLDMNCPIBJ != WawamKKnqecNqBXIyHIl); if ((!ZqQxEyCjEixByRZYMkbj) &&
(reGtUwUlPSwEenEBVIzH == hqbKXLMjsDxRQqyJEgbg ) && !isMatrix2d &&
(!eWYFXrUazhqiEIscccda->isSequenceNetwork)) { qeQuIDaHqnxGPDbPoQJF.values
= sCDdEyIOjXBVHhcakBhd; qeQuIDaHqnxGPDbPoQJF.count = reGtUwUlPSwEenEBVIzH;
qeQuIDaHqnxGPDbPoQJF.type = DataType::kFLOAT;
pKmXpiCPxZwpmXlulovZ.values = nullptr; pKmXpiCPxZwpmXlulovZ.count =
0; pKmXpiCPxZwpmXlulovZ.type = DataType::kFLOAT;
suFVgcuEVpCOrewbJfkB.values = jLmklYtHcmTxayQTpmRw;
suFVgcuEVpCOrewbJfkB.count = hqbKXLMjsDxRQqyJEgbg;
suFVgcuEVpCOrewbJfkB.type = DataType::kFLOAT; ITensor* prevLayerTensor =
getInputITensor(0); ScaleMode mode; if (reGtUwUlPSwEenEBVIzH == 1) mode =
ScaleMode::kUNIFORM; else if (YMNbgnUYZspjMLjwcIOS ==
reGtUwUlPSwEenEBVIzH) mode = ScaleMode::kELEMENTWISE; else if (rZyMIPooLjRiXLgSWDuw
== 1 && rwPhFWHcKnJsClVtebGW == 1 && reGtUwUlPSwEenEBVIzH == qquNiJHQtfSLDMNCPIBJ)
mode = ScaleMode::kCHANNEL; qJWXFXvcpbSwehmlTNru =
eWYFXrUazhqiEIscccda->network->addScale(*prevLayerTensor, mode,
suFVgcuEVpCOrewbJfkB, qeQuIDaHqnxGPDbPoQJF,
pKmXpiCPxZwpmXlulovZ); assert(qJWXFXvcpbSwehmlTNru);
qJWXFXvcpbSwehmlTNru->setName(getLayer()->getName().c_str());
setOpTensorPtr(qJWXFXvcpbSwehmlTNru->getOutput(0)); } else { ITensor*
prevLayerTensor = getInputITensor(0); mJnXzwDFPTieqFtWcZIG = new
MWPluginInterfaceImpl(this); GrowsTaKrpHVUZdgZeJW =
eWYFXrUazhqiEIscccda->network->addPlugin(&prevLayerTensor, 1,
*mJnXzwDFPTieqFtWcZIG);
GrowsTaKrpHVUZdgZeJW->setName(getLayer()->getName().c_str());
setOpTensorPtr(GrowsTaKrpHVUZdgZeJW->getOutput(0)); } } void
// loadScaleAndOffset: allocates the device-side scale/offset buffers and
// reads both weight files into them via loadScale/loadOffset.
MWElementwiseAffineLayerImpl::loadScaleAndOffset(const char*
sDWnRjToSPjYnOQzVfhS, const char* jNxFsuLXTFYGOUlfRwLW){
CUDA_CALL(hipMalloc((void**)&pxmnUEWGnfCxJNuDkXAo,
sizeof(float)*rZyMIPooLjRiXLgSWDuw*rwPhFWHcKnJsClVtebGW*qquNiJHQtfSLDMNCPIBJ));
CUDA_CALL(hipMalloc((void**)&hYTzvgWajqchLzrmxjqn,
sizeof(float)*hqVFaqkobRNLQNgtbaai*ikTyjLTPRBkBRlLSyxXG*hpOzCTZasBMYKoXVxMDZ));
loadScale(sDWnRjToSPjYnOQzVfhS); loadOffset(jNxFsuLXTFYGOUlfRwLW); } void
// setLayerProperties: caches the input tensor's H/W/C, per-image and total
// element counts, and asserts the scale/offset volumes fit inside the input.
MWElementwiseAffineLayerImpl::setLayerProperties(){ WbTBQxsNsCURmwRhNTAD =
getLayer()->getInputTensor(0)->getHeight(); XGQjNlvPuckcHnviTrkP =
getLayer()->getInputTensor(0)->getWidth(); WawamKKnqecNqBXIyHIl =
getLayer()->getInputTensor(0)->getChannels(); YmfPcXPXNFZDznkzKZrl =
WbTBQxsNsCURmwRhNTAD*XGQjNlvPuckcHnviTrkP; YMNbgnUYZspjMLjwcIOS =
YmfPcXPXNFZDznkzKZrl*WawamKKnqecNqBXIyHIl; YDoginwuwFxabuYCVqpT =
getLayer()->getInputTensor(0)->getNumElements(); reGtUwUlPSwEenEBVIzH =
rZyMIPooLjRiXLgSWDuw * rwPhFWHcKnJsClVtebGW * qquNiJHQtfSLDMNCPIBJ;
hqbKXLMjsDxRQqyJEgbg = hqVFaqkobRNLQNgtbaai * ikTyjLTPRBkBRlLSyxXG *
hpOzCTZasBMYKoXVxMDZ; assert(reGtUwUlPSwEenEBVIzH <= YDoginwuwFxabuYCVqpT);
assert(hqbKXLMjsDxRQqyJEgbg <= YDoginwuwFxabuYCVqpT); } int
// pluginEnqueueImpl: plugin fallback path. Picks a block/grid size from the
// total element count, then dispatches a scale kernel variant (scalar /
// vector / 3-D tensor / 2-D matrix) by the scale geometry, followed by the
// matching offset kernel variant, which also applies the optional clipping
// bounds (lowerbound/upperbound) when isClipped is set.
MWElementwiseAffineLayerImpl::pluginEnqueueImpl(const void* const* inputs,
void** outputs) { long int uTUuLVVebDakbPjXOQwp = ((YDoginwuwFxabuYCVqpT + 31) / 32)
* 32; long int uqHugYAAqkSnCCYonqCt = (uTUuLVVebDakbPjXOQwp < 1024) ?
uTUuLVVebDakbPjXOQwp : 1024; long int OJTEGflbxqozjWWEaUJd =
(YDoginwuwFxabuYCVqpT + uqHugYAAqkSnCCYonqCt - 1) /
uqHugYAAqkSnCCYonqCt; if (reGtUwUlPSwEenEBVIzH == 1) { hipLaunchKernelGGL((
scale_scalar_kernel), dim3(OJTEGflbxqozjWWEaUJd), dim3(uqHugYAAqkSnCCYonqCt), 0, 0,
(float*)inputs[0], (float*)outputs[0], pxmnUEWGnfCxJNuDkXAo,
YDoginwuwFxabuYCVqpT); } else if (rZyMIPooLjRiXLgSWDuw == 1 && rwPhFWHcKnJsClVtebGW
== 1 && reGtUwUlPSwEenEBVIzH > 1) { hipLaunchKernelGGL((
scale_vector_kernel), dim3(OJTEGflbxqozjWWEaUJd), dim3(uqHugYAAqkSnCCYonqCt), 0, 0,
(float*)inputs[0], (float*)outputs[0], pxmnUEWGnfCxJNuDkXAo,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT); } else if (YMNbgnUYZspjMLjwcIOS ==
reGtUwUlPSwEenEBVIzH) { hipLaunchKernelGGL(( scale_tensor3d_kernel), dim3(OJTEGflbxqozjWWEaUJd),
dim3(uqHugYAAqkSnCCYonqCt), 0, 0, (float*)inputs[0], (float*)outputs[0],
pxmnUEWGnfCxJNuDkXAo, XGQjNlvPuckcHnviTrkP, WbTBQxsNsCURmwRhNTAD,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT); } else { hipLaunchKernelGGL((
scale_matrix2d_kernel), dim3(OJTEGflbxqozjWWEaUJd),
dim3(uqHugYAAqkSnCCYonqCt), 0, 0, (float*)inputs[0], (float*)outputs[0],
pxmnUEWGnfCxJNuDkXAo, XGQjNlvPuckcHnviTrkP, YmfPcXPXNFZDznkzKZrl,
YMNbgnUYZspjMLjwcIOS, YDoginwuwFxabuYCVqpT); } if (hqbKXLMjsDxRQqyJEgbg
== 1) {hipLaunchKernelGGL(( offset_scalar_kernel), dim3(OJTEGflbxqozjWWEaUJd),
dim3(uqHugYAAqkSnCCYonqCt), 0, 0, (float*)outputs[0], (float*)outputs[0],
hYTzvgWajqchLzrmxjqn, YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj,
crKSAZwnyiinNFYODxoN, vmBqKEmdajzGggqevoGl); } else if (hqVFaqkobRNLQNgtbaai
== 1 && ikTyjLTPRBkBRlLSyxXG == 1 && hqbKXLMjsDxRQqyJEgbg > 1) { hipLaunchKernelGGL((
offset_vector_kernel), dim3(OJTEGflbxqozjWWEaUJd), dim3(uqHugYAAqkSnCCYonqCt), 0, 0,
(float*)outputs[0], (float*)outputs[0], hYTzvgWajqchLzrmxjqn,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj, crKSAZwnyiinNFYODxoN,
vmBqKEmdajzGggqevoGl); } else if (YMNbgnUYZspjMLjwcIOS ==
hqbKXLMjsDxRQqyJEgbg) {hipLaunchKernelGGL(( offset_tensor3d_kernel), dim3(OJTEGflbxqozjWWEaUJd),
dim3(uqHugYAAqkSnCCYonqCt), 0, 0, (float*)outputs[0], (float*)outputs[0],
hYTzvgWajqchLzrmxjqn, XGQjNlvPuckcHnviTrkP, WbTBQxsNsCURmwRhNTAD,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj, crKSAZwnyiinNFYODxoN,
vmBqKEmdajzGggqevoGl); } else { hipLaunchKernelGGL((
offset_matrix2d_kernel), dim3(OJTEGflbxqozjWWEaUJd),
dim3(uqHugYAAqkSnCCYonqCt), 0, 0, (float*)outputs[0], (float*)outputs[0],
hYTzvgWajqchLzrmxjqn, XGQjNlvPuckcHnviTrkP, YmfPcXPXNFZDznkzKZrl,
YMNbgnUYZspjMLjwcIOS, YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj,
crKSAZwnyiinNFYODxoN, vmBqKEmdajzGggqevoGl); } return 0; } void
// loadScale: reads the scale weights from the binary file into host memory
// and uploads them to the device scale buffer.
MWElementwiseAffineLayerImpl::loadScale(const char* sDWnRjToSPjYnOQzVfhS) {
FILE* SZPsAnAecHGeFCSHofdG = MWCNNLayer::openBinaryFile(sDWnRjToSPjYnOQzVfhS);
assert(SZPsAnAecHGeFCSHofdG); long int eYGiuTCCxjmoBDvVpHpn =
rZyMIPooLjRiXLgSWDuw*rwPhFWHcKnJsClVtebGW*qquNiJHQtfSLDMNCPIBJ; sCDdEyIOjXBVHhcakBhd
= MALLOC_CALL(sizeof(float)*eYGiuTCCxjmoBDvVpHpn); call_fread(sCDdEyIOjXBVHhcakBhd,
sizeof(float), eYGiuTCCxjmoBDvVpHpn, SZPsAnAecHGeFCSHofdG, sDWnRjToSPjYnOQzVfhS);
CUDA_CALL(hipMemcpy(pxmnUEWGnfCxJNuDkXAo, sCDdEyIOjXBVHhcakBhd,
sizeof(float)*eYGiuTCCxjmoBDvVpHpn, hipMemcpyHostToDevice)); fclose(SZPsAnAecHGeFCSHofdG);
// loadOffset: same pattern as loadScale, for the offset weights.
} void MWElementwiseAffineLayerImpl::loadOffset(const char*
jNxFsuLXTFYGOUlfRwLW) { FILE* SZPsAnAecHGeFCSHofdG =
MWCNNLayer::openBinaryFile(jNxFsuLXTFYGOUlfRwLW); assert(SZPsAnAecHGeFCSHofdG); long
int eYGiuTCCxjmoBDvVpHpn =
hqVFaqkobRNLQNgtbaai*ikTyjLTPRBkBRlLSyxXG*hpOzCTZasBMYKoXVxMDZ;
jLmklYtHcmTxayQTpmRw = MALLOC_CALL(sizeof(float)*eYGiuTCCxjmoBDvVpHpn);
call_fread(jLmklYtHcmTxayQTpmRw, sizeof(float), eYGiuTCCxjmoBDvVpHpn, SZPsAnAecHGeFCSHofdG,
jNxFsuLXTFYGOUlfRwLW); CUDA_CALL(hipMemcpy(hYTzvgWajqchLzrmxjqn,
jLmklYtHcmTxayQTpmRw, sizeof(float)*eYGiuTCCxjmoBDvVpHpn, hipMemcpyHostToDevice));
// cleanup: releases the device buffers and the host staging buffers.
fclose(SZPsAnAecHGeFCSHofdG); } void MWElementwiseAffineLayerImpl::cleanup() { if
(pxmnUEWGnfCxJNuDkXAo) { CUDA_FREE_CALL(pxmnUEWGnfCxJNuDkXAo); } if (hYTzvgWajqchLzrmxjqn)
{ CUDA_FREE_CALL(hYTzvgWajqchLzrmxjqn); } if (sCDdEyIOjXBVHhcakBhd)
free(sCDdEyIOjXBVHhcakBhd); if (jLmklYtHcmTxayQTpmRw)
free(jLmklYtHcmTxayQTpmRw); } | b894f6da20b3afdfc1a4eafef3bd97a9eab59636.cu | #include "MWElementwiseAffineLayerImpl.hpp"
#include "MWTargetNetworkImpl.hpp"
#include "cnn_api.hpp"
#include "MWKernelHeaders.hpp"
// NOTE(review): this file is machine-generated (GPU Coder style) with
// obfuscated identifiers and statements wrapped mid-token. From the
// constructor's initializer list:
//   rZyMIPooLjRiXLgSWDuw / rwPhFWHcKnJsClVtebGW / qquNiJHQtfSLDMNCPIBJ = scale H/W/C
//   hqVFaqkobRNLQNgtbaai / ikTyjLTPRBkBRlLSyxXG / hpOzCTZasBMYKoXVxMDZ = offset H/W/C
//   ZqQxEyCjEixByRZYMkbj = isClipped, crKSAZwnyiinNFYODxoN = lowerbound,
//   vmBqKEmdajzGggqevoGl = upperbound,
//   pxmnUEWGnfCxJNuDkXAo / hYTzvgWajqchLzrmxjqn = device scale / offset buffers,
//   sCDdEyIOjXBVHhcakBhd / jLmklYtHcmTxayQTpmRw = host scale / offset buffers.
// Constructor: loads the scale/offset weights, caches the input geometry,
// then either maps the affine op onto a TensorRT IScaleLayer
// (uniform / channel / elementwise ScaleMode) or falls back to a custom
// plugin for the clipped / 2-D-matrix / sequence-network cases.
MWElementwiseAffineLayerImpl::MWElementwiseAffineLayerImpl(MWCNNLayer* layer,
MWTargetNetworkImpl* ntwk_impl, int scale_H, int scale_W, int scale_C, int
offset_H, int offset_W, int offset_C, bool isClipped, int lowerbound, int
upperbound, const char* scale_file, const char* offset_file, int ) :
MWCNNLayerImpl(layer, ntwk_impl), pxmnUEWGnfCxJNuDkXAo(NULL),
hYTzvgWajqchLzrmxjqn(NULL), rZyMIPooLjRiXLgSWDuw(scale_H),
rwPhFWHcKnJsClVtebGW(scale_W), qquNiJHQtfSLDMNCPIBJ(scale_C),
hqVFaqkobRNLQNgtbaai(offset_H), ikTyjLTPRBkBRlLSyxXG(offset_W),
hpOzCTZasBMYKoXVxMDZ(offset_C), ZqQxEyCjEixByRZYMkbj(isClipped),
crKSAZwnyiinNFYODxoN(lowerbound), vmBqKEmdajzGggqevoGl(upperbound),
sCDdEyIOjXBVHhcakBhd(nullptr), jLmklYtHcmTxayQTpmRw(nullptr),
qJWXFXvcpbSwehmlTNru(0), GrowsTaKrpHVUZdgZeJW(0), mJnXzwDFPTieqFtWcZIG(0) {
loadScaleAndOffset(scale_file, offset_file); setLayerProperties(); bool
isMatrix2d = (rZyMIPooLjRiXLgSWDuw > 1) && (rwPhFWHcKnJsClVtebGW > 1) &&
(qquNiJHQtfSLDMNCPIBJ != WawamKKnqecNqBXIyHIl); if ((!ZqQxEyCjEixByRZYMkbj) &&
(reGtUwUlPSwEenEBVIzH == hqbKXLMjsDxRQqyJEgbg ) && !isMatrix2d &&
(!eWYFXrUazhqiEIscccda->isSequenceNetwork)) { qeQuIDaHqnxGPDbPoQJF.values
= sCDdEyIOjXBVHhcakBhd; qeQuIDaHqnxGPDbPoQJF.count = reGtUwUlPSwEenEBVIzH;
qeQuIDaHqnxGPDbPoQJF.type = DataType::kFLOAT;
pKmXpiCPxZwpmXlulovZ.values = nullptr; pKmXpiCPxZwpmXlulovZ.count =
0; pKmXpiCPxZwpmXlulovZ.type = DataType::kFLOAT;
suFVgcuEVpCOrewbJfkB.values = jLmklYtHcmTxayQTpmRw;
suFVgcuEVpCOrewbJfkB.count = hqbKXLMjsDxRQqyJEgbg;
suFVgcuEVpCOrewbJfkB.type = DataType::kFLOAT; ITensor* prevLayerTensor =
getInputITensor(0); ScaleMode mode; if (reGtUwUlPSwEenEBVIzH == 1) mode =
ScaleMode::kUNIFORM; else if (YMNbgnUYZspjMLjwcIOS ==
reGtUwUlPSwEenEBVIzH) mode = ScaleMode::kELEMENTWISE; else if (rZyMIPooLjRiXLgSWDuw
== 1 && rwPhFWHcKnJsClVtebGW == 1 && reGtUwUlPSwEenEBVIzH == qquNiJHQtfSLDMNCPIBJ)
mode = ScaleMode::kCHANNEL; qJWXFXvcpbSwehmlTNru =
eWYFXrUazhqiEIscccda->network->addScale(*prevLayerTensor, mode,
suFVgcuEVpCOrewbJfkB, qeQuIDaHqnxGPDbPoQJF,
pKmXpiCPxZwpmXlulovZ); assert(qJWXFXvcpbSwehmlTNru);
qJWXFXvcpbSwehmlTNru->setName(getLayer()->getName().c_str());
setOpTensorPtr(qJWXFXvcpbSwehmlTNru->getOutput(0)); } else { ITensor*
prevLayerTensor = getInputITensor(0); mJnXzwDFPTieqFtWcZIG = new
MWPluginInterfaceImpl(this); GrowsTaKrpHVUZdgZeJW =
eWYFXrUazhqiEIscccda->network->addPlugin(&prevLayerTensor, 1,
*mJnXzwDFPTieqFtWcZIG);
GrowsTaKrpHVUZdgZeJW->setName(getLayer()->getName().c_str());
setOpTensorPtr(GrowsTaKrpHVUZdgZeJW->getOutput(0)); } } void
// loadScaleAndOffset: allocates the device-side scale/offset buffers and
// reads both weight files into them via loadScale/loadOffset.
MWElementwiseAffineLayerImpl::loadScaleAndOffset(const char*
sDWnRjToSPjYnOQzVfhS, const char* jNxFsuLXTFYGOUlfRwLW){
CUDA_CALL(cudaMalloc((void**)&pxmnUEWGnfCxJNuDkXAo,
sizeof(float)*rZyMIPooLjRiXLgSWDuw*rwPhFWHcKnJsClVtebGW*qquNiJHQtfSLDMNCPIBJ));
CUDA_CALL(cudaMalloc((void**)&hYTzvgWajqchLzrmxjqn,
sizeof(float)*hqVFaqkobRNLQNgtbaai*ikTyjLTPRBkBRlLSyxXG*hpOzCTZasBMYKoXVxMDZ));
loadScale(sDWnRjToSPjYnOQzVfhS); loadOffset(jNxFsuLXTFYGOUlfRwLW); } void
// setLayerProperties: caches the input tensor's H/W/C, per-image and total
// element counts, and asserts the scale/offset volumes fit inside the input.
MWElementwiseAffineLayerImpl::setLayerProperties(){ WbTBQxsNsCURmwRhNTAD =
getLayer()->getInputTensor(0)->getHeight(); XGQjNlvPuckcHnviTrkP =
getLayer()->getInputTensor(0)->getWidth(); WawamKKnqecNqBXIyHIl =
getLayer()->getInputTensor(0)->getChannels(); YmfPcXPXNFZDznkzKZrl =
WbTBQxsNsCURmwRhNTAD*XGQjNlvPuckcHnviTrkP; YMNbgnUYZspjMLjwcIOS =
YmfPcXPXNFZDznkzKZrl*WawamKKnqecNqBXIyHIl; YDoginwuwFxabuYCVqpT =
getLayer()->getInputTensor(0)->getNumElements(); reGtUwUlPSwEenEBVIzH =
rZyMIPooLjRiXLgSWDuw * rwPhFWHcKnJsClVtebGW * qquNiJHQtfSLDMNCPIBJ;
hqbKXLMjsDxRQqyJEgbg = hqVFaqkobRNLQNgtbaai * ikTyjLTPRBkBRlLSyxXG *
hpOzCTZasBMYKoXVxMDZ; assert(reGtUwUlPSwEenEBVIzH <= YDoginwuwFxabuYCVqpT);
assert(hqbKXLMjsDxRQqyJEgbg <= YDoginwuwFxabuYCVqpT); } int
// pluginEnqueueImpl: plugin fallback path. Picks a block/grid size from the
// total element count, then dispatches a scale kernel variant (scalar /
// vector / 3-D tensor / 2-D matrix) by the scale geometry, followed by the
// matching offset kernel variant, which also applies the optional clipping
// bounds (lowerbound/upperbound) when isClipped is set.
MWElementwiseAffineLayerImpl::pluginEnqueueImpl(const void* const* inputs,
void** outputs) { long int uTUuLVVebDakbPjXOQwp = ((YDoginwuwFxabuYCVqpT + 31) / 32)
* 32; long int uqHugYAAqkSnCCYonqCt = (uTUuLVVebDakbPjXOQwp < 1024) ?
uTUuLVVebDakbPjXOQwp : 1024; long int OJTEGflbxqozjWWEaUJd =
(YDoginwuwFxabuYCVqpT + uqHugYAAqkSnCCYonqCt - 1) /
uqHugYAAqkSnCCYonqCt; if (reGtUwUlPSwEenEBVIzH == 1) {
scale_scalar_kernel<<<OJTEGflbxqozjWWEaUJd, uqHugYAAqkSnCCYonqCt>>>(
(float*)inputs[0], (float*)outputs[0], pxmnUEWGnfCxJNuDkXAo,
YDoginwuwFxabuYCVqpT); } else if (rZyMIPooLjRiXLgSWDuw == 1 && rwPhFWHcKnJsClVtebGW
== 1 && reGtUwUlPSwEenEBVIzH > 1) {
scale_vector_kernel<<<OJTEGflbxqozjWWEaUJd, uqHugYAAqkSnCCYonqCt>>>(
(float*)inputs[0], (float*)outputs[0], pxmnUEWGnfCxJNuDkXAo,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT); } else if (YMNbgnUYZspjMLjwcIOS ==
reGtUwUlPSwEenEBVIzH) { scale_tensor3d_kernel<<<OJTEGflbxqozjWWEaUJd,
uqHugYAAqkSnCCYonqCt>>>( (float*)inputs[0], (float*)outputs[0],
pxmnUEWGnfCxJNuDkXAo, XGQjNlvPuckcHnviTrkP, WbTBQxsNsCURmwRhNTAD,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT); } else {
scale_matrix2d_kernel<<<OJTEGflbxqozjWWEaUJd,
uqHugYAAqkSnCCYonqCt>>>( (float*)inputs[0], (float*)outputs[0],
pxmnUEWGnfCxJNuDkXAo, XGQjNlvPuckcHnviTrkP, YmfPcXPXNFZDznkzKZrl,
YMNbgnUYZspjMLjwcIOS, YDoginwuwFxabuYCVqpT); } if (hqbKXLMjsDxRQqyJEgbg
== 1) { offset_scalar_kernel<<<OJTEGflbxqozjWWEaUJd,
uqHugYAAqkSnCCYonqCt>>>( (float*)outputs[0], (float*)outputs[0],
hYTzvgWajqchLzrmxjqn, YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj,
crKSAZwnyiinNFYODxoN, vmBqKEmdajzGggqevoGl); } else if (hqVFaqkobRNLQNgtbaai
== 1 && ikTyjLTPRBkBRlLSyxXG == 1 && hqbKXLMjsDxRQqyJEgbg > 1) {
offset_vector_kernel<<<OJTEGflbxqozjWWEaUJd, uqHugYAAqkSnCCYonqCt>>>(
(float*)outputs[0], (float*)outputs[0], hYTzvgWajqchLzrmxjqn,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj, crKSAZwnyiinNFYODxoN,
vmBqKEmdajzGggqevoGl); } else if (YMNbgnUYZspjMLjwcIOS ==
hqbKXLMjsDxRQqyJEgbg) { offset_tensor3d_kernel<<<OJTEGflbxqozjWWEaUJd,
uqHugYAAqkSnCCYonqCt>>>( (float*)outputs[0], (float*)outputs[0],
hYTzvgWajqchLzrmxjqn, XGQjNlvPuckcHnviTrkP, WbTBQxsNsCURmwRhNTAD,
YmfPcXPXNFZDznkzKZrl, YMNbgnUYZspjMLjwcIOS,
YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj, crKSAZwnyiinNFYODxoN,
vmBqKEmdajzGggqevoGl); } else {
offset_matrix2d_kernel<<<OJTEGflbxqozjWWEaUJd,
uqHugYAAqkSnCCYonqCt>>>( (float*)outputs[0], (float*)outputs[0],
hYTzvgWajqchLzrmxjqn, XGQjNlvPuckcHnviTrkP, YmfPcXPXNFZDznkzKZrl,
YMNbgnUYZspjMLjwcIOS, YDoginwuwFxabuYCVqpT, ZqQxEyCjEixByRZYMkbj,
crKSAZwnyiinNFYODxoN, vmBqKEmdajzGggqevoGl); } return 0; } void
// loadScale: reads the scale weights from the binary file into host memory
// and uploads them to the device scale buffer.
MWElementwiseAffineLayerImpl::loadScale(const char* sDWnRjToSPjYnOQzVfhS) {
FILE* SZPsAnAecHGeFCSHofdG = MWCNNLayer::openBinaryFile(sDWnRjToSPjYnOQzVfhS);
assert(SZPsAnAecHGeFCSHofdG); long int eYGiuTCCxjmoBDvVpHpn =
rZyMIPooLjRiXLgSWDuw*rwPhFWHcKnJsClVtebGW*qquNiJHQtfSLDMNCPIBJ; sCDdEyIOjXBVHhcakBhd
= MALLOC_CALL(sizeof(float)*eYGiuTCCxjmoBDvVpHpn); call_fread(sCDdEyIOjXBVHhcakBhd,
sizeof(float), eYGiuTCCxjmoBDvVpHpn, SZPsAnAecHGeFCSHofdG, sDWnRjToSPjYnOQzVfhS);
CUDA_CALL(cudaMemcpy(pxmnUEWGnfCxJNuDkXAo, sCDdEyIOjXBVHhcakBhd,
sizeof(float)*eYGiuTCCxjmoBDvVpHpn, cudaMemcpyHostToDevice)); fclose(SZPsAnAecHGeFCSHofdG);
// loadOffset: same pattern as loadScale, for the offset weights.
} void MWElementwiseAffineLayerImpl::loadOffset(const char*
jNxFsuLXTFYGOUlfRwLW) { FILE* SZPsAnAecHGeFCSHofdG =
MWCNNLayer::openBinaryFile(jNxFsuLXTFYGOUlfRwLW); assert(SZPsAnAecHGeFCSHofdG); long
int eYGiuTCCxjmoBDvVpHpn =
hqVFaqkobRNLQNgtbaai*ikTyjLTPRBkBRlLSyxXG*hpOzCTZasBMYKoXVxMDZ;
jLmklYtHcmTxayQTpmRw = MALLOC_CALL(sizeof(float)*eYGiuTCCxjmoBDvVpHpn);
call_fread(jLmklYtHcmTxayQTpmRw, sizeof(float), eYGiuTCCxjmoBDvVpHpn, SZPsAnAecHGeFCSHofdG,
jNxFsuLXTFYGOUlfRwLW); CUDA_CALL(cudaMemcpy(hYTzvgWajqchLzrmxjqn,
jLmklYtHcmTxayQTpmRw, sizeof(float)*eYGiuTCCxjmoBDvVpHpn, cudaMemcpyHostToDevice));
// cleanup: releases the device buffers and the host staging buffers.
fclose(SZPsAnAecHGeFCSHofdG); } void MWElementwiseAffineLayerImpl::cleanup() { if
(pxmnUEWGnfCxJNuDkXAo) { CUDA_FREE_CALL(pxmnUEWGnfCxJNuDkXAo); } if (hYTzvgWajqchLzrmxjqn)
{ CUDA_FREE_CALL(hYTzvgWajqchLzrmxjqn); } if (sCDdEyIOjXBVHhcakBhd)
free(sCDdEyIOjXBVHhcakBhd); if (jLmklYtHcmTxayQTpmRw)
free(jLmklYtHcmTxayQTpmRw); }
ef46bd5801550573e4901a6abe8d37eb7023290e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _DEV_EVALUATE_GRAVITY_CU
#define _DEV_EVALUATE_GRAVITY_CU
#ifndef __DEVICE_EMULATION__
#define _DEVICE_CODE_
#endif
#include "octgravdefs.h"
#include "dev_octgrav_tex.cuh"
#ifndef LMEM_STACK_SIZE
#define LMEM_STACK_SIZE 256
#endif
#define LEAF_BIT (1 << (24))
__device__ bool open_node(float4 cell_com,
                          float4 cell_pos,
                          float4 node_pos,
                          float4 node_com) {
  // Opening test for the tree walk: returns true when the node is too
  // close/large (scaled by inv_opening_angle) to be treated as a single
  // multipole and must therefore be descended into.
  // Per-axis distance from the node's centre of mass to the cell's cube,
  // clamped to zero when the com lies inside the cube on that axis.
  float dx = fabs(node_com.x - cell_pos.x) - cell_pos.w;
  float dy = fabs(node_com.y - cell_pos.y) - cell_pos.w;
  float dz = fabs(node_com.z - cell_pos.z) - cell_pos.w;
  dx = 0.5f * (dx + fabs(dx));   // branch-free max(dx, 0)
  dy = 0.5f * (dy + fabs(dy));
  dz = 0.5f * (dz + fabs(dz));
  const float dist = sqrtf(dx*dx + dy*dy + dz*dz);
  // node_pos.w is the node size; cell_com.w holds the cell's
  // centre-of-mass offset (packed by the calling kernels).
  return 2.0f * node_pos.w * inv_opening_angle > dist - cell_com.w;
}
/**********************************************
* compute length of the interaction list *
**********************************************/
template<int octant>
__device__ int4 interact_len(int node, int node_old,
                             float4 cell_com,
                             float4 cell_pos,
                             int *ids_stack,
                             int4 stack) {
  // Classify one child slot of node_old during the counting pass.
  // Bookkeeping packed in 'stack': .x = stack depth, .y = #accepted nodes,
  // .z = #leaves to open, .w = estimated memory-operation count.
  if (node == 0) return stack;   // empty child slot

  const int slot = (node_old << (2)) + octant;
  float4 node_pos = tex1Dfetch(node_pos_tex, slot);
  float4 node_com = tex1Dfetch(node_com_tex, slot);
  stack.w += 8;                  // two float4 texture reads

  if (!open_node(cell_com, cell_pos, node_pos, node_com)) {
    stack.y++;                   // far enough: interacts as a single node
  } else if (node & LEAF_BIT) {
    stack.z++;                   // leaf that must be resolved per-particle
  } else {
    ids_stack[stack.x++] = node; // internal node: push for later descent
    stack.w += 1;
  }
  return stack;
}
__device__ int3 compute_interaction_list_len(float4 cell_com,
                                             float4 cell_pos) {
  // Counting pass for one cell: walk the octree with an explicit stack and
  // tally how many nodes (y) and leaves (z) will interact with the cell,
  // plus an estimate of the memory traffic (w). The results size the
  // interaction lists filled later by build_interaction_list.
  // NOTE(review): ids_stack has no overflow guard; assumes the pending-node
  // count never exceeds LMEM_STACK_SIZE -- confirm for deep trees.
  int ids_stack[LMEM_STACK_SIZE];
  int4 stack = {0, 0, 0, 0};
  ids_stack[stack.x++] = 0;      // start from the root node
  stack.w += 1;

  while (stack.x > 0) {
    const int node = ids_stack[--stack.x];
    stack.w += 1;
    // Each node's 8 children are stored as two consecutive int4s.
    const int4 up = tex1Dfetch(children_tex, node + 0);
    const int4 dn = tex1Dfetch(children_tex, node + 1);
    stack.w += 8;

    stack = interact_len<0>(up.x, node, cell_com, cell_pos, ids_stack, stack);
    stack = interact_len<1>(up.y, node, cell_com, cell_pos, ids_stack, stack);
    stack = interact_len<2>(up.z, node, cell_com, cell_pos, ids_stack, stack);
    stack = interact_len<3>(up.w, node, cell_com, cell_pos, ids_stack, stack);
    stack = interact_len<4>(dn.x, node, cell_com, cell_pos, ids_stack, stack);
    stack = interact_len<5>(dn.y, node, cell_com, cell_pos, ids_stack, stack);
    stack = interact_len<6>(dn.z, node, cell_com, cell_pos, ids_stack, stack);
    stack = interact_len<7>(dn.w, node, cell_com, cell_pos, ids_stack, stack);
  }

  // (number of nodes, number of leaves, memory reads+writes)
  return make_int3(stack.y, stack.z, stack.w);
}
__global__ void dev_compute_interaction_list_len(int3 *interaction_list_len) {
  // One thread per tree cell. Threads past the end re-process cell
  // threadIdx.x instead of exiting; they recompute and store the same
  // value as that cell's owning thread, so the duplicate write is benign.
  int cell = blockIdx.x * blockDim.x + threadIdx.x;
  if (cell >= n_cells)
    cell = threadIdx.x;

  float4 com = tex1Dfetch(cell_com_tex, cell);
  float4 pos = tex1Dfetch(cell_pos_tex, cell);
  // Pack the distance between the cell's centre of mass and its geometric
  // centre into com.w; open_node uses it to tighten the opening criterion.
  const float ox = com.x - pos.x;
  const float oy = com.y - pos.y;
  const float oz = com.z - pos.z;
  com.w = sqrtf(ox*ox + oy*oy + oz*oz);

  interaction_list_len[cell] = compute_interaction_list_len(com, pos);
}
/****************************
* build interaction list *
****************************/
template<int octant>
__device__ int3 interact_bld(int node, int node_old,
                             float4 cell_com,
                             float4 cell_pos,
                             int *ids_stack,
                             int *interaction_node_list,
                             int *interaction_leaf_list,
                             int3 stack) {
  // Building pass: same traversal decisions as interact_len, but records
  // the accepted node ids / opened leaf ids into the output lists.
  // stack.x = stack depth, stack.y = node-list cursor, stack.z = leaf-list cursor.
  if (node == 0) return stack;   // empty child slot

  const int slot = (node_old << (2)) + octant;
  float4 node_pos = tex1Dfetch(node_pos_tex, slot);
  float4 node_com = tex1Dfetch(node_com_tex, slot);

  if (!open_node(cell_com, cell_pos, node_pos, node_com)) {
    interaction_node_list[stack.y++] = slot;   // accepted as a single node
  } else if (node & LEAF_BIT) {
    interaction_leaf_list[stack.z++] = slot;   // leaf to resolve per-particle
  } else {
    ids_stack[stack.x++] = node;               // internal node: descend later
  }
  return stack;
}
__device__ void build_interaction_list(float4 cell_com,
                                       float4 cell_pos,
                                       int *interaction_node_list,
                                       int *interaction_leaf_list) {
  // Building pass for one cell: identical traversal to
  // compute_interaction_list_len, but writes the accepted node ids and the
  // opened leaf ids into the caller-provided (pre-sized) lists.
  // NOTE(review): like the counting pass, ids_stack has no overflow guard.
  int ids_stack[LMEM_STACK_SIZE];
  int3 stack = {0, 0, 0};
  ids_stack[stack.x++] = 0;      // start from the root node

  while (stack.x > 0) {
    const int node = ids_stack[--stack.x];
    const int4 up = tex1Dfetch(children_tex, node + 0);
    const int4 dn = tex1Dfetch(children_tex, node + 1);

    stack = interact_bld<0>(up.x, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
    stack = interact_bld<1>(up.y, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
    stack = interact_bld<2>(up.z, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
    stack = interact_bld<3>(up.w, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
    stack = interact_bld<4>(dn.x, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
    stack = interact_bld<5>(dn.y, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
    stack = interact_bld<6>(dn.z, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
    stack = interact_bld<7>(dn.w, node, cell_com, cell_pos, ids_stack,
                            interaction_node_list, interaction_leaf_list, stack);
  }
}
__global__ void dev_build_interaction_list(int cell_offset,
int *interaction_node_list,
int2 *interaction_node_offset,
int *interaction_leaf_list,
int2 *interaction_leaf_offset) {
int index = cell_offset + (blockIdx.x * blockDim.x + threadIdx.x);
if (index < n_cells) {
float4 cell_com = tex1Dfetch(cell_com_tex, index);
float4 cell_pos = tex1Dfetch(cell_pos_tex, index);
cell_com.w = sqrtf((cell_com.x - cell_pos.x)*(cell_com.x - cell_pos.x)+
(cell_com.y - cell_pos.y)*(cell_com.y - cell_pos.y)+
(cell_com.z - cell_pos.z)*(cell_com.z - cell_pos.z));
build_interaction_list(cell_com, cell_pos,
&interaction_node_list[interaction_node_offset[index].x],
&interaction_leaf_list[interaction_leaf_offset[index].x]);
}
}
/**************************************************
***************************************************
*** ***
** evaluate gravity via the interaction list ***
*** ***
***************************************************
***************************************************/
/***************************/
/* body-body interaction */
/***************************/
__device__ float4 body_body_interaction(float4 grav, float4 body_i, float4 body_j) {
float3 dr;
dr.x = body_i.x - body_j.x;
dr.y = body_i.y - body_j.y;
dr.z = body_i.z - body_j.z;
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
float inv_ds = rsqrtf(ds2 + softening_squared) * (ds2 != 0.0f);
float inv_s3 = body_j.w * inv_ds*inv_ds*inv_ds;
grav.x -= inv_s3 * dr.x;
grav.y -= inv_s3 * dr.y;
grav.z -= inv_s3 * dr.z;
grav.w -= body_j.w * inv_ds;
return grav;
}
/***************************/
/* body-node Octupole interaction */
/***************************/
__device__ float4 body_node_Octupole(float4 grav,
float4 body_i,
float4 com,
float4 Oct1,
float4 Oct2,
float2 Oct3) {
float3 dr;
dr.x = body_i.x - com.x; // 1 FLOP
dr.y = body_i.y - com.y; // 1 FLOP
dr.z = body_i.z - com.z; // 1 FLOP
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; // 5 FLOP
float inv_ds = rsqrt(ds2 + softening_squared) * (ds2 != 0.0f); // 3 FLOP
float inv_ds2 = inv_ds*inv_ds; // 1 FLOP
float inv_ds3 = inv_ds *inv_ds2; // 1 FLOP
float inv_ds5 = inv_ds3*inv_ds2; // 1 FLOP
float inv_ds7 = 0.5f*inv_ds5*inv_ds2; // 2 FLOP
float SijRj1 = Oct1.x*dr.x + Oct1.y*dr.y + Oct1.z*dr.z; // 5 FLOP
float SijRj2 = Oct2.x*dr.x + Oct2.y*dr.y + Oct2.z*dr.z; // 5 FLOP
float SijRj3 = Oct1.w*dr.x + Oct2.w*dr.y + Oct3.x*dr.z; // 5 FLOP
float SijRjRi_sq = SijRj1 * dr.x*dr.x + SijRj2 * dr.y*dr.y + SijRj3 * dr.z*dr.z; // 8 FLOP
/******************/
/*** POTENTIAL ***/
/******************/
float pot = inv_ds7 * (SijRjRi_sq + Oct3.y*dr.x*dr.y*dr.z); // 5 FLOP
grav.w -= pot; // 1 FLOP
/*********************/
/*** ACCELERATION ***/
/*********************/
/*** part 1 ***/
float3 grav0 = {0.0f,0.0f,0.0f};
grav0.x -= 7.0f*inv_ds2 * dr.x * pot; // 4 FLOP
grav0.y -= 7.0f*inv_ds2 * dr.y * pot; // 4 FLOP
grav0.z -= 7.0f*inv_ds2 * dr.z * pot; // 4 FLOP
/*** part 2 ***/
/* S11*dx^2 + S21*dy^2 + S31*dz^2 */
/* S12*dx^2 + S22*dy^2 + S32*dz^2 */
/* S13*dx^2 + S23*dy^2 + S33*dz^2 */
grav0.x += inv_ds7 * (2.0f*SijRj1*dr.x + dr.x*dr.x*Oct1.x + dr.y*dr.y*Oct2.x + dr.z*dr.z*Oct1.w); // 13 FLOP
grav0.y += inv_ds7 * (2.0f*SijRj2*dr.y + dr.x*dr.x*Oct1.y + dr.y*dr.y*Oct2.y + dr.z*dr.z*Oct2.w); // 13 FLOP
grav0.y += inv_ds7 * (2.0f*SijRj3*dr.z + dr.x*dr.x*Oct1.z + dr.y*dr.y*Oct2.z + dr.z*dr.z*Oct3.x); // 13 FLOP
/*** part 2 ***/
grav0.x += inv_ds7*Oct3.y * dr.y*dr.z; // 4 FLOP
grav0.y += inv_ds7*Oct3.y * dr.z*dr.x; // 4 FLOP
grav0.z += inv_ds7*Oct3.y * dr.x*dr.y; // 4 FLOP
grav.x += grav0.x;
grav.y += grav0.y;
grav.z += grav0.z;
// TOTAL 108 FLOP
return grav;
}
__device__ float4 evaluate_body_node_Octupole(float4 acc,
float4 body_pos,
int &n_inter,
int2 list_len) {
extern __shared__ float4 shared_com[];
float4 *shared_Oct1 = &shared_com[blockDim.x];
float4 *shared_Oct2 = &shared_Oct1[blockDim.x];
float2 *shared_Oct3 = (float2*)&shared_Oct2[blockDim.x];
n_inter = 0;
for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x) {
int node = tex1Dfetch(interaction_node_tex, i + threadIdx.x);
if ( (node < 0) || (node >= n_nodes) ) node = 0;
shared_com[threadIdx.x] = tex1Dfetch(node_com_tex, node);
shared_Oct1[threadIdx.x] = tex1Dfetch(Oct1_tex, node);
shared_Oct2[threadIdx.x] = tex1Dfetch(Oct2_tex, node);
shared_Oct3[threadIdx.x] = tex1Dfetch(Oct3_tex, node);
if (i + threadIdx.x >= list_len.x + list_len.y) {
float4 null4 = {0,0,0,0};
float2 null2 = {0,0};
shared_Oct1[threadIdx.x] = null4;
shared_Oct2[threadIdx.x] = null4;
shared_Oct3[threadIdx.x] = null2;
}
__syncthreads();
/* check for body-node interaction */
for (int j = 0; j < blockDim.x; j++) {
n_inter++;
acc = body_node_Octupole(acc, body_pos, shared_com[j],
shared_Oct1[j], shared_Oct2[j], shared_Oct3[j]);
}
__syncthreads();
}
return acc;
}
/***************************/
/* body-node interaction */
/***************************/
__device__ float4 body_node_interaction(float4 grav,
float4 body_i,
float4 com,
float4 Qu, float4 Qd) {
float3 dr;
dr.x = body_i.x - com.x; // 1 FLOP
dr.y = body_i.y - com.y; // 1 FLOP
dr.z = body_i.z - com.z; // 1 FLOP
float ds2 = (((dr.x*dr.x) + dr.y*dr.y) + dr.z*dr.z); // 5 FLOP
float inv_ds = rsqrt(ds2 + softening_squared) * (ds2 != 0.0f) ; // 3 FLOP
float inv_ds2 = inv_ds*inv_ds; // 1 FLOP
float inv_ds3 = inv_ds *inv_ds2; // 1 FLOP
float inv_ds5 = inv_ds3*inv_ds2; // 1 FLOP
/************/
/* potential */
/************/
grav.w -= com.w * inv_ds; // 2 FLOP
float Qy0 = inv_ds5 * (Qd.x*dr.x + Qu.x*dr.y + Qu.y*dr.z); // 6 FLOP
float Qy1 = inv_ds5 * (Qu.x*dr.x + Qd.y*dr.y + Qu.z*dr.z); // 6 FLOP
float Qy2 = inv_ds5 * (Qu.y*dr.x + Qu.z*dr.y + Qd.z*dr.z); // 6 FLOP
float yQy = Qy0 * dr.x + Qy1 * dr.y + Qy2 * dr.z; // 5 FLOP
grav.w -= 0.5f * yQy; // 2 FLOP
/* acceleartion */
yQy = com.w * inv_ds3 + inv_ds2*2.5f * yQy; // 4 FLOP
grav.x += Qy0 - yQy * dr.x; // 3 FLOPS
grav.y += Qy1 - yQy * dr.y; // 3 FLOPS
grav.z += Qy2 - yQy * dr.z; // 3 FLOPS
// TOTAL 54 FLOP
return grav;
}
__device__ float4 evaluate_body_node(float4 acc,
float4 body_pos,
int &n_inter,
int2 list_len,
int n_in_cell) {
extern __shared__ float4 shared_com[];
float4 *shared_Qu = &shared_com[blockDim.x];
float4 *shared_Qd = &shared_Qu [blockDim.x];
n_inter = 0;
int i_thread = threadIdx.x/n_in_cell;
int n_threads = blockDim.x/n_in_cell;
int n_per_thread = blockDim.x/n_threads;
int j0 = i_thread * n_per_thread;
int j1 = (i_thread+1) * n_per_thread;
if (i_thread + 1 == n_threads)
j1 = blockDim.x;
for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x) {
int node = tex1Dfetch(interaction_node_tex, i + threadIdx.x);
if ( (node < 0) || (node >= n_nodes) ) node = 0;
shared_com[threadIdx.x] = tex1Dfetch(node_com_tex, node);
shared_Qu[threadIdx.x] = tex1Dfetch(node_Qu_tex, node);
shared_Qd[threadIdx.x] = tex1Dfetch(node_Qd_tex, node);
if (i + threadIdx.x >= list_len.x + list_len.y) {
float4 null4 = {0.0f,0.0f,0.0f,0.0f};
shared_com[threadIdx.x] = null4;
shared_Qu [threadIdx.x] = null4;
shared_Qd [threadIdx.x] = null4;
}
__syncthreads();
/* check for body-node interaction */
for (int j = j0; j < j1; j++) {
n_inter++;
acc = body_node_interaction(acc, body_pos,
shared_com[j],
shared_Qu[j], shared_Qd[j]);
}
__syncthreads();
}
/*** now combine accelarations ****/
int *n_inter_sh = (int*)&shared_com[blockDim.x + 1];
shared_com[threadIdx.x] = acc;
n_inter_sh[threadIdx.x] = n_inter;
__syncthreads();
if (threadIdx.x < n_in_cell) {
for (int i = n_in_cell + threadIdx.x; i < n_in_cell*n_threads; i += n_in_cell) {
float4 acc1 = shared_com[i];
acc.x += acc1.x;
acc.y += acc1.y;
acc.z += acc1.z;
acc.w += acc1.w;
}
for (int i = n_in_cell + threadIdx.x; i < blockDim.x; i += n_in_cell) {
n_inter += n_inter_sh[i];
}
}
__syncthreads();
return acc;
}
__device__ float4 evaluate_body_leaf(float4 acc,
float4 body_pos,
int &n_inter,
int2 list_len) {
extern __shared__ int shared_offset[];
int *shared_len = (int*)&shared_offset[blockDim.x];
float4 *shared_pos = (float4*)&shared_len[blockDim.x];
n_inter = 0;
int tile = 0;
for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x, tile++) {
int node_id = tex1Dfetch(interaction_leaf_tex, i + threadIdx.x);
shared_len [threadIdx.x] = tex1Dfetch(n_in_node_tex, node_id);
shared_offset[threadIdx.x] = tex1Dfetch(node_bodies_offset_tex, node_id);
__syncthreads();
int j = min(blockDim.x, list_len.y - tile*blockDim.x);
while (j-- > 0) {
int len = shared_len[j];
__syncthreads();
shared_pos[threadIdx.x] = tex1Dfetch(bodies_pos_tex, shared_offset[j] + threadIdx.x);
__syncthreads();
while(len-- > 0) {
n_inter++;
acc = body_body_interaction(acc, body_pos, shared_pos[len]);
}
__syncthreads();
}
__syncthreads();
}
return acc;
}
__global__ void dev_evaluate_gravity_node(int cell_offset,
float4 *grav_acc,
int *n_interactions,
int2 *interaction_node_len) {
int cellId = cell_offset + blockIdx.x;
bool write_flag = true;
if (cellId >= n_cells) {
cellId = blockIdx.x;
write_flag = false;
}
int index = tex1Dfetch(cell_bodies_offset_tex, cellId);
int n_in_cell = tex1Dfetch(n_in_cell_tex, cellId);
float4 body_pos = tex1Dfetch(bodies_pos_tex, index + threadIdx.x%n_in_cell);
float4 acc = {0,0,0,0};
int n_inter;
#ifdef OCTUPOLE
acc = evaluate_body_node_Octupole(acc, body_pos, n_inter,
interaction_node_len[cellId]);
#endif
#ifdef QUADRUPOLE
acc = evaluate_body_node(acc, body_pos, n_inter,
interaction_node_len[cellId],
n_in_cell);
#endif
if (threadIdx.x < n_in_cell) {
if (write_flag) {
grav_acc[index + threadIdx.x] = acc;
// fprintf(stderr, "cellId= %d index= %d n_in_cell= %d\n",
// cellId, index + threadIdx.x, n_in_cell);
// fprintf(stderr, " acc= [%f %f %f %f]\n", acc.x, acc.y, acc.z, acc.w);
}
n_interactions[index + threadIdx.x] = n_inter;
}
}
__global__ void dev_evaluate_gravity_leaf(int cell_offset,
float4 *grav_acc,
int *n_interactions,
int2 *interaction_leaf_len) {
int cellId = cell_offset + blockIdx.x;
bool write_flag = true;
if (cellId >= n_cells) {
cellId = blockIdx.x;
write_flag = false;
}
int index = tex1Dfetch(cell_bodies_offset_tex, cellId);
int n_in_cell = tex1Dfetch(n_in_cell_tex, cellId);
float4 body_pos = tex1Dfetch(bodies_pos_tex, index + threadIdx.x%n_in_cell);
float4 acc = grav_acc[index + threadIdx.x%n_in_cell];
int n_inter;
acc = evaluate_body_leaf(acc, body_pos, n_inter,
interaction_leaf_len[cellId]);
if (threadIdx.x < n_in_cell) {
if (write_flag)
grav_acc[index + threadIdx.x] = acc;
n_interactions[index + threadIdx.x] = n_inter;
}
}
#endif
| ef46bd5801550573e4901a6abe8d37eb7023290e.cu | #ifndef _DEV_EVALUATE_GRAVITY_CU
#define _DEV_EVALUATE_GRAVITY_CU
#ifndef __DEVICE_EMULATION__
#define _DEVICE_CODE_
#endif
#include "octgravdefs.h"
#include "dev_octgrav_tex.cuh"
#ifndef LMEM_STACK_SIZE
#define LMEM_STACK_SIZE 256
#endif
#define LEAF_BIT (1 << (24))
__device__ bool open_node(float4 cell_com,
float4 cell_pos,
float4 node_pos,
float4 node_com) {
float3 dr = {fabs(node_com.x - cell_pos.x) - cell_pos.w,
fabs(node_com.y - cell_pos.y) - cell_pos.w,
fabs(node_com.z - cell_pos.z) - cell_pos.w};
dr.x += fabs(dr.x); dr.x *= 0.5f;
dr.y += fabs(dr.y); dr.y *= 0.5f;
dr.z += fabs(dr.z); dr.z *= 0.5f;
float ds = sqrtf(dr.x*dr.x + dr.y*dr.y + dr.z*dr.z);
return ( 2.0f*node_pos.w*inv_opening_angle > ds - cell_com.w);
}
/**********************************************
* compute length of the interaction list *
**********************************************/
template<int octant>
__device__ int4 interact_len(int node, int node_old,
float4 cell_com,
float4 cell_pos,
int *ids_stack,
int4 stack) {
/* if empty, exit */
if (node == 0) return stack;
/* check if the leaf or node has to be opened */
float4 node_pos = tex1Dfetch(node_pos_tex, (node_old << (2)) + octant);
float4 node_com = tex1Dfetch(node_com_tex, (node_old << (2)) + octant);
stack.w += 8;
if (open_node(cell_com, cell_pos, node_pos, node_com)) {
if ((node & LEAF_BIT) == 0) { /* if node, */
ids_stack[stack.x] = node; /* store it in stack */
stack.x++;
stack.w += 1;
} else {
stack.z++; /* otherwise account for this leaf */
}
} else {
stack.y++; /* account for the node */
}
return stack;
}
__device__ int3 compute_interaction_list_len(float4 cell_com,
float4 cell_pos) {
int ids_stack[LMEM_STACK_SIZE];
int node = 0;
int4 stack = {0,0,0,0};
ids_stack[stack.x] = node;
stack.w += 1;
stack.x++;
while(stack.x > 0) {
/* read node id & pos */
stack.x--;
node = ids_stack[stack.x];
stack.w += 1; /* 1 for id & 4 for pos */
int4 up = tex1Dfetch(children_tex, node + 0);
int4 dn = tex1Dfetch(children_tex, node + 1);
stack.w += 8;
#define INTERACT_LEN(oct, child) \
{stack = interact_len<oct>(child, \
node, \
cell_com, \
cell_pos, \
ids_stack, \
stack);}
INTERACT_LEN(0, up.x);
INTERACT_LEN(1, up.y);
INTERACT_LEN(2, up.z);
INTERACT_LEN(3, up.w);
INTERACT_LEN(4, dn.x);
INTERACT_LEN(5, dn.y);
INTERACT_LEN(6, dn.z);
INTERACT_LEN(7, dn.w);
}
/*
* number of nodes,
* number of leaves,
* number of reads from + writes to memory.
*/
return make_int3(stack.y, stack.z, stack.w);
}
__global__ void dev_compute_interaction_list_len(int3 *interaction_list_len) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n_cells)
index = threadIdx.x;
float4 cell_com = tex1Dfetch(cell_com_tex, index);
float4 cell_pos = tex1Dfetch(cell_pos_tex, index);
cell_com.w = sqrtf((cell_com.x - cell_pos.x)*(cell_com.x - cell_pos.x)+
(cell_com.y - cell_pos.y)*(cell_com.y - cell_pos.y)+
(cell_com.z - cell_pos.z)*(cell_com.z - cell_pos.z));
interaction_list_len[index] =
compute_interaction_list_len(cell_com, cell_pos);
}
/****************************
* build interaction list *
****************************/
template<int octant>
__device__ int3 interact_bld(int node, int node_old,
float4 cell_com,
float4 cell_pos,
int *ids_stack,
int *interaction_node_list,
int *interaction_leaf_list,
int3 stack) {
if (node == 0) return stack;
float4 node_pos = tex1Dfetch(node_pos_tex, (node_old << (2)) + octant);
float4 node_com = tex1Dfetch(node_com_tex, (node_old << (2)) + octant);
if (open_node(cell_com, cell_pos, node_pos, node_com)) {
if ((node & LEAF_BIT) == 0) { /* if node, */
ids_stack[stack.x] = node; /* store it in stack */
stack.x++;
} else {
interaction_leaf_list[stack.z++] = (node_old << (2)) + octant;
}
} else {
interaction_node_list[stack.y++] = (node_old << (2)) + octant;
}
return stack;
}
__device__ void build_interaction_list(float4 cell_com,
float4 cell_pos,
int *interaction_node_list,
int *interaction_leaf_list) {
int ids_stack[LMEM_STACK_SIZE];
int node = 0;
int3 stack = {0, 0, 0};
ids_stack[stack.x] = node;
stack.x++;
while(stack.x > 0) {
/* read node id */
stack.x--;
node = ids_stack[stack.x];
int4 up = tex1Dfetch(children_tex, node + 0);
int4 dn = tex1Dfetch(children_tex, node + 1);
#define INTERACT_BUILD(oct, child) \
{stack = interact_bld<oct>(child, \
node, \
cell_com, \
cell_pos, \
ids_stack, \
interaction_node_list, \
interaction_leaf_list, \
stack);}
INTERACT_BUILD(0, up.x);
INTERACT_BUILD(1, up.y);
INTERACT_BUILD(2, up.z);
INTERACT_BUILD(3, up.w);
INTERACT_BUILD(4, dn.x);
INTERACT_BUILD(5, dn.y);
INTERACT_BUILD(6, dn.z);
INTERACT_BUILD(7, dn.w);
}
}
__global__ void dev_build_interaction_list(int cell_offset,
int *interaction_node_list,
int2 *interaction_node_offset,
int *interaction_leaf_list,
int2 *interaction_leaf_offset) {
int index = cell_offset + (blockIdx.x * blockDim.x + threadIdx.x);
if (index < n_cells) {
float4 cell_com = tex1Dfetch(cell_com_tex, index);
float4 cell_pos = tex1Dfetch(cell_pos_tex, index);
cell_com.w = sqrtf((cell_com.x - cell_pos.x)*(cell_com.x - cell_pos.x)+
(cell_com.y - cell_pos.y)*(cell_com.y - cell_pos.y)+
(cell_com.z - cell_pos.z)*(cell_com.z - cell_pos.z));
build_interaction_list(cell_com, cell_pos,
&interaction_node_list[interaction_node_offset[index].x],
&interaction_leaf_list[interaction_leaf_offset[index].x]);
}
}
/**************************************************
***************************************************
*** ***
** evaluate gravity via the interaction list ***
*** ***
***************************************************
***************************************************/
/***************************/
/* body-body interaction */
/***************************/
__device__ float4 body_body_interaction(float4 grav, float4 body_i, float4 body_j) {
float3 dr;
dr.x = body_i.x - body_j.x;
dr.y = body_i.y - body_j.y;
dr.z = body_i.z - body_j.z;
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z;
float inv_ds = rsqrtf(ds2 + softening_squared) * (ds2 != 0.0f);
float inv_s3 = body_j.w * inv_ds*inv_ds*inv_ds;
grav.x -= inv_s3 * dr.x;
grav.y -= inv_s3 * dr.y;
grav.z -= inv_s3 * dr.z;
grav.w -= body_j.w * inv_ds;
return grav;
}
/***************************/
/* body-node Octupole interaction */
/***************************/
__device__ float4 body_node_Octupole(float4 grav,
float4 body_i,
float4 com,
float4 Oct1,
float4 Oct2,
float2 Oct3) {
float3 dr;
dr.x = body_i.x - com.x; // 1 FLOP
dr.y = body_i.y - com.y; // 1 FLOP
dr.z = body_i.z - com.z; // 1 FLOP
float ds2 = dr.x*dr.x + dr.y*dr.y + dr.z*dr.z; // 5 FLOP
float inv_ds = rsqrt(ds2 + softening_squared) * (ds2 != 0.0f); // 3 FLOP
float inv_ds2 = inv_ds*inv_ds; // 1 FLOP
float inv_ds3 = inv_ds *inv_ds2; // 1 FLOP
float inv_ds5 = inv_ds3*inv_ds2; // 1 FLOP
float inv_ds7 = 0.5f*inv_ds5*inv_ds2; // 2 FLOP
float SijRj1 = Oct1.x*dr.x + Oct1.y*dr.y + Oct1.z*dr.z; // 5 FLOP
float SijRj2 = Oct2.x*dr.x + Oct2.y*dr.y + Oct2.z*dr.z; // 5 FLOP
float SijRj3 = Oct1.w*dr.x + Oct2.w*dr.y + Oct3.x*dr.z; // 5 FLOP
float SijRjRi_sq = SijRj1 * dr.x*dr.x + SijRj2 * dr.y*dr.y + SijRj3 * dr.z*dr.z; // 8 FLOP
/******************/
/*** POTENTIAL ***/
/******************/
float pot = inv_ds7 * (SijRjRi_sq + Oct3.y*dr.x*dr.y*dr.z); // 5 FLOP
grav.w -= pot; // 1 FLOP
/*********************/
/*** ACCELERATION ***/
/*********************/
/*** part 1 ***/
float3 grav0 = {0.0f,0.0f,0.0f};
grav0.x -= 7.0f*inv_ds2 * dr.x * pot; // 4 FLOP
grav0.y -= 7.0f*inv_ds2 * dr.y * pot; // 4 FLOP
grav0.z -= 7.0f*inv_ds2 * dr.z * pot; // 4 FLOP
/*** part 2 ***/
/* S11*dx^2 + S21*dy^2 + S31*dz^2 */
/* S12*dx^2 + S22*dy^2 + S32*dz^2 */
/* S13*dx^2 + S23*dy^2 + S33*dz^2 */
grav0.x += inv_ds7 * (2.0f*SijRj1*dr.x + dr.x*dr.x*Oct1.x + dr.y*dr.y*Oct2.x + dr.z*dr.z*Oct1.w); // 13 FLOP
grav0.y += inv_ds7 * (2.0f*SijRj2*dr.y + dr.x*dr.x*Oct1.y + dr.y*dr.y*Oct2.y + dr.z*dr.z*Oct2.w); // 13 FLOP
grav0.y += inv_ds7 * (2.0f*SijRj3*dr.z + dr.x*dr.x*Oct1.z + dr.y*dr.y*Oct2.z + dr.z*dr.z*Oct3.x); // 13 FLOP
/*** part 2 ***/
grav0.x += inv_ds7*Oct3.y * dr.y*dr.z; // 4 FLOP
grav0.y += inv_ds7*Oct3.y * dr.z*dr.x; // 4 FLOP
grav0.z += inv_ds7*Oct3.y * dr.x*dr.y; // 4 FLOP
grav.x += grav0.x;
grav.y += grav0.y;
grav.z += grav0.z;
// TOTAL 108 FLOP
return grav;
}
__device__ float4 evaluate_body_node_Octupole(float4 acc,
float4 body_pos,
int &n_inter,
int2 list_len) {
extern __shared__ float4 shared_com[];
float4 *shared_Oct1 = &shared_com[blockDim.x];
float4 *shared_Oct2 = &shared_Oct1[blockDim.x];
float2 *shared_Oct3 = (float2*)&shared_Oct2[blockDim.x];
n_inter = 0;
for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x) {
int node = tex1Dfetch(interaction_node_tex, i + threadIdx.x);
if ( (node < 0) || (node >= n_nodes) ) node = 0;
shared_com[threadIdx.x] = tex1Dfetch(node_com_tex, node);
shared_Oct1[threadIdx.x] = tex1Dfetch(Oct1_tex, node);
shared_Oct2[threadIdx.x] = tex1Dfetch(Oct2_tex, node);
shared_Oct3[threadIdx.x] = tex1Dfetch(Oct3_tex, node);
if (i + threadIdx.x >= list_len.x + list_len.y) {
float4 null4 = {0,0,0,0};
float2 null2 = {0,0};
shared_Oct1[threadIdx.x] = null4;
shared_Oct2[threadIdx.x] = null4;
shared_Oct3[threadIdx.x] = null2;
}
__syncthreads();
/* check for body-node interaction */
for (int j = 0; j < blockDim.x; j++) {
n_inter++;
acc = body_node_Octupole(acc, body_pos, shared_com[j],
shared_Oct1[j], shared_Oct2[j], shared_Oct3[j]);
}
__syncthreads();
}
return acc;
}
/***************************/
/* body-node interaction */
/***************************/
__device__ float4 body_node_interaction(float4 grav,
float4 body_i,
float4 com,
float4 Qu, float4 Qd) {
float3 dr;
dr.x = body_i.x - com.x; // 1 FLOP
dr.y = body_i.y - com.y; // 1 FLOP
dr.z = body_i.z - com.z; // 1 FLOP
float ds2 = (((dr.x*dr.x) + dr.y*dr.y) + dr.z*dr.z); // 5 FLOP
float inv_ds = rsqrt(ds2 + softening_squared) * (ds2 != 0.0f) ; // 3 FLOP
float inv_ds2 = inv_ds*inv_ds; // 1 FLOP
float inv_ds3 = inv_ds *inv_ds2; // 1 FLOP
float inv_ds5 = inv_ds3*inv_ds2; // 1 FLOP
/************/
/* potential */
/************/
grav.w -= com.w * inv_ds; // 2 FLOP
float Qy0 = inv_ds5 * (Qd.x*dr.x + Qu.x*dr.y + Qu.y*dr.z); // 6 FLOP
float Qy1 = inv_ds5 * (Qu.x*dr.x + Qd.y*dr.y + Qu.z*dr.z); // 6 FLOP
float Qy2 = inv_ds5 * (Qu.y*dr.x + Qu.z*dr.y + Qd.z*dr.z); // 6 FLOP
float yQy = Qy0 * dr.x + Qy1 * dr.y + Qy2 * dr.z; // 5 FLOP
grav.w -= 0.5f * yQy; // 2 FLOP
/* acceleartion */
yQy = com.w * inv_ds3 + inv_ds2*2.5f * yQy; // 4 FLOP
grav.x += Qy0 - yQy * dr.x; // 3 FLOPS
grav.y += Qy1 - yQy * dr.y; // 3 FLOPS
grav.z += Qy2 - yQy * dr.z; // 3 FLOPS
// TOTAL 54 FLOP
return grav;
}
__device__ float4 evaluate_body_node(float4 acc,
float4 body_pos,
int &n_inter,
int2 list_len,
int n_in_cell) {
extern __shared__ float4 shared_com[];
float4 *shared_Qu = &shared_com[blockDim.x];
float4 *shared_Qd = &shared_Qu [blockDim.x];
n_inter = 0;
int i_thread = threadIdx.x/n_in_cell;
int n_threads = blockDim.x/n_in_cell;
int n_per_thread = blockDim.x/n_threads;
int j0 = i_thread * n_per_thread;
int j1 = (i_thread+1) * n_per_thread;
if (i_thread + 1 == n_threads)
j1 = blockDim.x;
for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x) {
int node = tex1Dfetch(interaction_node_tex, i + threadIdx.x);
if ( (node < 0) || (node >= n_nodes) ) node = 0;
shared_com[threadIdx.x] = tex1Dfetch(node_com_tex, node);
shared_Qu[threadIdx.x] = tex1Dfetch(node_Qu_tex, node);
shared_Qd[threadIdx.x] = tex1Dfetch(node_Qd_tex, node);
if (i + threadIdx.x >= list_len.x + list_len.y) {
float4 null4 = {0.0f,0.0f,0.0f,0.0f};
shared_com[threadIdx.x] = null4;
shared_Qu [threadIdx.x] = null4;
shared_Qd [threadIdx.x] = null4;
}
__syncthreads();
/* check for body-node interaction */
for (int j = j0; j < j1; j++) {
n_inter++;
acc = body_node_interaction(acc, body_pos,
shared_com[j],
shared_Qu[j], shared_Qd[j]);
}
__syncthreads();
}
/*** now combine accelarations ****/
int *n_inter_sh = (int*)&shared_com[blockDim.x + 1];
shared_com[threadIdx.x] = acc;
n_inter_sh[threadIdx.x] = n_inter;
__syncthreads();
if (threadIdx.x < n_in_cell) {
for (int i = n_in_cell + threadIdx.x; i < n_in_cell*n_threads; i += n_in_cell) {
float4 acc1 = shared_com[i];
acc.x += acc1.x;
acc.y += acc1.y;
acc.z += acc1.z;
acc.w += acc1.w;
}
for (int i = n_in_cell + threadIdx.x; i < blockDim.x; i += n_in_cell) {
n_inter += n_inter_sh[i];
}
}
__syncthreads();
return acc;
}
__device__ float4 evaluate_body_leaf(float4 acc,
float4 body_pos,
int &n_inter,
int2 list_len) {
extern __shared__ int shared_offset[];
int *shared_len = (int*)&shared_offset[blockDim.x];
float4 *shared_pos = (float4*)&shared_len[blockDim.x];
n_inter = 0;
int tile = 0;
for (int i = list_len.x; i < list_len.x + list_len.y; i += blockDim.x, tile++) {
int node_id = tex1Dfetch(interaction_leaf_tex, i + threadIdx.x);
shared_len [threadIdx.x] = tex1Dfetch(n_in_node_tex, node_id);
shared_offset[threadIdx.x] = tex1Dfetch(node_bodies_offset_tex, node_id);
__syncthreads();
int j = min(blockDim.x, list_len.y - tile*blockDim.x);
while (j-- > 0) {
int len = shared_len[j];
__syncthreads();
shared_pos[threadIdx.x] = tex1Dfetch(bodies_pos_tex, shared_offset[j] + threadIdx.x);
__syncthreads();
while(len-- > 0) {
n_inter++;
acc = body_body_interaction(acc, body_pos, shared_pos[len]);
}
__syncthreads();
}
__syncthreads();
}
return acc;
}
__global__ void dev_evaluate_gravity_node(int cell_offset,
float4 *grav_acc,
int *n_interactions,
int2 *interaction_node_len) {
int cellId = cell_offset + blockIdx.x;
bool write_flag = true;
if (cellId >= n_cells) {
cellId = blockIdx.x;
write_flag = false;
}
int index = tex1Dfetch(cell_bodies_offset_tex, cellId);
int n_in_cell = tex1Dfetch(n_in_cell_tex, cellId);
float4 body_pos = tex1Dfetch(bodies_pos_tex, index + threadIdx.x%n_in_cell);
float4 acc = {0,0,0,0};
int n_inter;
#ifdef OCTUPOLE
acc = evaluate_body_node_Octupole(acc, body_pos, n_inter,
interaction_node_len[cellId]);
#endif
#ifdef QUADRUPOLE
acc = evaluate_body_node(acc, body_pos, n_inter,
interaction_node_len[cellId],
n_in_cell);
#endif
if (threadIdx.x < n_in_cell) {
if (write_flag) {
grav_acc[index + threadIdx.x] = acc;
// fprintf(stderr, "cellId= %d index= %d n_in_cell= %d\n",
// cellId, index + threadIdx.x, n_in_cell);
// fprintf(stderr, " acc= [%f %f %f %f]\n", acc.x, acc.y, acc.z, acc.w);
}
n_interactions[index + threadIdx.x] = n_inter;
}
}
__global__ void dev_evaluate_gravity_leaf(int cell_offset,
float4 *grav_acc,
int *n_interactions,
int2 *interaction_leaf_len) {
int cellId = cell_offset + blockIdx.x;
bool write_flag = true;
if (cellId >= n_cells) {
cellId = blockIdx.x;
write_flag = false;
}
int index = tex1Dfetch(cell_bodies_offset_tex, cellId);
int n_in_cell = tex1Dfetch(n_in_cell_tex, cellId);
float4 body_pos = tex1Dfetch(bodies_pos_tex, index + threadIdx.x%n_in_cell);
float4 acc = grav_acc[index + threadIdx.x%n_in_cell];
int n_inter;
acc = evaluate_body_leaf(acc, body_pos, n_inter,
interaction_leaf_len[cellId]);
if (threadIdx.x < n_in_cell) {
if (write_flag)
grav_acc[index + threadIdx.x] = acc;
n_interactions[index + threadIdx.x] = n_inter;
}
}
#endif
|
29e681570b54a0fdea01e49b40f40bf83b98f5de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include "pcl/gpu/utils/device/block.hpp"
//#include "pcl/gpu/utils/device/warp.hpp"
//#include "pcl/gpu/utils/device/vector_math.hpp"
#include "thrust/device_ptr.h"
#include "thrust/scan.h"
namespace pcl
{
namespace device
{
//texture<int, 1, hipReadModeElementType> edgeTex;
texture<int, 1, hipReadModeElementType> triTex;
texture<int, 1, hipReadModeElementType> numVertsTex;
}
}
void
pcl::device::bindTextures (const int */*edgeBuf*/, const int *triBuf, const int *numVertsBuf)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<int>();
//cudaSafeCall(hipBindTexture(0, edgeTex, edgeBuf, desc) );
cudaSafeCall (hipBindTexture (0, triTex, triBuf, desc) );
cudaSafeCall (hipBindTexture (0, numVertsTex, numVertsBuf, desc) );
}
void
pcl::device::unbindTextures ()
{
//cudaSafeCall( hipUnbindTexture(edgeTex) );
cudaSafeCall ( hipUnbindTexture (numVertsTex) );
cudaSafeCall ( hipUnbindTexture (triTex) );
}
namespace pcl
{
namespace device
{
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct CubeIndexEstimator
{
PtrStep<short2> volume;
static __device__ __forceinline__ float isoValue() { return 0.f; }
__device__ __forceinline__ void
readTsdf (int x, int y, int z, float& tsdf, int& weight) const
{
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
}
__device__ __forceinline__ int
computeCubeIndex (int x, int y, int z, float f[8]) const
{
int weight;
readTsdf (x, y, z, f[0], weight); if (weight == 0) return 0;
readTsdf (x + 1, y, z, f[1], weight); if (weight == 0) return 0;
readTsdf (x + 1, y + 1, z, f[2], weight); if (weight == 0) return 0;
readTsdf (x, y + 1, z, f[3], weight); if (weight == 0) return 0;
readTsdf (x, y, z + 1, f[4], weight); if (weight == 0) return 0;
readTsdf (x + 1, y, z + 1, f[5], weight); if (weight == 0) return 0;
readTsdf (x + 1, y + 1, z + 1, f[6], weight); if (weight == 0) return 0;
readTsdf (x, y + 1, z + 1, f[7], weight); if (weight == 0) return 0;
// calculate flag indicating if each vertex is inside or outside isosurface
int cubeindex;
cubeindex = int(f[0] < isoValue());
cubeindex += int(f[1] < isoValue()) * 2;
cubeindex += int(f[2] < isoValue()) * 4;
cubeindex += int(f[3] < isoValue()) * 8;
cubeindex += int(f[4] < isoValue()) * 16;
cubeindex += int(f[5] < isoValue()) * 32;
cubeindex += int(f[6] < isoValue()) * 64;
cubeindex += int(f[7] < isoValue()) * 128;
return cubeindex;
}
};
struct OccupiedVoxels : public CubeIndexEstimator
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE
};
mutable int* voxels_indeces;
mutable int* vetexes_number;
int max_size;
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
int ftid = Block::flattenedThreadId ();
int warp_id = Warp::id();
int lane_id = Warp::laneId();
volatile __shared__ int warps_buffer[WARPS_COUNT];
for (int z = 0; z < VOLUME_Z - 1; z++)
{
int numVerts = 0;
if (x + 1 < VOLUME_X && y + 1 < VOLUME_Y)
{
float field[8];
int cubeindex = computeCubeIndex (x, y, z, field);
// read number of vertices from texture
numVerts = (cubeindex == 0 || cubeindex == 255) ? 0 : tex1Dfetch (numVertsTex, cubeindex);
}
int total = __popc (__ballot_sync (__activemask (), numVerts > 0));
if (total == 0)
continue;
if (lane_id == 0)
{
int old = atomicAdd (&global_count, total);
warps_buffer[warp_id] = old;
}
int old_global_voxels_count = warps_buffer[warp_id];
int offs = Warp::binaryExclScan (__ballot_sync (__activemask (), numVerts > 0));
if (old_global_voxels_count + offs < max_size && numVerts > 0)
{
voxels_indeces[old_global_voxels_count + offs] = VOLUME_Y * VOLUME_X * z + VOLUME_X * y + x;
vetexes_number[old_global_voxels_count + offs] = numVerts;
}
bool full = old_global_voxels_count + total >= max_size;
if (full)
break;
} /* for(int z = 0; z < VOLUME_Z - 1; z++) */
/////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min (max_size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator () */
};
__global__ void getOccupiedVoxelsKernel (const OccupiedVoxels ov) { ov (); }
}
}
// Scans the TSDF volume for voxels crossed by the isosurface.
// Row 0 of occupied_voxels receives the linear voxel indices, row 1 the
// number of triangle vertices each voxel will emit. Returns the number of
// occupied voxels found, clamped to the capacity of occupied_voxels.
int
pcl::device::getOccupiedVoxels (const PtrStep<short2>& volume, DeviceArray2D<int>& occupied_voxels)
{
OccupiedVoxels ov;
ov.volume = volume;
ov.voxels_indeces = occupied_voxels.ptr (0);
ov.vetexes_number = occupied_voxels.ptr (1);
ov.max_size = occupied_voxels.cols ();
// One thread per (x, y) column; each thread walks the z axis in the kernel.
dim3 block (OccupiedVoxels::CTA_SIZE_X, OccupiedVoxels::CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//hipFuncSetCacheConfig(getOccupiedVoxelsKernel, hipFuncCachePreferL1);
//printFuncAttrib(getOccupiedVoxelsKernel);
hipLaunchKernelGGL(( getOccupiedVoxelsKernel), dim3(grid), dim3(block), 0, 0, ov);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
// output_count is a __device__ symbol written by the last block to finish.
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return size;
}
// Exclusive-scans the per-voxel vertex counts (row 1) into per-voxel output
// offsets (row 2) and returns the total vertex count
// (last count + last scan element — the standard exclusive-scan identity).
int
pcl::device::computeOffsetsAndTotalVertexes (DeviceArray2D<int>& occupied_voxels)
{
thrust::device_ptr<int> beg (occupied_voxels.ptr (1));
thrust::device_ptr<int> end = beg + occupied_voxels.cols ();
thrust::device_ptr<int> out (occupied_voxels.ptr (2));
thrust::exclusive_scan (beg, end, out);
int lastElement, lastScanElement;
// Wrap the last element of each row so it can be downloaded to the host.
DeviceArray<int> last_elem (occupied_voxels.ptr(1) + occupied_voxels.cols () - 1, 1);
DeviceArray<int> last_scan (occupied_voxels.ptr(2) + occupied_voxels.cols () - 1, 1);
last_elem.download (&lastElement);
last_scan.download (&lastScanElement);
return lastElement + lastScanElement;
}
namespace pcl
{
namespace device
{
// Emits the marching-cubes triangles for each occupied voxel.
// One thread handles one voxel taken from occupied_voxels; the resulting
// vertices are written to `output` starting at vertex_ofssets[idx].
struct TrianglesGenerator : public CubeIndexEstimator
{
enum { CTA_SIZE = 256, MAX_GRID_SIZE_X = 65536 };
const int* occupied_voxels;
const int* vertex_ofssets;
int voxels_count;
float3 cell_size;
mutable PointType *output;
// Metric coordinates of the center of cell (x, y, z).
__device__ __forceinline__ float3
getNodeCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to volume cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
// Linear interpolation of the iso-crossing point between two cell corners;
// the 1e-15f term avoids division by zero when f0 == f1.
__device__ __forceinline__ float3
vertex_interp (float3 p0, float3 p1, float f0, float f1) const
{
float t = (isoValue() - f0) / (f1 - f0 + 1e-15f);
float x = p0.x + t * (p1.x - p0.x);
float y = p0.y + t * (p1.y - p0.y);
float z = p0.z + t * (p1.z - p0.z);
return make_float3 (x, y, z);
}
__device__ __forceinline__ void
operator () () const
{
int tid = threadIdx.x;
// 2D grid flattened to a linear voxel index; blocks are CTA_SIZE wide.
int idx = (blockIdx.y * MAX_GRID_SIZE_X + blockIdx.x) * CTA_SIZE + tid;
// NOTE(review): threads past voxels_count return before the
// __syncthreads() below — verify blocks are fully populated or that the
// divergent barrier is benign on the targeted hardware.
if (idx >= voxels_count)
return;
// Decompose the linear voxel index back into (x, y, z).
int voxel = occupied_voxels[idx];
int z = voxel / (VOLUME_X * VOLUME_Y);
int y = (voxel - z * VOLUME_X * VOLUME_Y) / VOLUME_X;
int x = (voxel - z * VOLUME_X * VOLUME_Y) - y * VOLUME_X;
float f[8];
int cubeindex = computeCubeIndex (x, y, z, f);
// calculate cell vertex positions
float3 v[8];
v[0] = getNodeCoo (x, y, z);
v[1] = getNodeCoo (x + 1, y, z);
v[2] = getNodeCoo (x + 1, y + 1, z);
v[3] = getNodeCoo (x, y + 1, z);
v[4] = getNodeCoo (x, y, z + 1);
v[5] = getNodeCoo (x + 1, y, z + 1);
v[6] = getNodeCoo (x + 1, y + 1, z + 1);
v[7] = getNodeCoo (x, y + 1, z + 1);
// find the vertices where the surface intersects the cube
// use shared memory to avoid using local
__shared__ float3 vertlist[12][CTA_SIZE];
vertlist[0][tid] = vertex_interp (v[0], v[1], f[0], f[1]);
vertlist[1][tid] = vertex_interp (v[1], v[2], f[1], f[2]);
vertlist[2][tid] = vertex_interp (v[2], v[3], f[2], f[3]);
vertlist[3][tid] = vertex_interp (v[3], v[0], f[3], f[0]);
vertlist[4][tid] = vertex_interp (v[4], v[5], f[4], f[5]);
vertlist[5][tid] = vertex_interp (v[5], v[6], f[5], f[6]);
vertlist[6][tid] = vertex_interp (v[6], v[7], f[6], f[7]);
vertlist[7][tid] = vertex_interp (v[7], v[4], f[7], f[4]);
vertlist[8][tid] = vertex_interp (v[0], v[4], f[0], f[4]);
vertlist[9][tid] = vertex_interp (v[1], v[5], f[1], f[5]);
vertlist[10][tid] = vertex_interp (v[2], v[6], f[2], f[6]);
vertlist[11][tid] = vertex_interp (v[3], v[7], f[3], f[7]);
__syncthreads ();
// output triangle vertices
int numVerts = tex1Dfetch (numVertsTex, cubeindex);
for (int i = 0; i < numVerts; i += 3)
{
int index = vertex_ofssets[idx] + i;
// triTex rows are 16 entries per cube configuration.
int v1 = tex1Dfetch (triTex, (cubeindex * 16) + i + 0);
int v2 = tex1Dfetch (triTex, (cubeindex * 16) + i + 1);
int v3 = tex1Dfetch (triTex, (cubeindex * 16) + i + 2);
store_point (output, index + 0, vertlist[v1][tid]);
store_point (output, index + 1, vertlist[v2][tid]);
store_point (output, index + 2, vertlist[v3][tid]);
}
}
// Writes one vertex as a homogeneous float4 (w = 1).
__device__ __forceinline__ void
store_point (float4 *ptr, int index, const float3& point) const {
ptr[index] = make_float4 (point.x, point.y, point.z, 1.0f);
}
};
// Thin kernel wrapper: runs the TrianglesGenerator functor once per thread.
__global__ void
trianglesGeneratorKernel (const TrianglesGenerator tg) {tg (); }
}
}
// Generates the marching-cubes mesh for the previously collected occupied
// voxels, writing one float4 per triangle vertex into `output`.
void
pcl::device::generateTriangles (const PtrStep<short2>& volume, const DeviceArray2D<int>& occupied_voxels, const float3& volume_size, DeviceArray<PointType>& output)
{
int device;
cudaSafeCall( hipGetDevice(&device) );
hipDeviceProp_t prop;
cudaSafeCall( hipGetDeviceProperties(&prop, device) );
// Smaller blocks on older hardware.
// NOTE(review): the kernel indexes with the fixed CTA_SIZE (256); launching
// with block_size 96 would leave gaps in idx — confirm the <2.0 path is
// still exercised/supported.
int block_size = prop.major < 2 ? 96 : 256; // please see TrianglesGenerator::CTA_SIZE
using Tg = TrianglesGenerator;
Tg tg;
tg.volume = volume;
tg.occupied_voxels = occupied_voxels.ptr (0);
tg.vertex_ofssets = occupied_voxels.ptr (2);
tg.voxels_count = occupied_voxels.cols ();
tg.cell_size.x = volume_size.x / VOLUME_X;
tg.cell_size.y = volume_size.y / VOLUME_Y;
tg.cell_size.z = volume_size.z / VOLUME_Z;
tg.output = output;
// 2D grid so more than MAX_GRID_SIZE_X blocks can be launched.
int blocks_num = divUp (tg.voxels_count, block_size);
dim3 block (block_size);
dim3 grid(min(blocks_num, Tg::MAX_GRID_SIZE_X), divUp(blocks_num, Tg::MAX_GRID_SIZE_X));
hipLaunchKernelGGL(( trianglesGeneratorKernel), dim3(grid), dim3(block), 0, 0, tg);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
} | 29e681570b54a0fdea01e49b40f40bf83b98f5de.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include "pcl/gpu/utils/device/block.hpp"
//#include "pcl/gpu/utils/device/warp.hpp"
//#include "pcl/gpu/utils/device/vector_math.hpp"
#include "thrust/device_ptr.h"
#include "thrust/scan.h"
namespace pcl
{
namespace device
{
//texture<int, 1, cudaReadModeElementType> edgeTex;
texture<int, 1, cudaReadModeElementType> triTex;
texture<int, 1, cudaReadModeElementType> numVertsTex;
}
}
// Binds the marching-cubes lookup tables (triangle table and vertex-count
// table) to the legacy texture references used by the kernels. The edge
// table binding is currently disabled (commented out), matching its unused
// texture reference above.
void
pcl::device::bindTextures (const int */*edgeBuf*/, const int *triBuf, const int *numVertsBuf)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
//cudaSafeCall(cudaBindTexture(0, edgeTex, edgeBuf, desc) );
cudaSafeCall (cudaBindTexture (0, triTex, triBuf, desc) );
cudaSafeCall (cudaBindTexture (0, numVertsTex, numVertsBuf, desc) );
}
// Releases the texture bindings established by bindTextures().
void
pcl::device::unbindTextures ()
{
//cudaSafeCall( cudaUnbindTexture(edgeTex) );
cudaSafeCall ( cudaUnbindTexture (numVertsTex) );
cudaSafeCall ( cudaUnbindTexture (triTex) );
}
namespace pcl
{
namespace device
{
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
// Shared base for the marching-cubes functors: reads TSDF values and
// classifies a cell against the isosurface (iso level 0).
struct CubeIndexEstimator
{
PtrStep<short2> volume;
// The TSDF zero crossing defines the surface.
static __device__ __forceinline__ float isoValue() { return 0.f; }
// Unpacks the tsdf value and integration weight stored at voxel (x, y, z).
__device__ __forceinline__ void
readTsdf (int x, int y, int z, float& tsdf, int& weight) const
{
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
}
// Fills f[0..7] with the TSDF values of the 8 cell corners and returns the
// marching-cubes configuration index (bit i set when corner i is below the
// iso level). Returns 0 (empty cell) as soon as any corner has weight 0,
// i.e. was never observed.
__device__ __forceinline__ int
computeCubeIndex (int x, int y, int z, float f[8]) const
{
int weight;
readTsdf (x, y, z, f[0], weight); if (weight == 0) return 0;
readTsdf (x + 1, y, z, f[1], weight); if (weight == 0) return 0;
readTsdf (x + 1, y + 1, z, f[2], weight); if (weight == 0) return 0;
readTsdf (x, y + 1, z, f[3], weight); if (weight == 0) return 0;
readTsdf (x, y, z + 1, f[4], weight); if (weight == 0) return 0;
readTsdf (x + 1, y, z + 1, f[5], weight); if (weight == 0) return 0;
readTsdf (x + 1, y + 1, z + 1, f[6], weight); if (weight == 0) return 0;
readTsdf (x, y + 1, z + 1, f[7], weight); if (weight == 0) return 0;
// calculate flag indicating if each vertex is inside or outside isosurface
int cubeindex;
cubeindex = int(f[0] < isoValue());
cubeindex += int(f[1] < isoValue()) * 2;
cubeindex += int(f[2] < isoValue()) * 4;
cubeindex += int(f[3] < isoValue()) * 8;
cubeindex += int(f[4] < isoValue()) * 16;
cubeindex += int(f[5] < isoValue()) * 32;
cubeindex += int(f[6] < isoValue()) * 64;
cubeindex += int(f[7] < isoValue()) * 128;
return cubeindex;
}
};
// Functor that compacts the indices of isosurface-crossing voxels (and
// their vertex counts) into the output arrays, using a warp-level
// atomic-then-scan allocation scheme. The last block to finish publishes
// the final count into the __device__ symbol output_count.
struct OccupiedVoxels : public CubeIndexEstimator
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 8,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
WARPS_COUNT = CTA_SIZE / Warp::WARP_SIZE
};
mutable int* voxels_indeces;
mutable int* vetexes_number;
int max_size;
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
// Early-out only when the entire warp is outside the volume, so the warp
// intrinsics below keep all relevant lanes.
// NOTE(review): __activemask() reflects whichever lanes happen to be
// converged, not necessarily the intended participant set under
// independent thread scheduling — verify on Volta+ targets.
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
int ftid = Block::flattenedThreadId ();
int warp_id = Warp::id();
int lane_id = Warp::laneId();
// Per-warp base offset obtained from the global atomic counter.
volatile __shared__ int warps_buffer[WARPS_COUNT];
for (int z = 0; z < VOLUME_Z - 1; z++)
{
int numVerts = 0;
if (x + 1 < VOLUME_X && y + 1 < VOLUME_Y)
{
float field[8];
int cubeindex = computeCubeIndex (x, y, z, field);
// read number of vertices from texture
numVerts = (cubeindex == 0 || cubeindex == 255) ? 0 : tex1Dfetch (numVertsTex, cubeindex);
}
// Number of lanes in this warp holding an occupied voxel.
int total = __popc (__ballot_sync (__activemask (), numVerts > 0));
if (total == 0)
continue;
// Lane 0 reserves `total` slots for the whole warp.
if (lane_id == 0)
{
int old = atomicAdd (&global_count, total);
warps_buffer[warp_id] = old;
}
// NOTE(review): no __syncwarp() between lane 0's write and this read —
// relies on implicit warp synchrony; confirm for independent-thread-
// scheduling architectures.
int old_global_voxels_count = warps_buffer[warp_id];
// Lane-local offset inside the warp's reservation.
int offs = Warp::binaryExclScan (__ballot_sync (__activemask (), numVerts > 0));
if (old_global_voxels_count + offs < max_size && numVerts > 0)
{
voxels_indeces[old_global_voxels_count + offs] = VOLUME_Y * VOLUME_X * z + VOLUME_X * y + x;
vetexes_number[old_global_voxels_count + offs] = numVerts;
}
bool full = old_global_voxels_count + total >= max_size;
if (full)
break;
} /* for(int z = 0; z < VOLUME_Z - 1; z++) */
/////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
// Last block to finish publishes the result and resets the counters
// so the kernel can be relaunched.
output_count = min (max_size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator () */
};
// Thin kernel wrapper: runs the OccupiedVoxels functor once per thread.
__global__ void getOccupiedVoxelsKernel (const OccupiedVoxels ov) { ov (); }
}
}
// Finds every voxel crossed by the isosurface and records, per voxel, its
// linear index (row 0 of occupied_voxels) and its triangle-vertex count
// (row 1). Returns how many occupied voxels were found, clamped to the
// capacity of occupied_voxels.
int
pcl::device::getOccupiedVoxels (const PtrStep<short2>& volume, DeviceArray2D<int>& occupied_voxels)
{
  OccupiedVoxels functor;
  functor.volume = volume;
  functor.voxels_indeces = occupied_voxels.ptr (0);
  functor.vetexes_number = occupied_voxels.ptr (1);
  functor.max_size = occupied_voxels.cols ();

  // One thread per (x, y) column of the volume; the kernel walks z itself.
  const dim3 threads (OccupiedVoxels::CTA_SIZE_X, OccupiedVoxels::CTA_SIZE_Y);
  const dim3 blocks (divUp (VOLUME_X, threads.x), divUp (VOLUME_Y, threads.y));

  getOccupiedVoxelsKernel<<<blocks, threads>>>(functor);
  cudaSafeCall ( cudaGetLastError () );
  cudaSafeCall (cudaDeviceSynchronize ());

  // The last kernel block wrote the final count into this __device__ symbol.
  int result;
  cudaSafeCall ( cudaMemcpyFromSymbol (&result, output_count, sizeof(result)) );
  return result;
}
// Converts the per-voxel vertex counts (row 1 of occupied_voxels) into
// exclusive-prefix output offsets (row 2) and returns the total number of
// vertices to be generated.
int
pcl::device::computeOffsetsAndTotalVertexes (DeviceArray2D<int>& occupied_voxels)
{
  const int n = occupied_voxels.cols ();
  thrust::device_ptr<int> counts (occupied_voxels.ptr (1));
  thrust::device_ptr<int> offsets (occupied_voxels.ptr (2));
  thrust::exclusive_scan (counts, counts + n, offsets);

  // Total = last count + last offset (exclusive-scan identity).
  DeviceArray<int> last_count_view (occupied_voxels.ptr (1) + n - 1, 1);
  DeviceArray<int> last_offset_view (occupied_voxels.ptr (2) + n - 1, 1);
  int last_count, last_offset;
  last_count_view.download (&last_count);
  last_offset_view.download (&last_offset);
  return last_count + last_offset;
}
namespace pcl
{
namespace device
{
// Emits the marching-cubes triangles for each occupied voxel.
// One thread handles one voxel taken from occupied_voxels; the resulting
// vertices are written to `output` starting at vertex_ofssets[idx].
struct TrianglesGenerator : public CubeIndexEstimator
{
enum { CTA_SIZE = 256, MAX_GRID_SIZE_X = 65536 };
const int* occupied_voxels;
const int* vertex_ofssets;
int voxels_count;
float3 cell_size;
mutable PointType *output;
// Metric coordinates of the center of cell (x, y, z).
__device__ __forceinline__ float3
getNodeCoo (int x, int y, int z) const
{
float3 coo = make_float3 (x, y, z);
coo += 0.5f; //shift to volume cell center;
coo.x *= cell_size.x;
coo.y *= cell_size.y;
coo.z *= cell_size.z;
return coo;
}
// Linear interpolation of the iso-crossing point between two cell corners;
// the 1e-15f term avoids division by zero when f0 == f1.
__device__ __forceinline__ float3
vertex_interp (float3 p0, float3 p1, float f0, float f1) const
{
float t = (isoValue() - f0) / (f1 - f0 + 1e-15f);
float x = p0.x + t * (p1.x - p0.x);
float y = p0.y + t * (p1.y - p0.y);
float z = p0.z + t * (p1.z - p0.z);
return make_float3 (x, y, z);
}
__device__ __forceinline__ void
operator () () const
{
int tid = threadIdx.x;
// 2D grid flattened to a linear voxel index; blocks are CTA_SIZE wide.
int idx = (blockIdx.y * MAX_GRID_SIZE_X + blockIdx.x) * CTA_SIZE + tid;
// NOTE(review): threads past voxels_count return before the
// __syncthreads() below — verify this divergent barrier is benign on the
// targeted hardware.
if (idx >= voxels_count)
return;
// Decompose the linear voxel index back into (x, y, z).
int voxel = occupied_voxels[idx];
int z = voxel / (VOLUME_X * VOLUME_Y);
int y = (voxel - z * VOLUME_X * VOLUME_Y) / VOLUME_X;
int x = (voxel - z * VOLUME_X * VOLUME_Y) - y * VOLUME_X;
float f[8];
int cubeindex = computeCubeIndex (x, y, z, f);
// calculate cell vertex positions
float3 v[8];
v[0] = getNodeCoo (x, y, z);
v[1] = getNodeCoo (x + 1, y, z);
v[2] = getNodeCoo (x + 1, y + 1, z);
v[3] = getNodeCoo (x, y + 1, z);
v[4] = getNodeCoo (x, y, z + 1);
v[5] = getNodeCoo (x + 1, y, z + 1);
v[6] = getNodeCoo (x + 1, y + 1, z + 1);
v[7] = getNodeCoo (x, y + 1, z + 1);
// find the vertices where the surface intersects the cube
// use shared memory to avoid using local
__shared__ float3 vertlist[12][CTA_SIZE];
vertlist[0][tid] = vertex_interp (v[0], v[1], f[0], f[1]);
vertlist[1][tid] = vertex_interp (v[1], v[2], f[1], f[2]);
vertlist[2][tid] = vertex_interp (v[2], v[3], f[2], f[3]);
vertlist[3][tid] = vertex_interp (v[3], v[0], f[3], f[0]);
vertlist[4][tid] = vertex_interp (v[4], v[5], f[4], f[5]);
vertlist[5][tid] = vertex_interp (v[5], v[6], f[5], f[6]);
vertlist[6][tid] = vertex_interp (v[6], v[7], f[6], f[7]);
vertlist[7][tid] = vertex_interp (v[7], v[4], f[7], f[4]);
vertlist[8][tid] = vertex_interp (v[0], v[4], f[0], f[4]);
vertlist[9][tid] = vertex_interp (v[1], v[5], f[1], f[5]);
vertlist[10][tid] = vertex_interp (v[2], v[6], f[2], f[6]);
vertlist[11][tid] = vertex_interp (v[3], v[7], f[3], f[7]);
__syncthreads ();
// output triangle vertices
int numVerts = tex1Dfetch (numVertsTex, cubeindex);
for (int i = 0; i < numVerts; i += 3)
{
int index = vertex_ofssets[idx] + i;
// triTex rows are 16 entries per cube configuration.
int v1 = tex1Dfetch (triTex, (cubeindex * 16) + i + 0);
int v2 = tex1Dfetch (triTex, (cubeindex * 16) + i + 1);
int v3 = tex1Dfetch (triTex, (cubeindex * 16) + i + 2);
store_point (output, index + 0, vertlist[v1][tid]);
store_point (output, index + 1, vertlist[v2][tid]);
store_point (output, index + 2, vertlist[v3][tid]);
}
}
// Writes one vertex as a homogeneous float4 (w = 1).
__device__ __forceinline__ void
store_point (float4 *ptr, int index, const float3& point) const {
ptr[index] = make_float4 (point.x, point.y, point.z, 1.0f);
}
};
// Thin kernel wrapper: runs the TrianglesGenerator functor once per thread.
__global__ void
trianglesGeneratorKernel (const TrianglesGenerator tg) {tg (); }
}
}
// Generates the marching-cubes mesh for the previously collected occupied
// voxels, writing one float4 per triangle vertex into `output`.
void
pcl::device::generateTriangles (const PtrStep<short2>& volume, const DeviceArray2D<int>& occupied_voxels, const float3& volume_size, DeviceArray<PointType>& output)
{
int device;
cudaSafeCall( cudaGetDevice(&device) );
cudaDeviceProp prop;
cudaSafeCall( cudaGetDeviceProperties(&prop, device) );
// Smaller blocks on older hardware.
// NOTE(review): the kernel indexes with the fixed CTA_SIZE (256); launching
// with block_size 96 would leave gaps in idx — confirm the <2.0 path is
// still exercised/supported.
int block_size = prop.major < 2 ? 96 : 256; // please see TrianglesGenerator::CTA_SIZE
using Tg = TrianglesGenerator;
Tg tg;
tg.volume = volume;
tg.occupied_voxels = occupied_voxels.ptr (0);
tg.vertex_ofssets = occupied_voxels.ptr (2);
tg.voxels_count = occupied_voxels.cols ();
tg.cell_size.x = volume_size.x / VOLUME_X;
tg.cell_size.y = volume_size.y / VOLUME_Y;
tg.cell_size.z = volume_size.z / VOLUME_Z;
tg.output = output;
// 2D grid so more than MAX_GRID_SIZE_X blocks can be launched.
int blocks_num = divUp (tg.voxels_count, block_size);
dim3 block (block_size);
dim3 grid(min(blocks_num, Tg::MAX_GRID_SIZE_X), divUp(blocks_num, Tg::MAX_GRID_SIZE_X));
trianglesGeneratorKernel<<<grid, block>>>(tg);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
} |
b8b66b2ad5039df39c07a037b32d31870c6f5992.hip | // !!! This is a file automatically generated by hipify!!!
#include "TestSuit.h"
#include "mathcuda.h"
//#include "allKernelFct.cuh"
#include "CudaMem.h"
// Default constructor: nothing to initialize yet.
TestSuit::TestSuit() {
// TODO Auto-generated constructor stub
}
// Default destructor: no resources owned by this class.
TestSuit::~TestSuit() {
// TODO Auto-generated destructor stub
}
// Smoke test for the CUDA/HIP helpers: multiplies two random 4x4 matrices
// on the host (reference result in r1) and copies a device buffer back into
// r2. NOTE(review): the device kernel is commented out, so d_r2/r2 stay
// uninitialized and r1 is never compared against r2 — confirm intent.
void TestSuit::testCudaFunctions()
{
	float h1[16], h2[16], r1[16], r2[16];
	for(unsigned int i=0; i<16; i++)
	{
		h1[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
		h2[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
	}
	mm16(h1, h2, r1); // host-side reference multiply

	float *d_h1, *d_h2, *d_r2;
	CudaMem::cudaMemAllocReport((void**)&d_h1, 16*sizeof(float));
	CudaMem::cudaMemAllocReport((void**)&d_h2, 16*sizeof(float));
	CudaMem::cudaMemAllocReport((void**)&d_r2, 16*sizeof(float));
	CudaMem::cudaMemCpyReport(d_h1, h1, 16*sizeof(float), hipMemcpyHostToDevice);
	CudaMem::cudaMemCpyReport(d_h2, h2, 16*sizeof(float), hipMemcpyHostToDevice);

	hipError_t cudaStatus;
	//cuda_calc::testMM16<<<1, 16>>>(d_h1, d_h2, d_r2);
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "test kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
	}
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching test kernel!\n", cudaStatus);
	}
	CudaMem::cudaMemCpyReport(r2, d_r2, 16*sizeof(float), hipMemcpyDeviceToHost);

	// Fix: release the device buffers (they were previously leaked).
	hipFree(d_r2);
	hipFree(d_h2);
	hipFree(d_h1);
}
// Placeholder: camera-parameter validation is not implemented yet.
void TestSuit::testCameraParameters(struct CAM* cam)
{
} | b8b66b2ad5039df39c07a037b32d31870c6f5992.cu | #include "TestSuit.h"
#include "mathcuda.h"
//#include "allKernelFct.cuh"
#include "CudaMem.h"
// Default constructor: nothing to initialize yet.
TestSuit::TestSuit() {
// TODO Auto-generated constructor stub
}
// Default destructor: no resources owned by this class.
TestSuit::~TestSuit() {
// TODO Auto-generated destructor stub
}
// Smoke test for the CUDA helpers: multiplies two random 4x4 matrices on
// the host (reference result in r1) and copies a device buffer back into
// r2. NOTE(review): the device kernel is commented out, so d_r2/r2 stay
// uninitialized and r1 is never compared against r2 — confirm intent.
void TestSuit::testCudaFunctions()
{
	float h1[16], h2[16], r1[16], r2[16];
	for(unsigned int i=0; i<16; i++)
	{
		h1[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
		h2[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
	}
	mm16(h1, h2, r1); // host-side reference multiply

	float *d_h1, *d_h2, *d_r2;
	CudaMem::cudaMemAllocReport((void**)&d_h1, 16*sizeof(float));
	CudaMem::cudaMemAllocReport((void**)&d_h2, 16*sizeof(float));
	CudaMem::cudaMemAllocReport((void**)&d_r2, 16*sizeof(float));
	CudaMem::cudaMemCpyReport(d_h1, h1, 16*sizeof(float), cudaMemcpyHostToDevice);
	CudaMem::cudaMemCpyReport(d_h2, h2, 16*sizeof(float), cudaMemcpyHostToDevice);

	cudaError_t cudaStatus;
	//cuda_calc::testMM16<<<1, 16>>>(d_h1, d_h2, d_r2);
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "test kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
	}
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching test kernel!\n", cudaStatus);
	}
	CudaMem::cudaMemCpyReport(r2, d_r2, 16*sizeof(float), cudaMemcpyDeviceToHost);

	// Fix: release the device buffers (they were previously leaked).
	cudaFree(d_r2);
	cudaFree(d_h2);
	cudaFree(d_h1);
}
// Placeholder: camera-parameter validation is not implemented yet.
void TestSuit::testCameraParameters(struct CAM* cam)
{
} |
a2602daa68dddbb7f431bfb85e59b45d58cd0961.hip | // !!! This is a file automatically generated by hipify!!!
//Github - https://github.com/arer90/Accelerated_Computing.git
/*
Section 0. Basic CUDA.
phase 0. Checking by viusal studio
This is test for your device(GPU) can access it with a simple sentence.
Moreover, it is like a simple test like "Hello world" in C and C++ language.
By the way, CUDA with Visual Studio is basically made of C and C++ language only.
Python, matlab, or any other languages are possilbe to compile for using a GPU,
but for easy understanding of CUDA, I will use only C and C++ language.
This is made by 'arer90'
*/
// C or C++ header included.
#include <iostream>
// This is basic CUDA header file. Please remember this like
// C -> <stdio.h> or C++ -> <iostream>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// namespace for C++ (-iostream header file)
// if you are not similar to C++ language, please follow this sequences.
using namespace std;
// Device(GPU) function : This is function for Device of GPU.
/*
__global__ : this is a main memory in GPU that every CPU values copied to this memory
and GPU will use this memory for fastest calculation or any progress.
*/
// Minimal device kernel: each launched thread prints one message
// (device-side printf is for debugging/demo only — it is serialized).
__global__ void print_example() {
printf("This is test for CUDA language with GPU.\n");
}
int main() {
	/*
	 A kernel launch needs an execution configuration (grid and block
	 dimensions); a single 1x1 launch is enough to print one message. The
	 HIP portability macro below is equivalent to print_example<<<1,1>>>().
	*/
	hipLaunchKernelGGL(( print_example), dim3(1),dim3(1), 0, 0, );
	// Fix: kernel launches are asynchronous — without this synchronize the
	// process can exit before the device printf buffer is flushed, so the
	// message may never appear.
	hipDeviceSynchronize();
	return 0;
}
| a2602daa68dddbb7f431bfb85e59b45d58cd0961.cu | //Github - https://github.com/arer90/Accelerated_Computing.git
/*
Section 0. Basic CUDA.
phase 0. Checking by viusal studio
This is test for your device(GPU) can access it with a simple sentence.
Moreover, it is like a simple test like "Hello world" in C and C++ language.
By the way, CUDA with Visual Studio is basically made of C and C++ language only.
Python, matlab, or any other languages are possilbe to compile for using a GPU,
but for easy understanding of CUDA, I will use only C and C++ language.
This is made by 'arer90'
*/
// C or C++ header included.
#include <iostream>
// This is basic CUDA header file. Please remember this like
// C -> <stdio.h> or C++ -> <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// namespace for C++ (-iostream header file)
// if you are not similar to C++ language, please follow this sequences.
using namespace std;
// Device(GPU) function : This is function for Device of GPU.
/*
__global__ : this is a main memory in GPU that every CPU values copied to this memory
and GPU will use this memory for fastest calculation or any progress.
*/
// Minimal device kernel: each launched thread prints one message
// (device-side printf is for debugging/demo only — it is serialized).
__global__ void print_example() {
printf("This is test for CUDA language with GPU.\n");
}
int main() {
	/*
	 A kernel launch needs an execution configuration inside <<< >>> (grid
	 and block dimensions); a single <<<1,1>>> launch is enough to print one
	 message. You can change <<<N,N>>>, but rebuild cleanly after changing
	 the setup.
	*/
	print_example<<<1,1>>>();
	// Fix: kernel launches are asynchronous — without this synchronize the
	// process can exit before the device printf buffer is flushed, so the
	// message may never appear.
	cudaDeviceSynchronize();
	return 0;
}
|
7155990e0570c632d214ce73ecd2a278c956fa15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <trajectory_generation/GATE.cuh>
#include <math_constants.h>
#include <random>
#include <chrono>
#include <iostream>
namespace GATE_internal {
// One thread per rollout: integrates the dynamics forward NUM_TIMESTEPS
// steps with RK4, starting from the perturbed initial condition for this
// rollout, and stores every state into state_trajectories_device with
// layout [rollout][timestep][state_dim].
template <class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS>
__global__ void rolloutKernelRK4(DYN_T* dynamics_device, GUIDANCE_T* guidance_device, PERT_T* perturbations_device,
float* state_trajectories_device,
float dt)
{
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> x_k;
// NOTE(review): x_ktmp is never used below — candidate for removal.
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> x_ktmp;
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> x_kp1 = DYN_T::state_array::Zero();
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> xdot = DYN_T::state_array::Zero();
Eigen::Matrix<float, DYN_T::CONTROL_DIM, 1> u_k = DYN_T::control_array::Zero(); // TODO: CANNOT CALL BLOCK FUNCTION WITHOUT HARD - CODING DIMENSIONS
Eigen::Matrix<float, DYN_T::CONTROL_DIM, 1> u_kp1 = DYN_T::control_array::Zero();
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Copy the initial condition
// NOTE(review): the __syncthreads() calls below sit inside this divergent
// branch; if NUM_ROLLOUTS is not a multiple of the block size some threads
// skip the barrier — verify launch configuration or hoist the barriers.
if (tid < NUM_ROLLOUTS) {
x_k.block<DYN_T::STATE_DIM, 1>(0,0) << (*(perturbations_device->x0_pert_d_)).block<DYN_T::STATE_DIM, 1>(0, tid);
// fill in states at time = 0
for (int i = 0; i < DYN_T::STATE_DIM; ++i) {
state_trajectories_device[tid * DYN_T::STATE_DIM * NUM_TIMESTEPS + 0 * DYN_T::STATE_DIM + i] = x_k(i);
}
__syncthreads();
// Integrate dynamics forward!
for (int k = 1; k < NUM_TIMESTEPS; ++k) {
guidance_device->getControl(perturbations_device, k, tid, NUM_ROLLOUTS, &x_k, &xdot, &u_k, &u_kp1);
__syncthreads();
// Debug traps below are hard-wired to rollout 13 only.
if (u_k.array().isNaN().sum() > 0 && tid == 13) {
printf("WARNING:: u_k NAN detected! on timestep: %i, and rollout %i\n", k, tid);
printf("u(%i)=%f\n", k, u_k[0]);
}
if (x_k.array().isNaN().sum() > 0 && tid == 13) {
printf("WARNING:: x_k NAN detected! on timestep: %i, and rollout %i\n", k, tid);
printf("x_k(%i)=(%f, %f)\n", k, x_k[0], x_k[1]);
}
integrators_eigen::rk4<DYN_T, PERT_T>(dynamics_device, perturbations_device, k, tid, x_k, u_k, dt, u_kp1, xdot, x_kp1);
if (x_kp1.array().isNaN().sum() > 0 && tid == 13) {
printf("WARNING:: x_kp1 NAN detected! on timestep: %i, and rollout %i\n", k, tid);
printf("x_kp1(%i)=(%f, %f)\n", k, x_kp1[0], x_kp1[1]);
}
// Advance: x_k <- x_kp1, element by element.
for (int i = 0; i < DYN_T::STATE_DIM; ++i) {
x_k.block<1, 1>(i, 0) << x_kp1.block<1, 1>(i, 0);
}
__syncthreads();
// Save the current state into the global variable that holds all the rollouts
for (int i = 0; i < DYN_T::STATE_DIM; ++i) {
state_trajectories_device[tid * DYN_T::STATE_DIM * NUM_TIMESTEPS + k * DYN_T::STATE_DIM + i] = x_k(i);
}
}
}
};
}
#define GATE_CLASS GATE<DYN_T, GUIDANCE_T, PERT_T, NUM_TIMESTEPS, NUM_ROLLOUTS, BDIM_X>
// Constructor: wires up the dynamics/guidance/perturbation models, adopts
// the dynamics' CUDA stream, pushes each model to the GPU, and allocates
// the trajectory buffers.
// NOTE(review): the x0 parameter is not used here — the initial conditions
// appear to come from perturbation->x0_pert_d_ instead; confirm.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
GATE_CLASS::GATE(DYN_T* dynamics, GUIDANCE_T* guidance, PERT_T* perturbation, state_array x0, float dt) :
dynamics_(dynamics), guidance_(guidance), perturbation_(perturbation), dt(dt)
{
// Set the cuda stream to the one provided by the dynamics
stream_ = dynamics->stream_;
// Call the GPU setup functions
dynamics_->GPUSetup();
guidance_->GPUSetup();
perturbation_->GPUSetup();
state_trajectories_host.resize(DYN_T::STATE_DIM * NUM_TIMESTEPS * NUM_ROLLOUTS);
// Allocate CUDA memory
allocateCUDAMemory();
}
// Destructor: releases the device-side trajectory buffer.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
GATE_CLASS::~GATE()
{
deallocateCUDAMemory();
}
// Intentionally empty: host storage is a std::vector resized in the
// constructor; kept for API symmetry with allocateCUDAMemory().
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::allocateHostMemory()
{
}
// Allocates the flat device buffer holding all rollout state trajectories
// ([rollout][timestep][state_dim] floats).
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::allocateCUDAMemory()
{
// Allocate memory for state trajectories
HANDLE_ERROR(hipMalloc((void**)&state_trajectories_device, DYN_T::STATE_DIM * NUM_TIMESTEPS * NUM_ROLLOUTS * sizeof(float)));
CudaCheckError();
}
// Frees the device-side trajectory buffer allocated in allocateCUDAMemory().
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::deallocateCUDAMemory()
{
hipFree(state_trajectories_device);
}
// No host arrays currently need uploading; kept for API symmetry with
// copyDeviceToHostArrays().
// Fix: the original read `GATE_CLASScopyHostToDeviceArrays` — without the
// `::` the GATE_CLASS macro never expands (it is part of a longer
// identifier), so this defined an unrelated free function template and the
// member was never defined.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::copyHostToDeviceArrays()
{
}
// Enqueues an asynchronous device-to-host copy of all rollout trajectories
// on stream_. The copy is NOT complete on return — the caller must
// synchronize the stream before reading state_trajectories_host.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::copyDeviceToHostArrays()
{
hipMemcpyAsync(state_trajectories_host.data(), state_trajectories_device, DYN_T::STATE_DIM * NUM_TIMESTEPS * NUM_ROLLOUTS * sizeof(float), hipMemcpyDeviceToHost, stream_);
CudaCheckError();
}
// Launches the RK4 rollout kernel (one thread per rollout) on stream_,
// copies the resulting trajectories back to the host, and prints the
// elapsed wall-clock time.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::computeTrajectories()
{
	// +1 block covers the remainder when NUM_ROLLOUTS is not a multiple of
	// BDIM_X (the kernel bounds-checks tid).
	int num_blocks = (NUM_ROLLOUTS / BDIM_X) + 1;
	// Memory is already allocated; optionally inspect how much is left.
	size_t free_1, total_1; // (removed unused free_3/total_3)
	hipMemGetInfo(&free_1, &total_1); // Check free/total GPU memory prior to launch
	CudaCheckError();
	//std::cout << "Free Memory: " << free_1 << std::endl;
	//std::cout << "Total Memory: " << total_1 << std::endl;
	auto start = std::chrono::system_clock::now();
	hipLaunchKernelGGL(( GATE_internal::rolloutKernelRK4<DYN_T, GUIDANCE_T, PERT_T, NUM_TIMESTEPS, NUM_ROLLOUTS>), dim3(num_blocks), dim3(BDIM_X), 0, stream_, dynamics_->model_d_,
		guidance_->guidance_d_,
		perturbation_->perturbations_d_,
		state_trajectories_device,
		dt);
	CudaCheckError();
	copyDeviceToHostArrays();
	// Fix: copyDeviceToHostArrays() only enqueues an async copy on stream_.
	// Block until it finishes so state_trajectories_host is valid when this
	// method returns (and so the timing below measures the real work).
	HANDLE_ERROR(hipStreamSynchronize(stream_));
	auto end = std::chrono::system_clock::now();
	auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
	std::cout << "Trajectory Compute Time: " << elapsed.count() / 1000.0 << " seconds." << std::endl;
}
#undef GATE_CLASS
| 7155990e0570c632d214ce73ecd2a278c956fa15.cu | #include <trajectory_generation/GATE.cuh>
#include <math_constants.h>
#include <random>
#include <chrono>
#include <iostream>
namespace GATE_internal {
// One thread per rollout: integrates the dynamics forward NUM_TIMESTEPS
// steps with RK4, starting from the perturbed initial condition for this
// rollout, and stores every state into state_trajectories_device with
// layout [rollout][timestep][state_dim].
template <class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS>
__global__ void rolloutKernelRK4(DYN_T* dynamics_device, GUIDANCE_T* guidance_device, PERT_T* perturbations_device,
float* state_trajectories_device,
float dt)
{
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> x_k;
// NOTE(review): x_ktmp is never used below — candidate for removal.
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> x_ktmp;
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> x_kp1 = DYN_T::state_array::Zero();
Eigen::Matrix<float, DYN_T::STATE_DIM, 1> xdot = DYN_T::state_array::Zero();
Eigen::Matrix<float, DYN_T::CONTROL_DIM, 1> u_k = DYN_T::control_array::Zero(); // TODO: CANNOT CALL BLOCK FUNCTION WITHOUT HARD - CODING DIMENSIONS
Eigen::Matrix<float, DYN_T::CONTROL_DIM, 1> u_kp1 = DYN_T::control_array::Zero();
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Copy the initial condition
// NOTE(review): the __syncthreads() calls below sit inside this divergent
// branch; if NUM_ROLLOUTS is not a multiple of the block size some threads
// skip the barrier — verify launch configuration or hoist the barriers.
if (tid < NUM_ROLLOUTS) {
x_k.block<DYN_T::STATE_DIM, 1>(0,0) << (*(perturbations_device->x0_pert_d_)).block<DYN_T::STATE_DIM, 1>(0, tid);
// fill in states at time = 0
for (int i = 0; i < DYN_T::STATE_DIM; ++i) {
state_trajectories_device[tid * DYN_T::STATE_DIM * NUM_TIMESTEPS + 0 * DYN_T::STATE_DIM + i] = x_k(i);
}
__syncthreads();
// Integrate dynamics forward!
for (int k = 1; k < NUM_TIMESTEPS; ++k) {
guidance_device->getControl(perturbations_device, k, tid, NUM_ROLLOUTS, &x_k, &xdot, &u_k, &u_kp1);
__syncthreads();
// Debug traps below are hard-wired to rollout 13 only.
if (u_k.array().isNaN().sum() > 0 && tid == 13) {
printf("WARNING:: u_k NAN detected! on timestep: %i, and rollout %i\n", k, tid);
printf("u(%i)=%f\n", k, u_k[0]);
}
if (x_k.array().isNaN().sum() > 0 && tid == 13) {
printf("WARNING:: x_k NAN detected! on timestep: %i, and rollout %i\n", k, tid);
printf("x_k(%i)=(%f, %f)\n", k, x_k[0], x_k[1]);
}
integrators_eigen::rk4<DYN_T, PERT_T>(dynamics_device, perturbations_device, k, tid, x_k, u_k, dt, u_kp1, xdot, x_kp1);
if (x_kp1.array().isNaN().sum() > 0 && tid == 13) {
printf("WARNING:: x_kp1 NAN detected! on timestep: %i, and rollout %i\n", k, tid);
printf("x_kp1(%i)=(%f, %f)\n", k, x_kp1[0], x_kp1[1]);
}
// Advance: x_k <- x_kp1, element by element.
for (int i = 0; i < DYN_T::STATE_DIM; ++i) {
x_k.block<1, 1>(i, 0) << x_kp1.block<1, 1>(i, 0);
}
__syncthreads();
// Save the current state into the global variable that holds all the rollouts
for (int i = 0; i < DYN_T::STATE_DIM; ++i) {
state_trajectories_device[tid * DYN_T::STATE_DIM * NUM_TIMESTEPS + k * DYN_T::STATE_DIM + i] = x_k(i);
}
}
}
};
}
// Convenience macro so each member definition below doesn't repeat the full
// template argument list.
#define GATE_CLASS GATE<DYN_T, GUIDANCE_T, PERT_T, NUM_TIMESTEPS, NUM_ROLLOUTS, BDIM_X>
// Constructor: stores the (borrowed) dynamics/guidance/perturbation models,
// pushes each of them to the GPU, sizes the host-side trajectory mirror, and
// allocates the matching device buffer.
// NOTE(review): the x0 parameter is accepted but never used here -- confirm
// whether the initial state was meant to be stored.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
GATE_CLASS::GATE(DYN_T* dynamics, GUIDANCE_T* guidance, PERT_T* perturbation, state_array x0, float dt) :
dynamics_(dynamics), guidance_(guidance), perturbation_(perturbation), dt(dt)
{
  // Set the cuda stream to the one provided by the dynamics
  stream_ = dynamics->stream_;
  // Call the GPU setup functions
  dynamics_->GPUSetup();
  guidance_->GPUSetup();
  perturbation_->GPUSetup();
  // Host mirror of the device buffer: indexed rollout-major, then timestep,
  // then state dimension (see the kernel's write pattern).
  state_trajectories_host.resize(DYN_T::STATE_DIM * NUM_TIMESTEPS * NUM_ROLLOUTS);
  // Allocate CUDA memory
  allocateCUDAMemory();
}
// Destructor: releases the device-side trajectory buffer. The dynamics,
// guidance, and perturbation pointers are borrowed and are not freed here.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
GATE_CLASS::~GATE()
{
  deallocateCUDAMemory();
}
// Placeholder: host storage is a std::vector resized in the constructor, so
// no explicit host allocation is currently needed here.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::allocateHostMemory()
{
}
// Allocates the device buffer holding every rollout's full state trajectory:
// STATE_DIM floats per timestep, NUM_TIMESTEPS timesteps, NUM_ROLLOUTS rollouts.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::allocateCUDAMemory()
{
  // Allocate memory for state trajectories
  HANDLE_ERROR(cudaMalloc((void**)&state_trajectories_device, DYN_T::STATE_DIM * NUM_TIMESTEPS * NUM_ROLLOUTS * sizeof(float)));
  CudaCheckError();
}
// Frees the device trajectory buffer allocated in allocateCUDAMemory().
// NOTE(review): unlike the allocation path, the cudaFree return code is not
// checked here -- consider wrapping it in HANDLE_ERROR for consistency.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::deallocateCUDAMemory()
{
  cudaFree(state_trajectories_device);
}
// Placeholder for pushing host-side arrays to the device; nothing is copied
// in that direction yet.
// Fixed: the original read "GATE_CLASScopyHostToDeviceArrays" -- the missing
// "::" meant the macro was never expanded and this silently declared an
// unrelated free function template instead of defining this member.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::copyHostToDeviceArrays()
{
}
// Copies the computed rollout trajectories from the device buffer back into
// state_trajectories_host, enqueued on stream_.
// NOTE(review): the copy is asynchronous and the destination is pageable
// std::vector storage; callers must synchronize stream_ before reading the
// host data -- confirm computeTrajectories() (or its callers) do so.
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::copyDeviceToHostArrays()
{
  cudaMemcpyAsync(state_trajectories_host.data(), state_trajectories_device, DYN_T::STATE_DIM * NUM_TIMESTEPS * NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, stream_);
  CudaCheckError();
}
// Launches the RK4 rollout kernel that integrates every rollout forward for
// NUM_TIMESTEPS steps, then copies the resulting trajectories back to the
// host. Blocks on stream_ before returning, so state_trajectories_host is
// valid when this completes and the reported elapsed time covers the real
// GPU work (the original timed only the asynchronous enqueue).
template<class DYN_T, class GUIDANCE_T, class PERT_T, int NUM_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X>
void GATE_CLASS::computeTrajectories()
{
  // Ceiling division: exactly enough blocks of BDIM_X threads to cover
  // NUM_ROLLOUTS (the old "/ BDIM_X + 1" launched a spare, empty block when
  // the division was exact; the kernel's tid guard made that harmless but
  // wasteful).
  int num_blocks = (NUM_ROLLOUTS + BDIM_X - 1) / BDIM_X;
  // Memory is already allocated lets check how much is left
  size_t free_1, total_1;
  cudaMemGetInfo(&free_1, &total_1); // Check free/total GPU memory prior to allocation
  CudaCheckError();
  //std::cout << "Free Memory: " << free_1 << std::endl;
  //std::cout << "Total Memory: " << total_1 << std::endl;
  auto start = std::chrono::system_clock::now();
  GATE_internal::rolloutKernelRK4<DYN_T, GUIDANCE_T, PERT_T, NUM_TIMESTEPS, NUM_ROLLOUTS><<<num_blocks, BDIM_X, 0, stream_>>>(dynamics_->model_d_,
                                                                                                                             guidance_->guidance_d_,
                                                                                                                             perturbation_->perturbations_d_,
                                                                                                                             state_trajectories_device,
                                                                                                                             dt);
  CudaCheckError();
  copyDeviceToHostArrays();
  // Kernel launch and device-to-host copy are both asynchronous on stream_;
  // wait for them so the timing is meaningful and the host data is ready.
  cudaStreamSynchronize(stream_);
  auto end = std::chrono::system_clock::now();
  auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
  std::cout << "Trajectory Compute Time: " << elapsed.count() / 1000.0 << " seconds." << std::endl;
}
#undef GATE_CLASS
|
c0a6e673d4ffb98cc1de782223c17a7828da3232.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <iostream>
// 3rd Party Libraries
#include <boost/filesystem.hpp>
#include <opencv2/opencv.hpp>
#include <chrono>
using namespace boost::filesystem;
/*! Apply a 3x3 box (average) filter to a single-channel 8-bit image.
    (The previous comment wrongly described this as a grayscale conversion.)
    \param input_image CV_8UC1 image to smooth.
    \return New CV_8UC1 image of the same size holding the filtered result.
*/
cv::Mat averageFilter(const cv::Mat &input_image){
  int cols = input_image.cols;
  int rows = input_image.rows;
  // Simplify the border problem: replicate a 1-pixel border so every output
  // pixel has a full 3x3 neighbourhood. copyMakeBorder (re)allocates dst.
  cv::Mat input_image_extra_border;
  cv::Mat output_image(cv::Size(cols, rows), CV_8UC1);
  cv::copyMakeBorder(input_image, input_image_extra_border, 1, 1, 1, 1, cv::BORDER_REPLICATE);
  std::cout << "\tInput image size: " << input_image.rows << ", " << input_image.cols << std::endl;
  std::cout << "\tInput with border image size: " << input_image_extra_border.rows << ", "
            << input_image_extra_border.cols << std::endl;
  // Fixed: neighbour offsets must be built from the *bordered* image's row
  // stride. The original used input_image.step (two columns narrower), so the
  // vertical neighbours addressed the wrong pixels.
  const int step = (int) input_image_extra_border.step;
  const int offset[9] = { -step - 1, -step, -step + 1,
                          -1, 0, +1,
                          step - 1, step, step + 1};
  //Pointers
  unsigned char *input_ptr = (unsigned char*)(input_image_extra_border.data);
  unsigned char *output_ptr = (unsigned char*)(output_image.data);
  // Fixed: the interior of the bordered image spans indices [1, rows] x
  // [1, cols]; the original "< rows"/"< cols" bounds skipped the last row and
  // column, leaving them uninitialized in the output.
  for(int i = 1; i <= rows; i++){
    for(int j = 1; j <= cols; j++){
      int average = 0;
      for (int k = 0; k < 9; ++k)
        average += input_ptr[step * i + j + offset[k]];
      average = average / 9;
      output_ptr[output_image.step * (i - 1) + (j - 1)] = (unsigned char) average;
    }
  }
  return output_image;
}
/*! CPU reference conversion from a BGR image to grayscale using the
    luminosity method (0.21 R + 0.72 G + 0.07 B).
    \param input_image 8-bit 3-channel (BGR) source image.
    \param output_image Pre-allocated 8-bit single-channel destination.
*/
void rgb2gray(const cv::Mat &input_image, cv::Mat &output_image){
  unsigned char *src = (unsigned char*)(input_image.data);
  unsigned char *dst = (unsigned char*)(output_image.data);
  for (int row = 0; row < input_image.rows; ++row) {
    for (int col = 0; col < input_image.cols; ++col) {
      // OpenCV packs channels as B, G, R.
      unsigned char b = src[input_image.step * row + (col * 3)];
      unsigned char g = src[input_image.step * row + (col * 3) + 1];
      unsigned char r = src[input_image.step * row + (col * 3) + 2];
      dst[output_image.step * row + col] = 0.21 * r + 0.72 * g + 0.07 * b;
    }
  }
}
// 3x3 box filter over an 8-bit grayscale image. srcD must point at a
// (width+2) x (height+2) border-padded copy of the source so the 3x3 window
// around every output pixel (x, y) is in bounds; dstD is width x height.
// Launch with a 2D grid covering the output; threads past the edge exit early.
__global__ void boxFilter3x3_ver1 (unsigned char * srcD, unsigned char * dstD, int width, int height){
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height)
    return;
  int widthSrc = width + 2; // row stride of the padded source
  // Fixed: the original "int sum = 0" was missing its semicolon (a compile
  // error) and the accumulator was never used; the unused heightSrc local is
  // removed as well.
  int sum = 0;
  for (int dy = 0; dy < 3; ++dy)
    for (int dx = 0; dx < 3; ++dx)
      sum += srcD[widthSrc * (y + dy) + (x + dx)];
  dstD[y * width + x] = sum / 9;
}
// GPU grayscale conversion, 1D-indexed variant: one thread per pixel of the
// flattened width*height image. d_src is packed BGR (3 bytes per pixel) and
// d_dst one byte per pixel.
__global__ void rgb2gray_ver2(unsigned char * d_src, unsigned char * d_dst, int width, int height)
{
  const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
  if (pixel >= (width * height))  // guard the grid tail
    return;
  const unsigned char *bgr = d_src + (pixel * 3);
  // Luminosity weights; clamp defensively to the 8-bit range.
  unsigned int luma = (unsigned int)((float)(0.21 * bgr[2] + 0.72 * bgr[1] + 0.07 * bgr[0]));
  d_dst[pixel] = luma > 255 ? 255 : luma;
}
// GPU grayscale conversion, 2D-indexed variant: thread (x, y) handles pixel
// (x, y). Indexing uses width * 3 as the row stride, so rows are assumed to
// be tightly packed BGR.
__global__ void rgb2gray_ver1(unsigned char * d_src, unsigned char * d_dst, int width, int height)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height)
    return;
  const unsigned char *bgr = d_src + y * (width * 3) + (x * 3);
  // Luminosity weights; clamp defensively to the 8-bit range.
  unsigned int luma = (unsigned int)((float)(0.21 * bgr[2] + 0.72 * bgr[1] + 0.07 * bgr[0]));
  d_dst[y * width + x] = luma > 255 ? 255 : luma;
}
/*! Process the specified image. First convert the image from rgb to grayscale, and
after this apply and average filter of size 3x3.
\param input_image Image to process.
\return The processed image
*/
cv::Mat processImage(const cv::Mat &input_image){
  // Destination for the GPU grayscale result (assumes a continuous Mat, so
  // its step equals cols -- true for a freshly constructed CV_8UC1 Mat).
  cv::Mat input_image_gray(cv::Size(input_image.cols, input_image.rows), CV_8UC1);
  cv::Mat averaged_image;
  unsigned char *d_src;  // device copy of the packed BGR input
  unsigned char *d_dst;  // device grayscale output
  // NOTE(review): none of the hip* calls below are error-checked -- a failed
  // allocation or copy would go unnoticed.
  auto start = std::chrono::steady_clock::now();
  // Memory allocation
  hipMalloc((void**)&d_src, input_image.cols * input_image.rows * 3 *sizeof(unsigned char));
  hipMalloc((void**)&d_dst, input_image.cols * input_image.rows * sizeof(unsigned char));
  // Copy src image to device (assumes input_image.data is continuous BGR --
  // TODO confirm for non-imread sources)
  hipMemcpy(d_src, input_image.data, input_image.cols * input_image.rows * 3 *sizeof(unsigned char), hipMemcpyHostToDevice);
  auto end = std::chrono::steady_clock::now();
  std::cout<< "Transfer time "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms"<<std::endl;
  //Launch the kernel
  start = std::chrono::steady_clock::now();
  //Scheme definition: 32x32 blocks, grid rounded up to cover the whole image
  dim3 blkDim (32, 32, 1);
  dim3 grdDim ((input_image.cols + 31)/32, (input_image.rows + 31)/32, 1);
  hipLaunchKernelGGL(( rgb2gray_ver1), dim3(grdDim), dim3(blkDim), 0, 0, d_src, d_dst, input_image.cols, input_image.rows);
  //Ver2 (alternative 1D launch sized by the occupancy calculator)
  /*
  int blockSize; // The launch configurator returned block size
  int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
  int gridSize; // The actual grid size needed, based on input siz
  hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, rgb2gray_ver2, 0, (input_image.cols * input_image.rows));
  // Round up according to array size
  gridSize = ((input_image.cols * input_image.rows) + blockSize - 1) / blockSize;
  rgb2gray_ver2<<<gridSize, blockSize>>>(d_src, d_dst, input_image.cols, input_image.rows);
  */
  //Wait until kernel finishes
  hipDeviceSynchronize();
  end = std::chrono::steady_clock::now();
  std::cout<< "Processing time "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms"<<std::endl;
  //copy back the result to CPU
  start = std::chrono::steady_clock::now();
  hipMemcpy(input_image_gray.data, d_dst, input_image.cols * input_image.rows * sizeof(unsigned char), hipMemcpyDeviceToHost);
  end = std::chrono::steady_clock::now();
  std::cout<< "Transfer time "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms"<<std::endl;
  // Free memory
  hipFree(d_src);
  hipFree(d_dst);
  // Convert RGB to Grayscale (CPU reference, kept for comparison)
  //rgb2gray(input_image, input_image_gray);
  // Average Filter (runs on the CPU)
  averaged_image = averageFilter(input_image_gray);
  return averaged_image;
}
/*! Process all the images defined on the std::vector.
\param filenames Vector of directory_entry with that containsl all the image to process
*/
/*! Load every file listed in `filenames` as an image, run processImage on
    each, and write the results to "<index>output.tiff". Processed images are
    erased from the vector as soon as they are consumed.
    \param filenames Vector of directory_entry naming the images to process.
*/
void processImages(std::vector<directory_entry> filenames){
  std::vector<cv::Mat> imagesToProcess;
  // Load all images up front.
  for (std::vector<directory_entry>::iterator it = filenames.begin(); it != filenames.end(); ++it) {
    std::cout << "Processing image " << it->path() << ":" << std::endl;
    imagesToProcess.push_back(cv::imread(it->path().c_str()));
  }
  // Process each image, dropping it from the vector once written out.
  int num_image = 0;
  auto i = std::begin(imagesToProcess);
  while (i != std::end(imagesToProcess)) {
    cv::imwrite(std::to_string(num_image) + "output.tiff", processImage(*i));
    i = imagesToProcess.erase(i);
    ++num_image;
  }
}
// Entry point. Usage: <prog> <path_img_folder>. Every entry inside the
// directory is treated as an image and processed.
// NOTE(review): subdirectories and non-image files are not filtered out --
// cv::imread would hand processImage an empty Mat for those; confirm inputs.
int main(int argc, char* argv[])
{
  if (argc < 2)
  {
    std::cout << "Usage: serial_base <path_img_folder>\n";
    return 1;
  }
  std::vector<directory_entry> filenames; // To save the file names in a vector.
  path input_path (argv[1]); // To define the path
  try
  {
    if (exists(input_path))
    {
      if (is_directory(input_path))
      {
        // Add the filenames to the vector
        copy(directory_iterator(input_path), directory_iterator(),
             back_inserter(filenames));
        // Process the all the images
        processImages(filenames);
      }
      else{
        std::cout << input_path << " exists, but is not a directory\n";
      }
    }
    else
      std::cout << input_path << " does not exist\n";
  }
  catch (const filesystem_error& ex)
  {
    // Filesystem iteration/permission errors are reported, not fatal.
    std::cout << ex.what() << '\n';
  }
  return 0;
}
| c0a6e673d4ffb98cc1de782223c17a7828da3232.cu | #include <string>
#include <iostream>
// 3rd Party Libraries
#include <boost/filesystem.hpp>
#include <opencv2/opencv.hpp>
#include <chrono>
using namespace boost::filesystem;
/*! Apply a 3x3 box (average) filter to a single-channel 8-bit image.
    (The previous comment wrongly described this as a grayscale conversion.)
    \param input_image CV_8UC1 image to smooth.
    \return New CV_8UC1 image of the same size holding the filtered result.
*/
cv::Mat averageFilter(const cv::Mat &input_image){
  int cols = input_image.cols;
  int rows = input_image.rows;
  // Simplify the border problem: replicate a 1-pixel border so every output
  // pixel has a full 3x3 neighbourhood. copyMakeBorder (re)allocates dst.
  cv::Mat input_image_extra_border;
  cv::Mat output_image(cv::Size(cols, rows), CV_8UC1);
  cv::copyMakeBorder(input_image, input_image_extra_border, 1, 1, 1, 1, cv::BORDER_REPLICATE);
  std::cout << "\tInput image size: " << input_image.rows << ", " << input_image.cols << std::endl;
  std::cout << "\tInput with border image size: " << input_image_extra_border.rows << ", "
            << input_image_extra_border.cols << std::endl;
  // Fixed: neighbour offsets must be built from the *bordered* image's row
  // stride. The original used input_image.step (two columns narrower), so the
  // vertical neighbours addressed the wrong pixels.
  const int step = (int) input_image_extra_border.step;
  const int offset[9] = { -step - 1, -step, -step + 1,
                          -1, 0, +1,
                          step - 1, step, step + 1};
  //Pointers
  unsigned char *input_ptr = (unsigned char*)(input_image_extra_border.data);
  unsigned char *output_ptr = (unsigned char*)(output_image.data);
  // Fixed: the interior of the bordered image spans indices [1, rows] x
  // [1, cols]; the original "< rows"/"< cols" bounds skipped the last row and
  // column, leaving them uninitialized in the output.
  for(int i = 1; i <= rows; i++){
    for(int j = 1; j <= cols; j++){
      int average = 0;
      for (int k = 0; k < 9; ++k)
        average += input_ptr[step * i + j + offset[k]];
      average = average / 9;
      output_ptr[output_image.step * (i - 1) + (j - 1)] = (unsigned char) average;
    }
  }
  return output_image;
}
/*! Convert the specified BGR image to grayscale on the CPU using the
    luminosity method (0.21 R + 0.72 G + 0.07 B).
    \param input_image 8-bit 3-channel (BGR) image to process.
    \param output_image Pre-allocated 8-bit single-channel destination.
*/
void rgb2gray(const cv::Mat &input_image, cv::Mat &output_image){
  //Pointers
  unsigned char *input_ptr = (unsigned char*)(input_image.data);
  unsigned char *output_ptr = (unsigned char*)(output_image.data);
  for(int i = 0;i < input_image.rows;i++){
    for(int j = 0;j < input_image.cols;j++){
      // OpenCV packs channels as B, G, R.
      unsigned char b = input_ptr[input_image.step * i + (j * 3) ] ;
      unsigned char g = input_ptr[input_image.step * i + (j * 3) + 1];
      unsigned char r = input_ptr[input_image.step * i + (j * 3) + 2];
      output_ptr[output_image.step * i + j] = 0.21 * r + 0.72 * g + 0.07 * b;
    }
  }
}
// 3x3 box filter over an 8-bit grayscale image. srcD must point at a
// (width+2) x (height+2) border-padded copy of the source so the 3x3 window
// around every output pixel (x, y) is in bounds; dstD is width x height.
// Launch with a 2D grid covering the output; threads past the edge exit early.
__global__ void boxFilter3x3_ver1 (unsigned char * srcD, unsigned char * dstD, int width, int height){
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height)
    return;
  int widthSrc = width + 2; // row stride of the padded source
  // Fixed: the original "int sum = 0" was missing its semicolon (a compile
  // error) and the accumulator was never used; the unused heightSrc local is
  // removed as well.
  int sum = 0;
  for (int dy = 0; dy < 3; ++dy)
    for (int dx = 0; dx < 3; ++dx)
      sum += srcD[widthSrc * (y + dy) + (x + dx)];
  dstD[y * width + x] = sum / 9;
}
// GPU grayscale conversion, 1D-indexed variant: one thread per pixel of the
// flattened width*height image; d_src is packed BGR (3 bytes per pixel),
// d_dst one byte per pixel.
__global__ void rgb2gray_ver2(unsigned char * d_src, unsigned char * d_dst, int width, int height)
{
  int pos_x = blockIdx.x * blockDim.x + threadIdx.x;
  if (pos_x >= (width * height))  // guard the grid tail
    return;
  // OpenCV packs channels as B, G, R.
  unsigned char b = d_src[(pos_x * 3)];
  unsigned char g = d_src[(pos_x * 3) + 1];
  unsigned char r = d_src[(pos_x * 3) + 2];
  // Luminosity weights; clamp defensively to the 8-bit range.
  unsigned int _gray = (unsigned int)((float)(0.21 * r + 0.72 * g + 0.07 * b));
  unsigned char gray = _gray > 255 ? 255 : _gray;
  d_dst[pos_x] = gray;
}
// GPU grayscale conversion, 2D-indexed variant: thread (x, y) handles pixel
// (x, y). Indexing uses width * 3 as the row stride, so rows are assumed to
// be tightly packed BGR.
__global__ void rgb2gray_ver1(unsigned char * d_src, unsigned char * d_dst, int width, int height)
{
  int pos_x = blockIdx.x * blockDim.x + threadIdx.x;
  int pos_y = blockIdx.y * blockDim.y + threadIdx.y;
  if (pos_x >= width || pos_y >= height )  // guard the grid tail
    return;
  // OpenCV packs channels as B, G, R.
  unsigned char b = d_src[pos_y * (width * 3) + (pos_x * 3)];
  unsigned char g = d_src[pos_y * (width * 3) + (pos_x * 3) + 1];
  unsigned char r = d_src[pos_y * (width * 3) + (pos_x * 3) + 2];
  // Luminosity weights; clamp defensively to the 8-bit range.
  unsigned int _gray = (unsigned int)((float)(0.21 * r + 0.72 * g + 0.07 * b));
  unsigned char gray = _gray > 255 ? 255 : _gray;
  d_dst[pos_y * width + pos_x] = gray;
}
/*! Process the specified image. First convert the image from rgb to grayscale, and
after this apply and average filter of size 3x3.
\param input_image Image to process.
\return The processed image
*/
cv::Mat processImage(const cv::Mat &input_image){
  // Destination for the GPU grayscale result (assumes a continuous Mat, so
  // its step equals cols -- true for a freshly constructed CV_8UC1 Mat).
  cv::Mat input_image_gray(cv::Size(input_image.cols, input_image.rows), CV_8UC1);
  cv::Mat averaged_image;
  unsigned char *d_src;  // device copy of the packed BGR input
  unsigned char *d_dst;  // device grayscale output
  // NOTE(review): none of the cuda* calls below are error-checked -- a failed
  // allocation or copy would go unnoticed.
  auto start = std::chrono::steady_clock::now();
  // Memory allocation
  cudaMalloc((void**)&d_src, input_image.cols * input_image.rows * 3 *sizeof(unsigned char));
  cudaMalloc((void**)&d_dst, input_image.cols * input_image.rows * sizeof(unsigned char));
  // Copy src image to device (assumes input_image.data is continuous BGR --
  // TODO confirm for non-imread sources)
  cudaMemcpy(d_src, input_image.data, input_image.cols * input_image.rows * 3 *sizeof(unsigned char), cudaMemcpyHostToDevice);
  auto end = std::chrono::steady_clock::now();
  std::cout<< "Transfer time "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms"<<std::endl;
  //Launch the kernel
  start = std::chrono::steady_clock::now();
  //Scheme definition: 32x32 blocks, grid rounded up to cover the whole image
  dim3 blkDim (32, 32, 1);
  dim3 grdDim ((input_image.cols + 31)/32, (input_image.rows + 31)/32, 1);
  rgb2gray_ver1<<<grdDim, blkDim>>>(d_src, d_dst, input_image.cols, input_image.rows);
  //Ver2 (alternative 1D launch sized by the occupancy calculator)
  /*
  int blockSize; // The launch configurator returned block size
  int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
  int gridSize; // The actual grid size needed, based on input siz
  cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, rgb2gray_ver2, 0, (input_image.cols * input_image.rows));
  // Round up according to array size
  gridSize = ((input_image.cols * input_image.rows) + blockSize - 1) / blockSize;
  rgb2gray_ver2<<<gridSize, blockSize>>>(d_src, d_dst, input_image.cols, input_image.rows);
  */
  //Wait until kernel finishes
  cudaDeviceSynchronize();
  end = std::chrono::steady_clock::now();
  std::cout<< "Processing time "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms"<<std::endl;
  //copy back the result to CPU
  start = std::chrono::steady_clock::now();
  cudaMemcpy(input_image_gray.data, d_dst, input_image.cols * input_image.rows * sizeof(unsigned char), cudaMemcpyDeviceToHost);
  end = std::chrono::steady_clock::now();
  std::cout<< "Transfer time "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms"<<std::endl;
  // Free memory
  cudaFree(d_src);
  cudaFree(d_dst);
  // Convert RGB to Grayscale (CPU reference, kept for comparison)
  //rgb2gray(input_image, input_image_gray);
  // Average Filter (runs on the CPU)
  averaged_image = averageFilter(input_image_gray);
  return averaged_image;
}
/*! Process all the images defined on the std::vector.
\param filenames Vector of directory_entry with that containsl all the image to process
*/
// Load every file listed in `filenames` as an image, run processImage on
// each, and write the results to "<index>output.tiff". Images are erased from
// the vector as soon as they are processed, keeping peak memory down.
void processImages(std::vector<directory_entry> filenames){
  std::vector<cv::Mat> imagesToProcess;
  std::vector<directory_entry>::iterator it;
  // Load images
  for(it = filenames.begin(); it != filenames.end(); it++ ) {
    std::cout << "Processing image " << it->path() << ":" << std::endl;
    cv::Mat img = cv::imread(it->path().c_str());
    imagesToProcess.push_back(img);
    //
  }
  // Process images
  auto i = std::begin(imagesToProcess);
  int num_image=0;
  while (i != std::end(imagesToProcess)) {
    cv::imwrite(std::to_string(num_image) + "output.tiff", processImage(*i));
    // erase() returns the iterator to the next unprocessed image.
    i = imagesToProcess.erase(i);
    num_image++;
  }
}
// Entry point. Usage: <prog> <path_img_folder>. Every entry inside the
// directory is treated as an image and processed.
// NOTE(review): subdirectories and non-image files are not filtered out --
// cv::imread would hand processImage an empty Mat for those; confirm inputs.
int main(int argc, char* argv[])
{
  if (argc < 2)
  {
    std::cout << "Usage: serial_base <path_img_folder>\n";
    return 1;
  }
  std::vector<directory_entry> filenames; // To save the file names in a vector.
  path input_path (argv[1]); // To define the path
  try
  {
    if (exists(input_path))
    {
      if (is_directory(input_path))
      {
        // Add the filenames to the vector
        copy(directory_iterator(input_path), directory_iterator(),
             back_inserter(filenames));
        // Process the all the images
        processImages(filenames);
      }
      else{
        std::cout << input_path << " exists, but is not a directory\n";
      }
    }
    else
      std::cout << input_path << " does not exist\n";
  }
  catch (const filesystem_error& ex)
  {
    // Filesystem iteration/permission errors are reported, not fatal.
    std::cout << ex.what() << '\n';
  }
  return 0;
}
|
390932c46bbca0a8f824c3ff577527f9aef16137.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2010 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Software DMA project
*
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
//#define CUDADMA_DEBUG_ON
#include "cudaDMA.h"
#include "params_directed.h"
#define WARP_SIZE 32
// includes, project
// includes, kernels
// Abort-on-error wrapper for runtime API calls: prints the error string and
// terminates the process.
// Fixed: the original used exit(false), which is exit(0) -- the process
// reported SUCCESS to the shell on a CUDA error. Use EXIT_FAILURE instead.
#define CUDA_SAFE_CALL(x) \
  { \
    hipError_t err = (x); \
    if (err != hipSuccess) \
    { \
      printf("Cuda error: %s\n", hipGetErrorString(err)); \
      exit(EXIT_FAILURE); \
    } \
  }
// I hate global variables, but whatever
// Running count of experiments executed, incremented by run_experiment.
long total_experiments = 0;
// Warp-specialized strided-load test, fully templated variant: element size,
// element count, and DMA thread count are all compile-time parameters of the
// cudaDMAStrided object (only the strides are runtime values). DMA threads
// stage NUM_ELMTS elements of BYTES_PER_ELMT bytes from idata into shared
// memory; compute threads zero the shared buffer, trigger the transfer, wait
// for it, then stream the buffer out to odata so the host can validate it.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_four ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<true,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS,NUM_ELMTS>
    dma0 (1, num_compute_threads,
          num_compute_threads,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: perform the global->shared transfer.
    float *base_ptr = &(idata[ALIGN_OFFSET]);
#ifdef CUDADMA_DEBUG_ON
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
#endif
  }
  else
  {
    // Compute threads: zero the buffer first so stale shared-memory contents
    // cannot mask a partial transfer.
    int iters = buffer_size/num_compute_threads;
    int index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      buffer[index] = 0.0f;
      index += num_compute_threads;
    }
    if (index < buffer_size)
      buffer[index] = 0.0f;
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Now read the buffer out of shared and write the results back
    index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      float res = buffer[index];
      odata[index] = res;
      index += num_compute_threads;
    }
    if (index < buffer_size)
    {
      float res = buffer[index];
      odata[index] = res;
    }
  }
}
// Warp-specialized strided-load test, three-template-parameter variant: the
// element count is a runtime argument while element size and DMA thread count
// stay compile-time. Same protocol as dma_ld_test_four: DMA threads copy, the
// compute threads zero / trigger / wait / write back for host validation.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_three ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads, int num_elmts)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<true,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS>
    dma0 (1, num_compute_threads,
          num_compute_threads,
          num_elmts,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: perform the global->shared transfer.
    float *base_ptr = &(idata[ALIGN_OFFSET]);
#ifdef CUDADMA_DEBUG_ON
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
#endif
  }
  else
  {
    // Compute threads: zero the buffer first so stale shared-memory contents
    // cannot mask a partial transfer.
    int iters = buffer_size/num_compute_threads;
    int index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      buffer[index] = 0.0f;
      index += num_compute_threads;
    }
    if (index < buffer_size)
      buffer[index] = 0.0f;
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Now read the buffer out of shared and write the results back
    index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      float res = buffer[index];
      odata[index] = res;
      index += num_compute_threads;
    }
    if (index < buffer_size)
    {
      float res = buffer[index];
      odata[index] = res;
    }
  }
}
// Warp-specialized strided-load test, two-template-parameter variant: element
// count AND the number of DMA threads are runtime arguments; only alignment
// and element size remain compile-time. Same protocol as dma_ld_test_four.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_two ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads, int num_elmts, int dma_threads)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<true,ALIGNMENT,BYTES_PER_ELMT>
    dma0 (1, dma_threads, num_compute_threads,
          num_compute_threads,
          num_elmts,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: perform the global->shared transfer.
    float *base_ptr = &(idata[ALIGN_OFFSET]);
#ifdef CUDADMA_DEBUG_ON
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
#endif
  }
  else
  {
    // Compute threads: zero the buffer first so stale shared-memory contents
    // cannot mask a partial transfer.
    int iters = buffer_size/num_compute_threads;
    int index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      buffer[index] = 0.0f;
      index += num_compute_threads;
    }
    if (index < buffer_size)
      buffer[index] = 0.0f;
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Now read the buffer out of shared and write the results back
    index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      float res = buffer[index];
      odata[index] = res;
      index += num_compute_threads;
    }
    if (index < buffer_size)
    {
      float res = buffer[index];
      odata[index] = res;
    }
  }
}
// Warp-specialized strided-load test, fully runtime variant: element size,
// element count, and DMA thread count are all runtime arguments; only the
// alignment is compile-time. Same protocol as dma_ld_test_four.
template<int ALIGNMENT, int ALIGN_OFFSET>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_one ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads, int bytes_per_elmt, int num_elmts, int dma_threads)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<true,ALIGNMENT>
    dma0 (1, dma_threads,
          num_compute_threads,
          num_compute_threads,
          bytes_per_elmt,
          num_elmts,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: perform the global->shared transfer.
    float *base_ptr = &(idata[ALIGN_OFFSET]);
#ifdef CUDADMA_DEBUG_ON
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
#endif
  }
  else
  {
    // Compute threads: zero the buffer first so stale shared-memory contents
    // cannot mask a partial transfer.
    int iters = buffer_size/num_compute_threads;
    int index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      buffer[index] = 0.0f;
      index += num_compute_threads;
    }
    if (index < buffer_size)
      buffer[index] = 0.0f;
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Now read the buffer out of shared and write the results back
    index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      float res = buffer[index];
      odata[index] = res;
      index += num_compute_threads;
    }
    if (index < buffer_size)
    {
      float res = buffer[index];
      odata[index] = res;
    }
  }
}
// Cooperatively zero `buffer_size` floats in `buffer`: every thread of the
// block writes indices threadIdx.x, threadIdx.x + blockDim.x, ... which
// together cover exactly [0, buffer_size).
__device__
void zero_buffer(float *buffer, const int buffer_size)
{
  for (int idx = threadIdx.x; idx < buffer_size; idx += blockDim.x)
    buffer[idx] = 0.0f;
}
// Cooperatively copy `buffer_size` floats from `buffer` to `dst`: every
// thread of the block handles indices threadIdx.x, threadIdx.x + blockDim.x,
// ... which together cover exactly [0, buffer_size).
__device__
void copy_buffer(float *buffer, float *dst, const int buffer_size)
{
  for (int idx = threadIdx.x; idx < buffer_size; idx += blockDim.x)
    dst[idx] = buffer[idx];
}
// Non-warp-specialized counterpart of dma_ld_test_four: every thread in the
// block participates in the DMA transfer, and plain __syncthreads() barriers
// separate the zero / transfer / write-back phases.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
simple_test_four(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<false,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS,NUM_ELMTS>
    dma0(src_stride, dst_stride);
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  float *base_ptr = &(idata[ALIGN_OFFSET]);
  dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
  __syncthreads();
  copy_buffer(buffer,odata,buffer_size);
}
// Non-warp-specialized counterpart of dma_ld_test_three: element count is a
// runtime argument; all threads participate and __syncthreads() separates the
// zero / transfer / write-back phases.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
simple_test_three(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size, int num_elmts)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<false,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS>
    dma0(num_elmts, src_stride, dst_stride);
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  float *base_ptr = &(idata[ALIGN_OFFSET]);
  dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
  __syncthreads();
  copy_buffer(buffer, odata, buffer_size);
}
// Non-warp-specialized counterpart of dma_ld_test_two: element count is a
// runtime argument (alignment and element size stay compile-time); all
// threads participate, with __syncthreads() separating the phases.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT>
__global__ void __launch_bounds__(1024,1)
simple_test_two(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size, int num_elmts)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<false,ALIGNMENT,BYTES_PER_ELMT>
    dma0(num_elmts, src_stride, dst_stride);
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  float *base_ptr = &(idata[ALIGN_OFFSET]);
  dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
  __syncthreads();
  copy_buffer(buffer, odata, buffer_size);
}
// Non-warp-specialized counterpart of dma_ld_test_one: element size and count
// are both runtime arguments (only alignment is compile-time); all threads
// participate, with __syncthreads() separating the phases.
template<int ALIGNMENT, int ALIGN_OFFSET>
__global__ void __launch_bounds__(1024,1)
simple_test_one(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size, int bytes_per_elmt, int num_elmts)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<false,ALIGNMENT>
    dma0(bytes_per_elmt, num_elmts, src_stride, dst_stride);
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  float *base_ptr = &(idata[ALIGN_OFFSET]);
  dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
  __syncthreads();
  copy_buffer(buffer, odata, buffer_size);
}
// Host-side driver for one strided-DMA experiment. Fills an input array with
// a known ramp, launches one of the eight test kernels (warp-specialized or
// not, with 1-4 of the cudaDMAStrided parameters given as template
// arguments), copies the shared-memory image back, and verifies that every
// element arrived at its destination stride unchanged.
// Returns true on success; also returns true (i.e. "skip") when the required
// shared buffer would exceed the 48KB shared-memory limit.
// src_stride / dst_stride are expressed in floats; the kernels take bytes.
template<bool SPECIALIZED, int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS, int DMA_THREADS, int NUM_TEMPLATE_PARAMS>
__host__ bool run_experiment(int src_stride /*in floats*/, int dst_stride/*in floats*/)
{
  // check some assertions
  assert(BYTES_PER_ELMT <= src_stride*sizeof(float));
  assert(BYTES_PER_ELMT <= dst_stride*sizeof(float));
  int shared_buffer_size = (NUM_ELMTS*dst_stride + ALIGN_OFFSET);
  // Check to see if we're using more shared memory than there is, if so return
  if ((shared_buffer_size*sizeof(float)) > 49152)
    return true;
  // Allocate the inpute data: a ramp 0,1,2,... so any misplaced element is
  // detectable by value.
  int input_size = (NUM_ELMTS*src_stride+ALIGN_OFFSET);
  float *h_idata = (float*)malloc(input_size*sizeof(float));
  for (int i=0; i<input_size; i++)
    h_idata[i] = float(i);
  // Allocate device memory and copy down
  float *d_idata;
  CUDA_SAFE_CALL( hipMalloc( (void**)&d_idata, input_size*sizeof(float)));
  CUDA_SAFE_CALL( hipMemcpy( d_idata, h_idata, input_size*sizeof(float), hipMemcpyHostToDevice));
  // Allocate the output size, zero-initialized so untouched slots stand out
  int output_size = (NUM_ELMTS*dst_stride+ALIGN_OFFSET);
  float *h_odata = (float*)malloc(output_size*sizeof(float));
  for (int i=0; i<output_size; i++)
    h_odata[i] = 0.0f;
  // Allocate device memory and copy down
  float *d_odata;
  CUDA_SAFE_CALL( hipMalloc( (void**)&d_odata, output_size*sizeof(float)));
  CUDA_SAFE_CALL( hipMemcpy( d_odata, h_odata, output_size*sizeof(float), hipMemcpyHostToDevice));
  // Specialized kernels need one compute warp on top of the DMA threads;
  // non-specialized kernels use the DMA threads for everything.
  int num_compute_warps = 1;
  int total_threads = 0;
  if (SPECIALIZED)
    total_threads = (num_compute_warps)*WARP_SIZE + DMA_THREADS;
  else
    total_threads = DMA_THREADS;
  assert(total_threads > 0);
  // Dispatch on how many of the DMA parameters are template arguments; the
  // remainder are passed at runtime.
  switch (NUM_TEMPLATE_PARAMS)
  {
  case 1:
    if (SPECIALIZED)
    {
      hipLaunchKernelGGL(( dma_ld_test_one<ALIGNMENT,ALIGN_OFFSET>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE,
        BYTES_PER_ELMT,NUM_ELMTS,DMA_THREADS);
    }
    else
    {
      hipLaunchKernelGGL(( simple_test_one<ALIGNMENT,ALIGN_OFFSET>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, BYTES_PER_ELMT, NUM_ELMTS);
    }
    break;
  case 2:
    if (SPECIALIZED)
    {
      hipLaunchKernelGGL(( dma_ld_test_two<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE,
        NUM_ELMTS,DMA_THREADS);
    }
    else
    {
      hipLaunchKernelGGL(( simple_test_two<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, NUM_ELMTS);
    }
    break;
  case 3:
    if (SPECIALIZED)
    {
      hipLaunchKernelGGL(( dma_ld_test_three<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,DMA_THREADS>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE,
        NUM_ELMTS);
    }
    else
    {
      hipLaunchKernelGGL(( simple_test_three<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,DMA_THREADS>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, NUM_ELMTS);
    }
    break;
  case 4:
    if (SPECIALIZED)
    {
      hipLaunchKernelGGL(( dma_ld_test_four<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,DMA_THREADS>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE);
    }
    else
    {
      hipLaunchKernelGGL(( simple_test_four<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,DMA_THREADS>)
        , dim3(1),dim3(total_threads),shared_buffer_size*sizeof(float),0,
        d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size);
    }
    break;
  default:
    assert(false);
    break;
  }
  CUDA_SAFE_CALL( hipDeviceSynchronize());
  CUDA_SAFE_CALL( hipMemcpy (h_odata, d_odata, output_size*sizeof(float), hipMemcpyDeviceToHost));
  // Check the result: element i must appear intact at its destination stride.
  bool pass = true;
  for (int i=0; i<NUM_ELMTS && pass; i++)
  {
    int in_index = ALIGN_OFFSET+i*src_stride;
    int out_index = ALIGN_OFFSET+i*dst_stride;
    for (int j=0; j<(BYTES_PER_ELMT/sizeof(float)); j++)
    {
      //printf("%f ",h_odata[out_index+j]);
      if (h_idata[in_index+j] != h_odata[out_index+j])
      {
        fprintf(stderr,"Experiment: %d element bytes, %d elements, %ld source stride, %ld destination stride, %d DMA warps, %d alignment, %d offset, ",BYTES_PER_ELMT,NUM_ELMTS,src_stride*sizeof(float),dst_stride*sizeof(float),DMA_THREADS/WARP_SIZE,ALIGNMENT,ALIGN_OFFSET);
        fprintf(stderr,"Index %d of element %d was expecting %f but received %f\n", j, i, h_idata[in_index+j], h_odata[out_index+j]);
        pass = false;
        break;
      }
    }
    //printf("\n");
  }
  if (!pass)
  {
    fprintf(stdout,"Result - %s\n",(pass?"SUCCESS":"FAILURE"));
    fflush(stdout);
  }
  // Free up the remaining memory
  CUDA_SAFE_CALL( hipFree(d_idata));
  CUDA_SAFE_CALL( hipFree(d_odata));
  free(h_idata);
  free(h_odata);
  total_experiments++;
  return pass;
}
#if 0
// Disabled exhaustive sweep: runs every (element size, element count,
// src/dst stride, DMA warp count) combination up to the given maxima and
// returns the conjunction of all per-experiment results.
template<int ALIGNMENT, int ALIGN_OFFSET>
__host__
bool run_all_experiments(int max_element_size, int max_element_count,
                         int max_dma_warps)
{
  bool pass = true;
  for (int element_size=1; element_size <= max_element_size; element_size++)
  {
    fprintf(stdout,"Testing cases with element_size %ld - alignment %d - offset %d...\n",element_size*sizeof(float), ALIGNMENT, ALIGN_OFFSET);
    fflush(stdout);
    for (int element_count=1; element_count <= max_element_count; element_count++)
    {
      // Get the initial source stride from the element size with the given alignment
      // (element size in floats rounded up to the next ALIGNMENT-byte multiple).
      const int min_stride = element_size + (element_size%(ALIGNMENT/sizeof(float)) ?
          ((ALIGNMENT/sizeof(float))-(element_size%(ALIGNMENT/sizeof(float)))) : 0);
      // Let's only check full stride cases if element_size is divisible by 31 so we can
      // make the search space a little sparser and also test lots of potential strides
      // on weird alignment offsets
      // NOTE(review): the comment above says "divisible by 31" but the code
      // tests (element_size%127)==0 — one of the two is stale.
      if ((element_size<1024) && (element_size%127)==0)
      {
        // Make each of the strides range from min_stride to 2*min_stride
        // This should cover all of the cases for a given element size
        // Anything larger is modulo equivalent to a smaller stride
        for (int src_stride=min_stride; src_stride <= (2*min_stride); src_stride += (ALIGNMENT/sizeof(float)))
          for (int dst_stride=min_stride; dst_stride <= (2*min_stride); dst_stride += (ALIGNMENT/sizeof(float)))
          {
            for (int dma_warps=1; dma_warps <= max_dma_warps; dma_warps++)
            {
              pass = pass && run_experiment<ALIGNMENT,ALIGN_OFFSET>(element_size,
                  element_count,src_stride,dst_stride,dma_warps);
            }
          }
      }
      else
      {
        // Just test the variable number of dma_warps
        for (int dma_warps=1; dma_warps <= max_dma_warps; dma_warps++)
        {
          pass = pass && run_experiment<ALIGNMENT,ALIGN_OFFSET>(element_size,
              element_count,min_stride,min_stride,dma_warps);
        }
      }
    }
  }
  return pass;
}
#endif
// Run the fixed-configuration experiment for every DMA warp count from
// 1 to 16 warps (32..512 DMA threads), folding each outcome into 'result'.
// BYTES_PER_ELMT must be a whole number of floats since the tests copy
// float data; min_stride rounds the element size (in floats) up to the
// next ALIGNMENT-byte multiple so consecutive elements stay aligned.
// NOTE(review): '&&' short-circuits, so once one configuration fails the
// remaining warp counts are skipped — confirm that is the intended policy.
// NOTE(review): this 5-template-argument run_experiment differs from the
// 7-argument form instantiated in main() — confirm the matching overload
// exists elsewhere in this file.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS>
__host__
void run_all_dma_warps(bool &result)
{
  assert(BYTES_PER_ELMT%sizeof(float)==0);
  const int element_size = BYTES_PER_ELMT/sizeof(float);
  const int min_stride = element_size + (element_size%(ALIGNMENT/sizeof(float)) ?
      ((ALIGNMENT/sizeof(float))-(element_size%(ALIGNMENT/sizeof(float)))) : 0);
  const int warp_size=32;
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,1*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,2*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,3*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,4*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,5*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,6*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,7*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,8*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,9*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,10*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,11*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,12*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,13*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,14*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,15*warp_size>(min_stride,min_stride);
  result = result && run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,16*warp_size>(min_stride,min_stride);
}
// For a fixed element size, sweep the number of elements per transfer from
// 1 to 16 (counts 17-32 are compiled out below), delegating each count to
// run_all_dma_warps which in turn sweeps the DMA warp counts.  All results
// accumulate into the caller-provided 'result' flag.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT>
__host__
void run_all_num_elements(bool &result)
{
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,1>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,2>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,3>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,4>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,5>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,6>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,7>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,8>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,9>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,10>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,11>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,12>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,13>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,14>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,15>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,16>(result);
  // Element counts 17-32 disabled (presumably to bound compile time and
  // template-instantiation count — TODO confirm).
#if 0
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,17>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,18>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,19>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,20>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,21>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,22>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,23>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,24>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,25>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,26>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,27>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,28>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,29>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,30>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,31>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,32>(result);
#endif
}
#if 0
template<int ALIGNMENT, int ALIGN_OFFSET>
__host__
bool run_all_experiments()
{
bool result = true;
{
const int base=0;
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=64;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=128;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=192;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=256;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=320;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=384;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=448;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=512;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=576;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=640;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=704;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=768;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=832;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=896;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=960;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
return result;
}
#endif
__host__
// Test driver: runs a single strided-DMA experiment whose configuration
// (alignment, offset, element size/count, DMA threads, templates, and
// warp-specialization) is baked in at compile time via the PARAM_* macros
// from params_directed.h.  Prints the configuration and SUCCESS/FAILURE.
int main()
{
  // Disabled: per-configuration aggregate flags for the exhaustive sweeps.
#if 0
  bool success16_0 = true;
  bool success08_0 = true;
  bool success08_2 = true;
  bool success04_0 = true;
  bool success04_1 = true;
  bool success04_2 = true;
  bool success04_3 = true;
#endif
  // An 8192*sizeof(float) element is almost all of shared memory (can only fit one)
  // We probably want to test up to 32 elements per stride if we're going to have up to 16 dma warps
  //success16_0 = success16_0 && run_all_experiments<16,0>(8192,32,16);
  //success08_0 = success08_0 && run_all_experiments<8,0>(8192,32,16);
  //success08_2 = success08_2 && run_all_experiments<8,2>(8192,32,16);
  //success04_0 = success04_0 && run_all_experiments<4,0>(8192,32,16);
  //success04_1 = success04_1 && run_all_experiments<4,1>(8192,32,16);
  //success04_2 = success04_2 && run_all_experiments<4,2>(8192,32,16);
  //success04_3 = success04_3 && run_all_experiments<4,3>(8192,32,16);
#if 1
  // Element size in floats, and the minimum stride (in floats) that keeps
  // every element aligned to PARAM_ALIGNMENT bytes.
  const int element_size = PARAM_ELMT_SIZE/sizeof(float);
  const int min_stride = element_size + (element_size%(PARAM_ALIGNMENT/sizeof(float)) ?
      ((PARAM_ALIGNMENT/sizeof(float))-(element_size%(PARAM_ALIGNMENT/sizeof(float)))) : 0);
  if (PARAM_SPECIALIZED)
    fprintf(stdout,"Warp-Specialized Experiment: ALIGNMENT-%2d OFFSET-%d ELMT_SIZE-%5d NUM_ELMTS-%2d DMA_WARPS-%2d NUM_TEMPLATES-%d ",PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE,PARAM_NUM_ELMTS,PARAM_DMA_THREADS/WARP_SIZE,PARAM_NUM_TEMPLATES);
  else
    fprintf(stdout,"Non-Warp-Specialized Experiment: ALIGNMENT-%2d OFFSET-%d ELMT_SIZE-%5d NUM_ELMTS-%2d TOTAL_WARPS-%2d NUM_TEMPLATES-%d ",PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE,PARAM_NUM_ELMTS,PARAM_DMA_THREADS/WARP_SIZE,PARAM_NUM_TEMPLATES);
  fflush(stdout);
  bool result = run_experiment<PARAM_SPECIALIZED,PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE,PARAM_NUM_ELMTS,PARAM_DMA_THREADS,PARAM_NUM_TEMPLATES>(min_stride,min_stride);
  fprintf(stdout,"RESULT: %s\n",(result?"SUCCESS":"FAILURE"));
  fflush(stdout);
#else
  bool result = true;
  printf("Running all experiments for ALIGNMENT-%d OFFSET-%d ELMT_SIZE-%d... ",PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE);
  run_all_num_elements<PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE>(result);
  printf("%s\n",(result?"SUCCESS":"FAILURE"));
#endif
  //success16_0 = success16_0 && run_all_experiments<16,0>();
#if 0
  fprintf(stdout,"\nResults:\n");
  fprintf(stdout,"\tAlignment16-Offset0: %s\n",(success16_0?"SUCCESS":"FAILURE"));
  fprintf(stdout,"\tAlignment08-Offset0: %s\n",(success08_0?"SUCCESS":"FAILURE"));
  fprintf(stdout,"\tAlignment08-Offset2: %s\n",(success08_2?"SUCCESS":"FAILURE"));
  fprintf(stdout,"\tAlignment04-Offset0: %s\n",(success04_0?"SUCCESS":"FAILURE"));
  fprintf(stdout,"\tAlignment04-Offset1: %s\n",(success04_1?"SUCCESS":"FAILURE"));
  fprintf(stdout,"\tAlignment04-Offset2: %s\n",(success04_2?"SUCCESS":"FAILURE"));
  fprintf(stdout,"\tAlignment04-Offset3: %s\n",(success04_3?"SUCCESS":"FAILURE"));
  fprintf(stdout,"\n\tTotal Experiments - %ld\n", total_experiments);
#endif
  // NOTE(review): the exit status is the boolean itself, i.e. 1 on SUCCESS
  // and 0 on FAILURE — inverted relative to shell conventions.  Left as-is
  // because the surrounding harness may parse it this way; confirm before
  // changing.
  return result;
}
| 390932c46bbca0a8f824c3ff577527f9aef16137.cu | /*
* Copyright 2010 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Software DMA project
*
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include "cuda.h"
#include "cuda_runtime.h"
//#define CUDADMA_DEBUG_ON
#include "cudaDMA.h"
#include "params_directed.h"
#define WARP_SIZE 32
// includes, project
// includes, kernels
// Check the result of a CUDA runtime call; on failure, print the error
// string and terminate the process.
// Fix: the original called exit(false), which terminates with status 0
// (success) on a CUDA error; use EXIT_FAILURE so callers/scripts see the
// failure.  Wrapped in do{}while(0) so the macro behaves as a single
// statement inside if/else without dangling-semicolon surprises.
#define CUDA_SAFE_CALL(x)                                   \
  do {                                                      \
    cudaError_t err = (x);                                  \
    if (err != cudaSuccess)                                 \
    {                                                       \
      printf("Cuda error: %s\n", cudaGetErrorString(err));  \
      exit(EXIT_FAILURE);                                   \
    }                                                       \
  } while (0)
// Running count of experiments executed by this binary; file-scope so every
// run_experiment instantiation can increment it (acknowledged global-state
// shortcut for a standalone test driver).
long total_experiments = 0;
// Warp-specialized strided DMA-load test: fully templated variant where the
// element size, element count, and DMA thread count are all compile-time
// constants (four template knobs besides alignment/offset).  Launched with a
// single block and buffer_size floats of dynamic shared memory; the first
// num_compute_threads threads act as compute threads and the remainder are
// claimed by the cudaDMA object (dma0.owns_this_thread()).  src_stride and
// dst_stride are in bytes; ALIGN_OFFSET (floats) shifts both the global base
// pointer and the shared buffer to exercise the alignment under test.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_four ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<true,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS,NUM_ELMTS>
    dma0 (1, num_compute_threads,
          num_compute_threads,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: copy NUM_ELMTS strided elements from global into shared.
    float *base_ptr = &(idata[ALIGN_OFFSET]);
#ifdef CUDADMA_DEBUG_ON
    // Debug mode: only perform the start/finish handshake; no data moves.
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
#endif
  }
  else
  {
    // Zero out the buffer so words the DMA never touches read back as 0.
    int iters = buffer_size/num_compute_threads;
    int index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      buffer[index] = 0.0f;
      index += num_compute_threads;
    }
    // Tail element when buffer_size is not a multiple of the thread count.
    if (index < buffer_size)
      buffer[index] = 0.0f;
    // Hand the buffer to the DMA threads and block until the copy is done.
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Now read the buffer out of shared and write the results back
    index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      float res = buffer[index];
      odata[index] = res;
      index += num_compute_threads;
    }
    if (index < buffer_size)
    {
      float res = buffer[index];
      odata[index] = res;
    }
  }
}
// Warp-specialized strided DMA-load test, three-template variant: the number
// of elements (num_elmts) is a runtime argument passed to the cudaDMAStrided
// constructor instead of a template constant.  Otherwise identical protocol:
// DMA threads copy global->shared while compute threads zero the buffer,
// wait for the transfer, and mirror the shared buffer out to odata.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_three ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads, int num_elmts)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<true,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS>
    dma0 (1, num_compute_threads,
          num_compute_threads,
          num_elmts,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: copy the strided elements starting at the align offset.
    float *base_ptr = &(idata[ALIGN_OFFSET]);
#ifdef CUDADMA_DEBUG_ON
    // Debug mode: handshake only; no data moves.
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
#endif
  }
  else
  {
    // Zero out the buffer
    int iters = buffer_size/num_compute_threads;
    int index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      buffer[index] = 0.0f;
      index += num_compute_threads;
    }
    // Tail element when buffer_size is not a multiple of the thread count.
    if (index < buffer_size)
      buffer[index] = 0.0f;
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Now read the buffer out of shared and write the results back
    index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      float res = buffer[index];
      odata[index] = res;
      index += num_compute_threads;
    }
    if (index < buffer_size)
    {
      float res = buffer[index];
      odata[index] = res;
    }
  }
}
// Warp-specialized strided DMA-load test, two-template variant: both the
// element count (num_elmts) and the DMA thread count (dma_threads) are
// runtime arguments; only alignment/offset and the element byte size stay
// compile-time.  Same protocol as the other variants: DMA threads copy
// global->shared, compute threads zero the buffer, wait, then write it out.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_two ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads, int num_elmts, int dma_threads)
{
  extern __shared__ float buffer[];
  cudaDMAStrided<true,ALIGNMENT,BYTES_PER_ELMT>
    dma0 (1, dma_threads, num_compute_threads,
          num_compute_threads,
          num_elmts,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: copy the strided elements starting at the align offset.
    float *base_ptr = &(idata[ALIGN_OFFSET]);
#ifdef CUDADMA_DEBUG_ON
    // Debug mode: handshake only; no data moves.
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(base_ptr, &(buffer[ALIGN_OFFSET]));
#endif
  }
  else
  {
    // Zero out the buffer
    int iters = buffer_size/num_compute_threads;
    int index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      buffer[index] = 0.0f;
      index += num_compute_threads;
    }
    // Tail element when buffer_size is not a multiple of the thread count.
    if (index < buffer_size)
      buffer[index] = 0.0f;
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Now read the buffer out of shared and write the results back
    index = threadIdx.x;
    for (int i=0; i<iters; i++)
    {
      float res = buffer[index];
      odata[index] = res;
      index += num_compute_threads;
    }
    if (index < buffer_size)
    {
      float res = buffer[index];
      odata[index] = res;
    }
  }
}
template<int ALIGNMENT, int ALIGN_OFFSET>
__global__ void __launch_bounds__(1024,1)
dma_ld_test_one ( float *idata, float *odata, int src_stride/*bytes*/, int dst_stride/*bytes*/, int buffer_size /*number of floats*/, int num_compute_threads, int bytes_per_elmt, int num_elmts, int dma_threads)
{
  extern __shared__ float buffer[];
  // Warp-specialized strided DMA with only the alignment as a template
  // parameter; all of the transfer geometry is supplied at runtime.
  cudaDMAStrided<true,ALIGNMENT>
    dma0 (1, dma_threads,
          num_compute_threads,
          num_compute_threads,
          bytes_per_elmt,
          num_elmts,
          src_stride,
          dst_stride);
  if (dma0.owns_this_thread())
  {
    // DMA threads: stream the strided elements from global into shared memory.
    float *src = idata + ALIGN_OFFSET;
#ifdef CUDADMA_DEBUG_ON
    dma0.wait_for_dma_start();
    dma0.finish_async_dma();
#else
    dma0.execute_dma(src, buffer + ALIGN_OFFSET);
#endif
  }
  else
  {
    // Compute threads: clear the shared buffer first so stale shared-memory
    // contents cannot masquerade as a successful transfer.
    int full_passes = buffer_size/num_compute_threads;
    int pos = threadIdx.x;
    for (int p = 0; p < full_passes; p++)
    {
      buffer[pos] = 0.0f;
      pos += num_compute_threads;
    }
    if (pos < buffer_size)
      buffer[pos] = 0.0f;
    // Hand off to the DMA threads, then block until the transfer completes.
    dma0.start_async_dma();
    dma0.wait_for_dma_finish();
    // Drain the shared buffer back to global memory for host-side validation.
    pos = threadIdx.x;
    for (int p = 0; p < full_passes; p++)
    {
      odata[pos] = buffer[pos];
      pos += num_compute_threads;
    }
    if (pos < buffer_size)
      odata[pos] = buffer[pos];
  }
}
__device__
void zero_buffer(float *buffer, const int buffer_size)
{
  // Block-cooperative clear of buffer[0..buffer_size): each thread strides
  // through the array by blockDim.x. Equivalent to the quotient-loop-plus-tail
  // formulation because threadIdx.x < blockDim.x always holds.
  // Callers in this file issue __syncthreads() after this returns.
  for (int idx = threadIdx.x; idx < buffer_size; idx += blockDim.x)
    buffer[idx] = 0.0f;
}
__device__
void copy_buffer(float *buffer, float *dst, const int buffer_size)
{
  // Block-cooperative copy of buffer[0..buffer_size) into dst: each thread
  // strides through the array by blockDim.x. Equivalent to the
  // quotient-loop-plus-tail formulation because threadIdx.x < blockDim.x.
  for (int idx = threadIdx.x; idx < buffer_size; idx += blockDim.x)
    dst[idx] = buffer[idx];
}
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
simple_test_four(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size)
{
  extern __shared__ float buffer[];
  // Non-specialized DMA: all threads participate in the transfer. Four of the
  // DMA parameters are compile-time template arguments; only the strides are
  // runtime values.
  cudaDMAStrided<false,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS,NUM_ELMTS> dma0(src_stride, dst_stride);
  // Clear shared memory, copy in, then write the result back for validation.
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  dma0.execute_dma(idata + ALIGN_OFFSET, buffer + ALIGN_OFFSET);
  __syncthreads();
  copy_buffer(buffer, odata, buffer_size);
}
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int DMA_THREADS>
__global__ void __launch_bounds__(1024,1)
simple_test_three(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size, int num_elmts)
{
  extern __shared__ float buffer[];
  // Non-specialized DMA with three compile-time parameters; the element
  // count and strides are runtime values.
  cudaDMAStrided<false,ALIGNMENT,BYTES_PER_ELMT,DMA_THREADS> dma0(num_elmts, src_stride, dst_stride);
  // Clear shared memory, copy in, then write the result back for validation.
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  dma0.execute_dma(idata + ALIGN_OFFSET, buffer + ALIGN_OFFSET);
  __syncthreads();
  copy_buffer(buffer, odata, buffer_size);
}
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT>
__global__ void __launch_bounds__(1024,1)
simple_test_two(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size, int num_elmts)
{
  extern __shared__ float buffer[];
  // Non-specialized DMA with two compile-time parameters (alignment and
  // element size); element count and strides are runtime values.
  cudaDMAStrided<false,ALIGNMENT,BYTES_PER_ELMT> dma0(num_elmts, src_stride, dst_stride);
  // Clear shared memory, copy in, then write the result back for validation.
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  dma0.execute_dma(idata + ALIGN_OFFSET, buffer + ALIGN_OFFSET);
  __syncthreads();
  copy_buffer(buffer, odata, buffer_size);
}
template<int ALIGNMENT, int ALIGN_OFFSET>
__global__ void __launch_bounds__(1024,1)
simple_test_one(float *idata, float *odata, int src_stride, int dst_stride, int buffer_size, int bytes_per_elmt, int num_elmts)
{
  extern __shared__ float buffer[];
  // Non-specialized DMA with only the alignment as a compile-time parameter;
  // the full transfer geometry is supplied at runtime.
  cudaDMAStrided<false,ALIGNMENT> dma0(bytes_per_elmt, num_elmts, src_stride, dst_stride);
  // Clear shared memory, copy in, then write the result back for validation.
  zero_buffer(buffer, buffer_size);
  __syncthreads();
  dma0.execute_dma(idata + ALIGN_OFFSET, buffer + ALIGN_OFFSET);
  __syncthreads();
  copy_buffer(buffer, odata, buffer_size);
}
template<bool SPECIALIZED, int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS, int DMA_THREADS, int NUM_TEMPLATE_PARAMS>
__host__ bool run_experiment(int src_stride /*in floats*/, int dst_stride/*in floats*/)
{
  // Runs a single strided-DMA experiment on the device and validates the
  // result on the host. Returns true on success (or when the configuration is
  // skipped for exceeding shared memory), false on a data mismatch.
  // NUM_TEMPLATE_PARAMS selects which kernel variant (how many DMA parameters
  // are template arguments) is launched; SPECIALIZED selects warp-specialized
  // versus non-specialized kernels.
  // An element must fit within both the source and destination strides.
  assert(BYTES_PER_ELMT <= src_stride*sizeof(float));
  assert(BYTES_PER_ELMT <= dst_stride*sizeof(float));
  int shared_buffer_size = (NUM_ELMTS*dst_stride + ALIGN_OFFSET);
  // Skip configurations that would exceed the 48KB shared-memory limit
  if ((shared_buffer_size*sizeof(float)) > 49152)
    return true;
  // Allocate the input data, seeded with distinct values so any misplaced
  // element is detectable during validation.
  int input_size = (NUM_ELMTS*src_stride+ALIGN_OFFSET);
  float *h_idata = (float*)malloc(input_size*sizeof(float));
  assert(h_idata != NULL);
  for (int i=0; i<input_size; i++)
    h_idata[i] = float(i);
  // Allocate device memory and copy down
  float *d_idata;
  CUDA_SAFE_CALL( cudaMalloc( (void**)&d_idata, input_size*sizeof(float)));
  CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_idata, input_size*sizeof(float), cudaMemcpyHostToDevice));
  // Allocate the output buffer, zero-initialized on both host and device
  int output_size = (NUM_ELMTS*dst_stride+ALIGN_OFFSET);
  float *h_odata = (float*)malloc(output_size*sizeof(float));
  assert(h_odata != NULL);
  for (int i=0; i<output_size; i++)
    h_odata[i] = 0.0f;
  // Allocate device memory and copy down
  float *d_odata;
  CUDA_SAFE_CALL( cudaMalloc( (void**)&d_odata, output_size*sizeof(float)));
  CUDA_SAFE_CALL( cudaMemcpy( d_odata, h_odata, output_size*sizeof(float), cudaMemcpyHostToDevice));
  // Warp-specialized kernels need one compute warp plus the DMA threads;
  // non-specialized kernels use all threads for the transfer.
  int num_compute_warps = 1;
  int total_threads = 0;
  if (SPECIALIZED)
    total_threads = (num_compute_warps)*WARP_SIZE + DMA_THREADS;
  else
    total_threads = DMA_THREADS;
  assert(total_threads > 0);
  // Dispatch on how many of the DMA parameters are template arguments.
  switch (NUM_TEMPLATE_PARAMS)
  {
  case 1:
    if (SPECIALIZED)
    {
      dma_ld_test_one<ALIGNMENT,ALIGN_OFFSET>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE,
         BYTES_PER_ELMT,NUM_ELMTS,DMA_THREADS);
    }
    else
    {
      simple_test_one<ALIGNMENT,ALIGN_OFFSET>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, BYTES_PER_ELMT, NUM_ELMTS);
    }
    break;
  case 2:
    if (SPECIALIZED)
    {
      dma_ld_test_two<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE,
         NUM_ELMTS,DMA_THREADS);
    }
    else
    {
      simple_test_two<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, NUM_ELMTS);
    }
    break;
  case 3:
    if (SPECIALIZED)
    {
      dma_ld_test_three<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,DMA_THREADS>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE,
         NUM_ELMTS);
    }
    else
    {
      simple_test_three<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,DMA_THREADS>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, NUM_ELMTS);
    }
    break;
  case 4:
    if (SPECIALIZED)
    {
      dma_ld_test_four<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,DMA_THREADS>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size, num_compute_warps*WARP_SIZE);
    }
    else
    {
      simple_test_four<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,DMA_THREADS>
        <<<1,total_threads,shared_buffer_size*sizeof(float),0>>>
        (d_idata, d_odata, src_stride*sizeof(float), dst_stride*sizeof(float), shared_buffer_size);
    }
    break;
  default:
    assert(false);
    break;
  }
  // Catch launch-configuration errors (bad grid/block/smem), which kernel
  // launches never report directly.
  CUDA_SAFE_CALL( cudaGetLastError());
  // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize also surfaces
  // any asynchronous execution errors from the kernel.
  CUDA_SAFE_CALL( cudaDeviceSynchronize());
  CUDA_SAFE_CALL( cudaMemcpy (h_odata, d_odata, output_size*sizeof(float), cudaMemcpyDeviceToHost));
  // Validate: every element must appear at its strided destination offset.
  bool pass = true;
  for (int i=0; i<NUM_ELMTS && pass; i++)
  {
    int in_index = ALIGN_OFFSET+i*src_stride;
    int out_index = ALIGN_OFFSET+i*dst_stride;
    for (int j=0; j<(int)(BYTES_PER_ELMT/sizeof(float)); j++)
    {
      if (h_idata[in_index+j] != h_odata[out_index+j])
      {
        // Casts keep %ld well-defined: size_t does not match %ld on all platforms.
        fprintf(stderr,"Experiment: %d element bytes, %d elements, %ld source stride, %ld destination stride, %d DMA warps, %d alignment, %d offset, ",BYTES_PER_ELMT,NUM_ELMTS,(long)(src_stride*sizeof(float)),(long)(dst_stride*sizeof(float)),DMA_THREADS/WARP_SIZE,ALIGNMENT,ALIGN_OFFSET);
        fprintf(stderr,"Index %d of element %d was expecting %f but received %f\n", j, i, h_idata[in_index+j], h_odata[out_index+j]);
        pass = false;
        break;
      }
    }
  }
  // Only failures are announced; successes stay quiet to keep sweep output small.
  if (!pass)
  {
    fprintf(stdout,"Result - %s\n",(pass?"SUCCESS":"FAILURE"));
    fflush(stdout);
  }
  // Release device and host resources
  CUDA_SAFE_CALL( cudaFree(d_idata));
  CUDA_SAFE_CALL( cudaFree(d_odata));
  free(h_idata);
  free(h_odata);
  total_experiments++;
  return pass;
}
#if 0
template<int ALIGNMENT, int ALIGN_OFFSET>
__host__
bool run_all_experiments(int max_element_size, int max_element_count,
int max_dma_warps)
{
bool pass = true;
for (int element_size=1; element_size <= max_element_size; element_size++)
{
fprintf(stdout,"Testing cases with element_size %ld - alignment %d - offset %d...\n",element_size*sizeof(float), ALIGNMENT, ALIGN_OFFSET);
fflush(stdout);
for (int element_count=1; element_count <= max_element_count; element_count++)
{
// Get the initial source stride from the element size with the given alignment
const int min_stride = element_size + (element_size%(ALIGNMENT/sizeof(float)) ?
((ALIGNMENT/sizeof(float))-(element_size%(ALIGNMENT/sizeof(float)))) : 0);
// Let's only check full stride cases if element_size is divisible by 31 so we can
// make the search space a little sparser and also test lots of potential strides
// on weird alignment offsets
if ((element_size<1024) && (element_size%127)==0)
{
// Make each of the strides range from min_stride to 2*min_stride
// This should cover all of the cases for a given element size
// Anything larger is modulo equivalent to a smaller stride
for (int src_stride=min_stride; src_stride <= (2*min_stride); src_stride += (ALIGNMENT/sizeof(float)))
for (int dst_stride=min_stride; dst_stride <= (2*min_stride); dst_stride += (ALIGNMENT/sizeof(float)))
{
for (int dma_warps=1; dma_warps <= max_dma_warps; dma_warps++)
{
pass = pass && run_experiment<ALIGNMENT,ALIGN_OFFSET>(element_size,
element_count,src_stride,dst_stride,dma_warps);
}
}
}
else
{
// Just test the variable number of dma_warps
for (int dma_warps=1; dma_warps <= max_dma_warps; dma_warps++)
{
pass = pass && run_experiment<ALIGNMENT,ALIGN_OFFSET>(element_size,
element_count,min_stride,min_stride,dma_warps);
}
}
}
}
return pass;
}
#endif
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT, int NUM_ELMTS>
__host__
void run_all_dma_warps(bool &result)
{
  // Sweeps the DMA warp count from 1 to 16 for this (element size, element
  // count) configuration, AND-ing each experiment's outcome into 'result'.
  assert(BYTES_PER_ELMT%sizeof(float)==0);
  const int element_size = BYTES_PER_ELMT/sizeof(float);
  // Round the stride (in floats) up to the next ALIGNMENT boundary.
  const int min_stride = element_size + (element_size%(ALIGNMENT/sizeof(float)) ?
      ((ALIGNMENT/sizeof(float))-(element_size%(ALIGNMENT/sizeof(float)))) : 0);
  const int warp_size=32;
  // Use &= instead of 'result = result && ...': the short-circuit form stopped
  // running any further experiments after the first failure, silently skipping
  // the rest of the sweep. '&=' always evaluates its right-hand side.
  // NOTE(review): these instantiations pass 5 template arguments; the 7-parameter
  // run_experiment above must have defaults declared elsewhere — confirm.
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,1*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,2*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,3*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,4*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,5*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,6*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,7*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,8*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,9*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,10*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,11*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,12*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,13*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,14*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,15*warp_size>(min_stride,min_stride);
  result &= run_experiment<ALIGNMENT,ALIGN_OFFSET,BYTES_PER_ELMT,NUM_ELMTS,16*warp_size>(min_stride,min_stride);
}
// Sweeps the element count from 1 to 16 (counts 17-32 are compiled out below)
// for a fixed element size, delegating each count to run_all_dma_warps, which
// in turn sweeps the DMA warp count. Failures accumulate into 'result'.
template<int ALIGNMENT, int ALIGN_OFFSET, int BYTES_PER_ELMT>
__host__
void run_all_num_elements(bool &result)
{
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,1>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,2>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,3>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,4>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,5>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,6>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,7>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,8>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,9>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,10>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,11>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,12>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,13>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,14>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,15>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,16>(result);
  // Larger element counts disabled, presumably to bound compile time and
  // test runtime — TODO confirm.
#if 0
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,17>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,18>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,19>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,20>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,21>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,22>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,23>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,24>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,25>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,26>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,27>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,28>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,29>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,30>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,31>(result);
  run_all_dma_warps<ALIGNMENT, ALIGN_OFFSET, BYTES_PER_ELMT,32>(result);
#endif
}
#if 0
template<int ALIGNMENT, int ALIGN_OFFSET>
__host__
bool run_all_experiments()
{
bool result = true;
{
const int base=0;
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=64;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=128;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=192;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=256;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=320;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=384;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=448;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=512;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=576;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=640;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=704;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=768;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=832;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=896;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
{
const int base=960;
#if 0
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+4>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+8>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+12>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+16>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+20>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+24>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+28>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+32>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+36>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+40>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+44>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+48>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+52>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+56>(result);
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+60>(result);
#endif
run_all_num_elements<ALIGNMENT,ALIGN_OFFSET,base+64>(result);
}
return result;
}
#endif
__host__
// Entry point for a single DMA-transfer benchmark. The experiment's shape
// (alignment, offset, element size, element count, DMA thread count, template
// count, warp specialization) is baked in at compile time through PARAM_*
// preprocessor defines supplied by the build system.
int main()
{
#if 0
// Per-configuration success flags for the (disabled) run-everything mode.
bool success16_0 = true;
bool success08_0 = true;
bool success08_2 = true;
bool success04_0 = true;
bool success04_1 = true;
bool success04_2 = true;
bool success04_3 = true;
#endif
// An 8192*sizeof(float) element is almost all of shared memory (can only fit one)
// We probably want to test up to 32 elements per stride if we're going to have up to 16 dma warps
//success16_0 = success16_0 && run_all_experiments<16,0>(8192,32,16);
//success08_0 = success08_0 && run_all_experiments<8,0>(8192,32,16);
//success08_2 = success08_2 && run_all_experiments<8,2>(8192,32,16);
//success04_0 = success04_0 && run_all_experiments<4,0>(8192,32,16);
//success04_1 = success04_1 && run_all_experiments<4,1>(8192,32,16);
//success04_2 = success04_2 && run_all_experiments<4,2>(8192,32,16);
//success04_3 = success04_3 && run_all_experiments<4,3>(8192,32,16);
#if 1
// Element size expressed in floats rather than bytes.
const int element_size = PARAM_ELMT_SIZE/sizeof(float);
// Round the element size (in floats) up to the next multiple of the
// alignment (in floats); this is the smallest legal stride.
const int min_stride = element_size + (element_size%(PARAM_ALIGNMENT/sizeof(float)) ?
((PARAM_ALIGNMENT/sizeof(float))-(element_size%(PARAM_ALIGNMENT/sizeof(float)))) : 0);
if (PARAM_SPECIALIZED)
// NOTE(review): "Warp-Specialied" is a typo in the emitted banner; left
// untouched here because it is runtime output, not a comment.
fprintf(stdout,"Warp-Specialied Experiment: ALIGNMENT-%2d OFFSET-%d ELMT_SIZE-%5d NUM_ELMTS-%2d DMA_WARPS-%2d NUM_TEMPLATES-%d ",PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE,PARAM_NUM_ELMTS,PARAM_DMA_THREADS/WARP_SIZE,PARAM_NUM_TEMPLATES);
else
fprintf(stdout,"Non-Warp-Specialized Experiment: ALIGNMENT-%2d OFFSET-%d ELMT_SIZE-%5d NUM_ELMTS-%2d TOTAL_WARPS-%2d NUM_TEMPLATES-%d ",PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE,PARAM_NUM_ELMTS,PARAM_DMA_THREADS/WARP_SIZE,PARAM_NUM_TEMPLATES);
// Flush so the banner is visible even if the experiment hangs or crashes.
fflush(stdout);
bool result = run_experiment<PARAM_SPECIALIZED,PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE,PARAM_NUM_ELMTS,PARAM_DMA_THREADS,PARAM_NUM_TEMPLATES>(min_stride,min_stride);
fprintf(stdout,"RESULT: %s\n",(result?"SUCCESS":"FAILURE"));
fflush(stdout);
#else
// Alternative mode: sweep every element count for one alignment/offset/size.
bool result = true;
printf("Running all experiments for ALIGNMENT-%d OFFSET-%d ELMT_SIZE-%d... ",PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE);
run_all_num_elements<PARAM_ALIGNMENT,PARAM_OFFSET,PARAM_ELMT_SIZE>(result);
printf("%s\n",(result?"SUCCESS":"FAILURE"));
#endif
//success16_0 = success16_0 && run_all_experiments<16,0>();
#if 0
fprintf(stdout,"\nResults:\n");
fprintf(stdout,"\tAlignment16-Offset0: %s\n",(success16_0?"SUCCESS":"FAILURE"));
fprintf(stdout,"\tAlignment08-Offset0: %s\n",(success08_0?"SUCCESS":"FAILURE"));
fprintf(stdout,"\tAlignment08-Offset2: %s\n",(success08_2?"SUCCESS":"FAILURE"));
fprintf(stdout,"\tAlignment04-Offset0: %s\n",(success04_0?"SUCCESS":"FAILURE"));
fprintf(stdout,"\tAlignment04-Offset1: %s\n",(success04_1?"SUCCESS":"FAILURE"));
fprintf(stdout,"\tAlignment04-Offset2: %s\n",(success04_2?"SUCCESS":"FAILURE"));
fprintf(stdout,"\tAlignment04-Offset3: %s\n",(success04_3?"SUCCESS":"FAILURE"));
fprintf(stdout,"\n\tTotal Experiments - %ld\n", total_experiments);
#endif
// Process exit code: 0/false means failure was reported by run_experiment.
return result;
}
|
31b907ac923e5ad49a0c3233bf3829ecb4388790.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define CHECK(call) { \
const hipError_t error = call; \
if (error != hipSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
} \
#define BLOCKSIZE 256
#define MASK 0xffffffff
void initialData(int *ip, const int size);
int naiveReduce(int *data, int size);
int neighboredPairReduce(int *data, const int size);
int interleavedPairReduce(int *data, const int size);
__global__ void reduceSharedMem(int *g_idata, int * g_odata, const int n);
__global__ void reduceWarpShfl(int *g_idata, int *g_odata, const int n);
__global__ void reduceSmemShfl(int *g_idata, int *g_odata, const int n);
// Benchmark driver: sums a 1<<24-element int vector on the CPU (reference)
// and with three GPU reduction kernels, timing each and printing the results.
// Each kernel produces one partial sum per block; the host adds the partials.
int main(int argc, char **argv) {
int size = 1<<24, evenSize = size;
if (evenSize % 2 != 0) evenSize++; // should be even for pair-reduction to work
printf("Vector size %d\n", size);
size_t nBytes = evenSize * sizeof(int);
clock_t start, end;
double exeTime;
int reductionSum;
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// grid and block configuration
// NOTE(review): evenSize (1<<24) is a multiple of BLOCKSIZE here; the
// kernels' early-return-before-__syncthreads pattern relies on that.
dim3 block(BLOCKSIZE);
dim3 grid((evenSize + block.x - 1)/ block.x);
printf("Grid dimension %d Block dimensiton %d\n", grid.x, block.x);
// allocate host memory
int *h_idata, *h_odata, *h_idata_cpy;
h_idata = (int *) malloc(nBytes);
h_odata = (int *) malloc(grid.x * sizeof(int));
h_idata_cpy = (int *) malloc(nBytes);
memset(h_idata, 0, nBytes);
initialData(h_idata, size);
// Keep a copy: the CPU reducers mutate their input in place.
memcpy(h_idata_cpy, h_idata, nBytes);
// 0. compute on CPU
start = clock();
// reductionSum = naiveReduce(h_idata_cpy, size);
// reductionSum = neighboredPairReduce(h_idata_cpy, evenSize);
reductionSum = interleavedPairReduce(h_idata_cpy, evenSize);
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\nCPU reduce: execution time %.4f ms, result %d\n\n", exeTime * 1e3, reductionSum);
// allocate device memory
int *d_idata, *d_odata;
CHECK(hipMalloc((int**)&d_idata, nBytes));
CHECK(hipMalloc((int**)&d_odata, grid.x * sizeof(int)));
// 1. using shared memory
CHECK(hipMemcpy(d_idata, h_idata, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemset(d_odata, 0, grid.x * sizeof(int)));
memset(h_odata, 0, grid.x * sizeof(int));
start = clock();
// CUDA part
hipLaunchKernelGGL(( reduceSharedMem), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, evenSize);
// Synchronize so the timed interval covers kernel execution, not just launch.
CHECK(hipDeviceSynchronize());
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
// Host part
reductionSum = 0;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < grid.x; i++) reductionSum += h_odata[i];
printf("GPU shared memory: execution time %.4f ms, result %d\n", exeTime * 1e3, reductionSum);
CHECK(hipGetLastError());
// 2. reduce each warp using warp shuffle
CHECK(hipMemcpy(d_idata, h_idata, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemset(d_odata, 0, grid.x * sizeof(int)));
memset(h_odata, 0, grid.x * sizeof(int));
start = clock();
// CUDA part
hipLaunchKernelGGL(( reduceWarpShfl), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, evenSize);
CHECK(hipDeviceSynchronize());
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
// Host part
reductionSum = 0;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < grid.x; i++) reductionSum += h_odata[i];
printf("reduce each warp via warp shuffle: execution time %.4f ms, result %d\n", exeTime * 1e3, reductionSum);
CHECK(hipGetLastError());
// 3. only reduce the last warp using warp shuffle
CHECK(hipMemcpy(d_idata, h_idata, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemset(d_odata, 0, grid.x * sizeof(int)));
memset(h_odata, 0, grid.x * sizeof(int));
start = clock();
// CUDA part
hipLaunchKernelGGL(( reduceSmemShfl), dim3(grid.x), dim3(block), 0, 0, d_idata, d_odata, evenSize);
CHECK(hipDeviceSynchronize());
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
// Host part
reductionSum = 0;
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < grid.x; i++) reductionSum += h_odata[i];
printf("only reduce the last warp via warp shuffle: execution time %.4f ms, result %d\n", exeTime * 1e3, reductionSum);
CHECK(hipGetLastError());
// free host mem
free(h_idata);
free(h_odata);
free(h_idata_cpy);
// free device mem
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// clean up all resources
CHECK(hipDeviceReset());
return 0;
}
/**********CUDA kernels**********/
// Block-level sum reduction in shared memory with a fully unrolled tail.
// Each block sums blockDim.x consecutive ints from g_idata and writes its
// partial sum to g_odata[blockIdx.x]; the host adds the per-block partials.
__global__ void reduceSharedMem(int *g_idata, int * g_odata, const int n) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): returning before the __syncthreads() calls below is only
// safe when n is a multiple of blockDim.x (true for main's 1<<24 input);
// a partially covered block would otherwise diverge at the barriers.
if (idx >= n) return;
const int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// shared memory
__shared__ int smem[BLOCKSIZE];
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction and complete unroll
// NOTE(review): the >=1024 and >=512 branches would index past
// smem[BLOCKSIZE] (BLOCKSIZE is 256) if the kernel were launched with a
// larger block; they are dead code at the current launch configuration.
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
// NOTE(review): this volatile warp-synchronous idiom assumes the last 32
// threads execute in lockstep; on architectures with independent thread
// scheduling an explicit warp sync between steps is required -- confirm
// the target architecture before relying on this.
if (tid < 32) {
volatile int *vmem = smem;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
// Thread 0 publishes this block's partial sum.
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// Butterfly (XOR) warp reduction over offsets 16, 8, 4, 2, 1: after the loop
// every participating lane holds the sum of all 32 lanes' inputs.
__inline__ __device__ int warpReduce(int localSum) {
    #pragma unroll
    for (int offset = 16; offset > 0; offset >>= 1) {
        localSum += __shfl_xor_sync(MASK, localSum, offset);
    }
    return localSum;
}
// Block-level sum reduction built from warp shuffles: each warp reduces its
// 32 inputs, lane 0 of every warp stashes the partial in shared memory, and
// warp 0 reduces the per-warp partials with one more shuffle pass.
__global__ void reduceWarpShfl(int *g_idata, int *g_odata, const int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): early return before __syncthreads() is only safe when n is
// a multiple of blockDim.x (true for main's 1<<24 input).
if (idx >= n) return;
// shared memory for each warp sum
__shared__ int smem[BLOCKSIZE / 32];
// calculate lane index and warp index
int laneIdx = threadIdx.x % warpSize;
int warpIdx = threadIdx.x / warpSize;
// block-wide warp reduce
int localSum = warpReduce(g_idata[idx]);
// save warp sum to shared memory
if (laneIdx == 0) smem[warpIdx] = localSum;
__syncthreads();
/**naive way: loop over smem and sum the values**/
// localSum = 0;
// for (int i = 0; i < BLOCKSIZE / 32; i++) localSum += smem[i];
/**alternative way: cal warp shuffle one more time**/
// Lanes of warp 0 pick up one per-warp partial each (zero for lanes beyond
// the number of warps), then reduce them.
if (threadIdx.x < warpSize)
localSum = (threadIdx.x < BLOCKSIZE / warpSize) ? smem[laneIdx] : 0;
if (warpIdx == 0) localSum = warpReduce(localSum);
// write result for this block to global mem
if (threadIdx.x == 0) g_odata[blockIdx.x] = localSum;
}
// Hybrid reduction: shared-memory tree down to 32 partials, then a single
// warp-shuffle pass finishes the sum. Each block writes one partial to
// g_odata[blockIdx.x].
__global__ void reduceSmemShfl(int *g_idata, int *g_odata, const int n) {
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): early return before __syncthreads() assumes n is a multiple
// of blockDim.x (holds for main's 1<<24 input).
if (idx >= n) return;
int *idata = g_idata + blockIdx.x * blockDim.x;
__shared__ int smem[BLOCKSIZE];
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
// NOTE(review): the >=1024/512 branches are dead at BLOCKSIZE 256 and would
// read past smem if a larger block were ever used.
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
if (blockDim.x >= 64 && tid < 32) smem[tid] += smem[tid + 32];
__syncthreads();
// Every thread runs the shuffle sequence, but only warp 0's lanes hold the
// surviving partials; after the XOR cascade thread 0 has the block total.
int localSum = smem[tid];
localSum += __shfl_xor_sync(MASK, localSum, 16);
localSum += __shfl_xor_sync(MASK, localSum, 8);
localSum += __shfl_xor_sync(MASK, localSum, 4);
localSum += __shfl_xor_sync(MASK, localSum, 2);
localSum += __shfl_xor_sync(MASK, localSum, 1);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = localSum; //smem[0];
}
/**********host functions**********/
// Fill ip[0..size) with pseudo-random values in [0, 255].
// NOTE(review): re-seeding from wall-clock time on every call makes the data
// non-reproducible between runs; seed once (or accept a seed) if repeatable
// benchmarks are wanted.
void initialData(int *ip, const int size) {
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for (int i = 0; i < size; i++) {
ip[i] = (int)( rand() & 0xFF );
}
}
// Plain serial sum of the first `size` ints in `data`; used as the CPU
// reference result for the GPU reductions. Returns 0 for size <= 0.
int naiveReduce(int *data, int size) {
    int total = 0;
    int *end = data + (size > 0 ? size : 0);
    for (int *p = data; p != end; ++p) {
        total += *p;
    }
    return total;
}
// Iterative neighbored-pair tree reduction: at each level, element i absorbs
// element i + stride, and the running total accumulates into data[0].
// Mutates `data` in place; returns the sum of the first `size` elements
// (0 when size <= 0).
// Fixed: the inner loop previously read data[i + stride] past the end of the
// array and dropped elements whenever `size` was not a power of two; for
// power-of-two sizes the behavior is unchanged.
int neighboredPairReduce(int *data, const int size) {
    if (size <= 0) return 0;  // nothing to reduce
    for (int stride = 1; stride < size; stride *= 2) {
        for (int i = 0; i + stride < size; i += stride * 2) {
            data[i] += data[i + stride];
        }
    }
    return data[0];
}
int interleavedPairReduce(int *data, const int size) {
if (size == 1) return data[0];
const int stride = size / 2;
for (int i = 0; i < stride; i++) {
data[i] += data[i + stride];
}
return interleavedPairReduce(data, stride);
} | 31b907ac923e5ad49a0c3233bf3829ecb4388790.cu | #include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} \
#define BLOCKSIZE 256
#define MASK 0xffffffff
void initialData(int *ip, const int size);
int naiveReduce(int *data, int size);
int neighboredPairReduce(int *data, const int size);
int interleavedPairReduce(int *data, const int size);
__global__ void reduceSharedMem(int *g_idata, int * g_odata, const int n);
__global__ void reduceWarpShfl(int *g_idata, int *g_odata, const int n);
__global__ void reduceSmemShfl(int *g_idata, int *g_odata, const int n);
int main(int argc, char **argv) {
int size = 1<<24, evenSize = size;
if (evenSize % 2 != 0) evenSize++; // should be even for pair-reducution to work
printf("Vector size %d\n", size);
size_t nBytes = evenSize * sizeof(int);
clock_t start, end;
double exeTime;
int reductionSum;
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// grid and block configuration
dim3 block(BLOCKSIZE);
dim3 grid((evenSize + block.x - 1)/ block.x);
printf("Grid dimension %d Block dimensiton %d\n", grid.x, block.x);
// allocate host memory
int *h_idata, *h_odata, *h_idata_cpy;
h_idata = (int *) malloc(nBytes);
h_odata = (int *) malloc(grid.x * sizeof(int));
h_idata_cpy = (int *) malloc(nBytes);
memset(h_idata, 0, nBytes);
initialData(h_idata, size);
memcpy(h_idata_cpy, h_idata, nBytes);
// 0. compute on CPU
start = clock();
// reductionSum = naiveReduce(h_idata_cpy, size);
// reductionSum = neighboredPairReduce(h_idata_cpy, evenSize);
reductionSum = interleavedPairReduce(h_idata_cpy, evenSize);
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("\nCPU reduce: execution time %.4f ms, result %d\n\n", exeTime * 1e3, reductionSum);
// allocate device memory
int *d_idata, *d_odata;
CHECK(cudaMalloc((int**)&d_idata, nBytes));
CHECK(cudaMalloc((int**)&d_odata, grid.x * sizeof(int)));
// 1. using shared memory
CHECK(cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_odata, 0, grid.x * sizeof(int)));
memset(h_odata, 0, grid.x * sizeof(int));
start = clock();
// CUDA part
reduceSharedMem<<<grid.x, block>>>(d_idata, d_odata, evenSize);
CHECK(cudaDeviceSynchronize());
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
// Host part
reductionSum = 0;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < grid.x; i++) reductionSum += h_odata[i];
printf("GPU shared memory: execution time %.4f ms, result %d\n", exeTime * 1e3, reductionSum);
CHECK(cudaGetLastError());
// 2. reduce each warp using warp shuffle
CHECK(cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_odata, 0, grid.x * sizeof(int)));
memset(h_odata, 0, grid.x * sizeof(int));
start = clock();
// CUDA part
reduceWarpShfl<<<grid.x, block>>>(d_idata, d_odata, evenSize);
CHECK(cudaDeviceSynchronize());
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
// Host part
reductionSum = 0;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < grid.x; i++) reductionSum += h_odata[i];
printf("reduce each warp via warp shuffle: execution time %.4f ms, result %d\n", exeTime * 1e3, reductionSum);
CHECK(cudaGetLastError());
// 3. only reduce the last warp using warp shuffle
CHECK(cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_odata, 0, grid.x * sizeof(int)));
memset(h_odata, 0, grid.x * sizeof(int));
start = clock();
// CUDA part
reduceSmemShfl<<<grid.x, block>>>(d_idata, d_odata, evenSize);
CHECK(cudaDeviceSynchronize());
end = clock();
exeTime = ((double) (end - start)) / CLOCKS_PER_SEC;
// Host part
reductionSum = 0;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < grid.x; i++) reductionSum += h_odata[i];
printf("only reduce the last warp via warp shuffle: execution time %.4f ms, result %d\n", exeTime * 1e3, reductionSum);
CHECK(cudaGetLastError());
// free host mem
free(h_idata);
free(h_odata);
free(h_idata_cpy);
// free device mem
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
// clean up all resources
CHECK(cudaDeviceReset());
return 0;
}
/**********CUDA kernels**********/
// Block-level sum reduction in shared memory with a fully unrolled tail.
// Each block sums blockDim.x consecutive ints from g_idata into
// g_odata[blockIdx.x]; the host adds the per-block partials.
// Precondition: n is a multiple of blockDim.x (all threads of a live block
// must reach the __syncthreads() barriers together) -- main's 1<<24 input
// with BLOCKSIZE 256 satisfies this.
__global__ void reduceSharedMem(int *g_idata, int * g_odata, const int n) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    const int tid = threadIdx.x;
    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x;
    __shared__ int smem[BLOCKSIZE];
    smem[tid] = idata[tid];
    __syncthreads();
    // In-place tree reduction, fully unrolled. The >=1024/512 branches are
    // dead at the current BLOCKSIZE of 256 but kept for structural parity.
    if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
    __syncthreads();
    // Final warp. The previous code relied on implicit warp-lockstep
    // execution through a volatile pointer, which is unsafe under the
    // independent thread scheduling introduced with Volta; an explicit
    // __syncwarp() between dependent steps restores correctness (the file
    // already requires CUDA 9+ via __shfl_xor_sync, so __syncwarp is
    // available).
    if (tid < 32) {
        volatile int *vmem = smem;
        vmem[tid] += vmem[tid + 32]; __syncwarp();
        vmem[tid] += vmem[tid + 16]; __syncwarp();
        vmem[tid] += vmem[tid + 8];  __syncwarp();
        vmem[tid] += vmem[tid + 4];  __syncwarp();
        vmem[tid] += vmem[tid + 2];  __syncwarp();
        vmem[tid] += vmem[tid + 1];
    }
    // Thread 0 publishes this block's partial sum.
    if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
// XOR-butterfly warp reduction (offsets 16 down to 1); every participating
// lane ends up holding the sum of all 32 lanes' inputs.
__inline__ __device__ int warpReduce(int localSum) {
    #pragma unroll
    for (int delta = 16; delta >= 1; delta /= 2) {
        localSum += __shfl_xor_sync(MASK, localSum, delta);
    }
    return localSum;
}
__global__ void reduceWarpShfl(int *g_idata, int *g_odata, const int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
// shared memory for each warp sum
__shared__ int smem[BLOCKSIZE / 32];
// calculate lane index and warp index
int laneIdx = threadIdx.x % warpSize;
int warpIdx = threadIdx.x / warpSize;
// blcok-wide warp reduce
int localSum = warpReduce(g_idata[idx]);
// save warp sum to shared memory
if (laneIdx == 0) smem[warpIdx] = localSum;
__syncthreads();
/**naive way: loop over smem and sum the values**/
// localSum = 0;
// for (int i = 0; i < BLOCKSIZE / 32; i++) localSum += smem[i];
/**alternative way: cal warp shuffle one more time**/
if (threadIdx.x < warpSize)
localSum = (threadIdx.x < BLOCKSIZE / warpSize) ? smem[laneIdx] : 0;
if (warpIdx == 0) localSum = warpReduce(localSum);
// write result for this block to global mem
if (threadIdx.x == 0) g_odata[blockIdx.x] = localSum;
}
__global__ void reduceSmemShfl(int *g_idata, int *g_odata, const int n) {
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) return;
int *idata = g_idata + blockIdx.x * blockDim.x;
__shared__ int smem[BLOCKSIZE];
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
if (blockDim.x >= 64 && tid < 32) smem[tid] += smem[tid + 32];
__syncthreads();
int localSum = smem[tid];
localSum += __shfl_xor_sync(MASK, localSum, 16);
localSum += __shfl_xor_sync(MASK, localSum, 8);
localSum += __shfl_xor_sync(MASK, localSum, 4);
localSum += __shfl_xor_sync(MASK, localSum, 2);
localSum += __shfl_xor_sync(MASK, localSum, 1);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = localSum; //smem[0];
}
/**********host functions**********/
void initialData(int *ip, const int size) {
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for (int i = 0; i < size; i++) {
ip[i] = (int)( rand() & 0xFF );
}
}
// Sequential sum of data[0..size); the CPU reference for the GPU kernels.
int naiveReduce(int *data, int size) {
    int total = 0;
    int idx = 0;
    while (idx < size) {
        total += data[idx];
        ++idx;
    }
    return total;
}
// Iterative neighbored-pair tree reduction: at each level element i absorbs
// element i + stride; the total accumulates into data[0].
// Mutates `data` in place; returns the sum of the first `size` elements
// (0 when size <= 0).
// Fixed: the inner loop previously read data[i + stride] out of bounds (and
// lost elements) whenever `size` was not a power of two; power-of-two sizes
// behave exactly as before.
int neighboredPairReduce(int *data, const int size) {
    if (size <= 0) return 0;  // nothing to reduce
    for (int stride = 1; stride < size; stride *= 2) {
        for (int i = 0; i + stride < size; i += stride * 2) {
            data[i] += data[i + stride];
        }
    }
    return data[0];
}
// Recursive interleaved-pair reduction: fold the upper half of the array
// onto the lower half, then recurse on the lower half.
// Mutates `data` in place; returns the sum of the first `size` elements
// (0 when size <= 0).
// Fixed: an odd `size` previously dropped the last element (size/2 truncates),
// and size == 0 recursed forever; even sizes behave exactly as before.
int interleavedPairReduce(int *data, const int size) {
    if (size <= 0) return 0;       // guard: previously infinite recursion
    if (size == 1) return data[0];
    const int stride = size / 2;
    // Fold the unpaired trailing element into slot 0 when size is odd.
    if (size % 2 != 0) data[0] += data[size - 1];
    for (int i = 0; i < stride; i++) {
        data[i] += data[i + stride];
    }
    return interleavedPairReduce(data, stride);
}
f0a920c60e94185271107143b3da98df9c6e86f1.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_cdiv4hwn4_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
hipStream_t stream);
| f0a920c60e94185271107143b3da98df9c6e86f1.cu | /**
* \file dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_cdiv4hwn4_per_chan_hswish.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4.cuinl"
template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4<PerChannelBiasVisitor,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>(
const int8_t* d_src,
const int8_t* d_filter,
PerChannelBiasVisitor bias,
IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue,
const ConvParam& param,
float alpha,
float beta,
cudaStream_t stream);
|
7d45c90dd961bf8992a3f5bfe89f39e7468d7ec2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// SAXPY kernel: y[i] = a*x[i] + y[i], one element per thread. Threads with
// i >= n do nothing, so any grid covering at least n threads is valid.
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
// Exported host wrapper (Windows DLL): computes y = a*x + y on the GPU for n
// floats, copying x and y to the device and the updated y back to the host.
// The final hipMemcpy device-to-host is synchronous, so y is complete on
// return.
// NOTE(review): every HIP API return code is ignored and the function always
// returns 1, so allocation/copy/launch failures go unnoticed -- consider
// checking and propagating errors.
extern "C" __declspec(dllexport) int myFunction(int n, float a, float *x, float *y)
{
float *d_x, *d_y;
hipMalloc(&d_x, n*sizeof(float));
hipMalloc(&d_y, n*sizeof(float));
hipMemcpy(d_x, x, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, n*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY (Single-Precision AX Plus Y) on GPU
hipLaunchKernelGGL(( saxpy) , dim3((n + 255)/256), dim3(256), 0, 0, n, a, d_x, d_y);
hipMemcpy(y, d_y, n*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_y);
return 1;
}
| 7d45c90dd961bf8992a3f5bfe89f39e7468d7ec2.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
    // One thread per element: y[i] = a*x[i] + y[i] for i in [0, n).
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;  // grid tail guard
    y[idx] = a * x[idx] + y[idx];
}
// Exported host wrapper (Windows DLL): copies x and y to the device, runs
// SAXPY (y = a*x + y) over n floats, and copies the updated y back. The
// blocking device-to-host copy guarantees y is complete on return; always
// returns 1.
extern "C" __declspec(dllexport) int myFunction(int n, float a, float *x, float *y)
{
    const size_t bytes = n * sizeof(float);
    float *d_x = NULL;
    float *d_y = NULL;
    cudaMalloc(&d_x, bytes);
    cudaMalloc(&d_y, bytes);
    cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, bytes, cudaMemcpyHostToDevice);
    // Launch one thread per element, rounded up to whole 256-thread blocks.
    const int threadsPerBlock = 256;
    const int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
    saxpy <<<blocks, threadsPerBlock>>> (n, a, d_x, d_y);
    cudaMemcpy(y, d_y, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_x);
    cudaFree(d_y);
    return 1;
}
|
841ddfa95696c95312ab3c360e595cae22bc0e10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gtest/gtest.h"
#include "ATen/ATen.h"
#include "test_seed.h"
#include "ATen/core/TensorAccessor.h"
#include "ATen/hip/HIPContext.h"
#include <assert.h>
using namespace at;
// Computes res = t1 * t2 (matrix-vector product) serially on the device: a
// single thread walks both loops (the test launches this <<<1, 1>>>). The
// point is to exercise PackedTensorAccessor indexing on-device, not speed.
__global__ void test_tensor_packed_accessor_kernel(
PackedTensorAccessor<float, 1, RestrictPtrTraits> resa,
PackedTensorAccessor<float, 2, RestrictPtrTraits> t1a,
PackedTensorAccessor<float, 1, RestrictPtrTraits> t2a) {
for (int64_t i = 0; i < resa.size(0); i++) {
float val = 0.0f;
// Dot product of row i of t1 with the vector t2.
for (int64_t j = 0; j < t1a.size(1); j++) {
val += t1a[i][j] * t2a[j];
}
resa[i] = val;
}
}
// test PackedTensorAccessor and Tensor.packed_accessor
// Builds small random GPU tensors, runs the single-thread mat-vec kernel
// through packed accessors, and checks the result against ATen's mv().
TEST(PackedtensoraccessorTest, PackedtensoraccessorTestCUDA) {
manual_seed(123, at::kCPU);
manual_seed(123, at::kCUDA);
Tensor t1 = rand({4, 4}, CUDA(kFloat));
Tensor t2 = rand({4}, CUDA(kFloat));
Tensor res = empty({4}, CUDA(kFloat));
auto t1a = t1.packed_accessor<float, 2, RestrictPtrTraits>();
auto t2a = t2.packed_accessor<float, 1, RestrictPtrTraits>();
auto resa = res.packed_accessor<float, 1, RestrictPtrTraits>();
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( test_tensor_packed_accessor_kernel), dim3(1), dim3(1), 0, stream, resa, t1a, t2a);
// Synchronize both to wait for the kernel and to surface any async errors.
hipError_t err = hipDeviceSynchronize();
bool isEQ = err == hipSuccess;
ASSERT_TRUE(isEQ);
// allclose (not exact equality): device and reference may differ in ulps.
auto expected = mv(t1, t2);
ASSERT_TRUE(res.allclose(expected));
}
| 841ddfa95696c95312ab3c360e595cae22bc0e10.cu | #include "gtest/gtest.h"
#include "ATen/ATen.h"
#include "test_seed.h"
#include "ATen/core/TensorAccessor.h"
#include "ATen/cuda/CUDAContext.h"
#include <assert.h>
using namespace at;
__global__ void test_tensor_packed_accessor_kernel(
PackedTensorAccessor<float, 1, RestrictPtrTraits> resa,
PackedTensorAccessor<float, 2, RestrictPtrTraits> t1a,
PackedTensorAccessor<float, 1, RestrictPtrTraits> t2a) {
for (int64_t i = 0; i < resa.size(0); i++) {
float val = 0.0f;
for (int64_t j = 0; j < t1a.size(1); j++) {
val += t1a[i][j] * t2a[j];
}
resa[i] = val;
}
}
// test PackedTensorAccessor and Tensor.packed_accessor
TEST(PackedtensoraccessorTest, PackedtensoraccessorTestCUDA) {
manual_seed(123, at::kCPU);
manual_seed(123, at::kCUDA);
Tensor t1 = rand({4, 4}, CUDA(kFloat));
Tensor t2 = rand({4}, CUDA(kFloat));
Tensor res = empty({4}, CUDA(kFloat));
auto t1a = t1.packed_accessor<float, 2, RestrictPtrTraits>();
auto t2a = t2.packed_accessor<float, 1, RestrictPtrTraits>();
auto resa = res.packed_accessor<float, 1, RestrictPtrTraits>();
auto stream = at::cuda::getCurrentCUDAStream();
test_tensor_packed_accessor_kernel<<<1, 1, 0, stream>>>(resa, t1a, t2a);
cudaError_t err = cudaDeviceSynchronize();
bool isEQ = err == cudaSuccess;
ASSERT_TRUE(isEQ);
auto expected = mv(t1, t2);
ASSERT_TRUE(res.allclose(expected));
}
|
b111f88002acc0202eb5672d1028e35327ea2544.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file pad.cu
* \brief
* \author Sebastian Bodenstein
*/
#include <algorithm>
#include "./pad-inl.h"
#include "../common/cuda_utils.h"
namespace mshadow {
namespace cuda {
////////////////////////////////////////////////////////////////////////////////
// Special Case: 2d image (so only pad width + height)
// Case 1: Replication Padding
// single_image_2d_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReplicationPadding.cu
// Replication ("edge") padding, forward pass, for NCHW batches.
// One thread per output pixel in the (y, x) plane; blockIdx.y selects the
// channel and blockIdx.z the batch element. Out-of-range output coordinates
// are clamped back to the nearest valid input pixel, replicating the border.
// NOTE(review): the iStart/oStart terms appear to also handle negative
// padT/padL (cropping) -- confirm against the CPU reference before relying
// on that.
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
// Decompose the flat id into output (x, y).
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
// Clamp the output coordinate into the padded image extent, then shift back
// into input-space coordinates.
int inputPointX =
min(max(padL, outputPointX), src.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), src.size(2) + padT - 1) - oStartY + iStartY;
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
// Launch replication ("edge") padding over an NCHW batch.
// `pad` holds per-dimension (before, after) amounts; pad[4] is the top pad
// and pad[6] the left pad for the spatial dimensions.
// Grid: x covers the flat H*W output plane, y the channels, z the batch.
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 4, DType> dst,
                           const Tensor<gpu, 4, DType> &src,
                           const mxnet::TShape &pad) {
  const int padT = pad[4];
  const int padL = pad[6];
  dim3 dimBlock(kBaseThreadNum);
  // Fix: size the grid from the actual block size instead of a hard-coded
  // 256, so launch coverage stays correct if kBaseThreadNum ever changes.
  int xGridSize = (dst.size(2) * dst.size(3) + kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  hipLaunchKernelGGL(( image_2d_pad_edge_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, dst, src,
                   padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_edge_kernel);
}
// Replication padding, backward pass: each output-gradient pixel is scattered
// back to the (clamped) input pixel it was copied from in the forward pass.
// Border input pixels receive contributions from many output pixels, hence
// the atomicAdd.
// NOTE(review): grad_in is accumulated into, so it must be zeroed (or hold
// gradients to add onto) before launch -- confirm at the call site.
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
// Decompose the flat id into output (x, y).
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
// Same clamping as the forward kernel: map the output pixel to its source
// input pixel.
int inputPointX = min(max(padL, outputPointX), grad_in.size(3) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), grad_in.size(2) + padT - 1) -
oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
// Launch the backward scatter for replication padding.
// grad_in is accumulated into via atomicAdd, so the caller must initialize
// it (typically to zero) before this launch. pad[4]/pad[6] are the top/left
// pad amounts.
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 4, DType> grad_in,
                                const Tensor<gpu, 4, DType> &grad_out,
                                const mxnet::TShape &pad) {
  const int padT = pad[4];
  const int padL = pad[6];
  dim3 dimBlock(kBaseThreadNum);
  // Fix: derive the grid size from the actual block size rather than a
  // hard-coded 256, keeping coverage correct if kBaseThreadNum changes.
  int xGridSize = (grad_out.size(2) * grad_out.size(3) + kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
  hipLaunchKernelGGL(( image_2d_pad_edge_grad_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                   grad_in, grad_out, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_edge_grad_kernel);
}
// Case 2: Constant Padding
// Constant padding, forward pass: output pixels that fall inside the copied
// input region take the input value; pixels in the pad border get `constant`.
// The checkT/B/L/R product is a branch-free "inside" test: each factor is 0
// exactly when the output pixel lies beyond the corresponding edge, so
// need_pad is 1 iff the pixel is in the border.
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
// cast sizes to int to use in min/max
int Ny = src.size(2);
int Nx = src.size(3);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
// Zero exactly when the pixel is above/below/left/right of the input region.
int checkT = max(0, outputPointY - padT + 1);
int checkB = max(0, padT + Ny - outputPointY);
int checkL = max(0, outputPointX - padL + 1);
int checkR = max(0, padL + Nx - outputPointX);
// Clamped source coordinates; only meaningful when need_pad is 0, but always
// in range so the read below is safe either way.
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkT * checkB * checkL * checkR);
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
// Branch-free select between the copied value and the pad constant.
dst[batch][plane][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
// Launches the 2D constant-padding forward kernel.
// pad[4] is the top (H-before) and pad[6] the left (W-before) amount;
// `constant` is the fill value written into the border.
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 4, DType> dst,
                               const Tensor<gpu, 4, DType> &src,
                               const mxnet::TShape &pad, const DType constant) {
  const int padT = pad[4];
  const int padL = pad[6];
  dim3 dimBlock(kBaseThreadNum);
  // Use the actual block size rather than a hard-coded 256 for the
  // ceiling division so the grid always covers all of dst.
  int xGridSize =
      (dst.size(2) * dst.size(3) + kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  hipLaunchKernelGGL(( image_2d_pad_constant_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     dst, src, padT, padL, constant);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_constant_kernel);
}
// Backward pass of 2D constant padding: every interior input pixel takes
// its gradient directly from the matching (shifted) output-gradient
// pixel; gradients falling on the padded border are discarded.
// Launch layout: x covers the input H*W points, blockIdx.y = channel,
// blockIdx.z = batch sample.
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_grad_kernel(
    Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
    const int padT, const int padL) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int width = grad_in.size(3);
  // Guard the grid tail.
  if (idx >= grad_in.size(2) * width) {
    return;
  }
  const int x = idx % width;
  const int y = idx / width;
  // Shift by the top/left pad to locate the matching output point.
  grad_in[blockIdx.z][blockIdx.y][y][x] =
      grad_out[blockIdx.z][blockIdx.y][y + padT][x + padL];
}
// Launches the 2D constant-padding backward kernel (a plain crop: no
// atomics, so grad_in needs no prior zeroing).
// pad[4] is the top (H-before) and pad[6] the left (W-before) amount.
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 4, DType> grad_in,
                                    const Tensor<gpu, 4, DType> &grad_out,
                                    const mxnet::TShape &pad) {
  const int padT = pad[4];
  const int padL = pad[6];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256,
  // which would under-cover the input if kBaseThreadNum < 256).
  int xGridSize = (grad_in.size(2) * grad_in.size(3) + kBaseThreadNum - 1) /
                  kBaseThreadNum;
  dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
  hipLaunchKernelGGL(( image_2d_pad_constant_grad_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     grad_in, grad_out, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_constant_grad_kernel);
}
// Case 3: Reflection Padding
// adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReflectionPadding.cu
// Forward pass of 2D reflection padding. The source index is computed
// branchlessly: __sad(a, b, 0) == |a - b|, and
//   |o - pad| - |o - (size + pad - 1)| - o + 2*pad + size - 1
// evaluates to o for interior points and to the mirrored coordinate
// (edge pixel not repeated) for points inside a border. Subtracting
// oStart and adding iStart then shifts into source coordinates.
// Launch layout: x covers dst H*W points, blockIdx.y = channel,
// blockIdx.z = batch sample.
template <int n_bits, typename DType>
__global__ void image_2d_pad_reflect_kernel(Tensor<gpu, 4, DType> dst,
                                            const Tensor<gpu, 4, DType> src,
                                            const int padT, const int padL) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Guard the grid tail.
  if (outputPointId >= dst.size(2) * dst.size(3)) {
    return;
  }
  int outputPointX = outputPointId % dst.size(3);
  int outputPointY = outputPointId / dst.size(3);
  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);
  // Branchless reflection about both edges (see header comment).
  int inputPointX = __sad(outputPointX, padL, 0)
                    - __sad(outputPointX, src.size(3) + padL - 1, 0)
                    - outputPointX
                    + 2 * padL + src.size(3) - 1
                    - oStartX + iStartX;
  int inputPointY = __sad(outputPointY, padT, 0)
                    - __sad(outputPointY, src.size(2) + padT - 1, 0)
                    - outputPointY
                    + 2 * padT + src.size(2) - 1
                    - oStartY + iStartY;
  DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
  dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
// Launches the 2D reflection-padding forward kernel.
// pad[4] is the top (H-before) and pad[6] the left (W-before) amount.
template <typename DType>
inline void image_pad_reflect(Tensor<gpu, 4, DType> dst,
                              const Tensor<gpu, 4, DType> &src,
                              const mxnet::TShape &pad) {
  const int padT = pad[4];
  const int padL = pad[6];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (previously a hard-coded
  // 256, inconsistent with dimBlock).
  int xGridSize =
      (dst.size(2) * dst.size(3) + kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  hipLaunchKernelGGL(( image_2d_pad_reflect_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     dst, src, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_reflect_kernel);
}
// Backward pass of 2D reflection padding: each output-gradient point is
// accumulated into the source pixel it was reflected from, using the same
// branchless __sad-based mirror arithmetic as the forward kernel
// (__sad(a, b, 0) == |a - b|). atomicAdd is required because a border
// source pixel receives gradients from several output points; caller is
// expected to zero grad_in first (NOTE(review): confirm at call site).
template <int n_bits, typename DType>
__global__ void image_2d_pad_reflect_grad_kernel(
    Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
    const int padT, const int padL) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Guard the grid tail.
  if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
    return;
  }
  int outputPointX = outputPointId % grad_out.size(3);
  int outputPointY = outputPointId / grad_out.size(3);
  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);
  // Branchless mirror back into source coordinates.
  int inputPointX = __sad(outputPointX, padL, 0)
                    - __sad(outputPointX, grad_in.size(3) + padL - 1, 0)
                    - outputPointX
                    + 2 * padL + grad_in.size(3) - 1
                    - oStartX + iStartX;
  int inputPointY = __sad(outputPointY, padT, 0)
                    - __sad(outputPointY, grad_in.size(2) + padT - 1, 0)
                    - outputPointY
                    + 2 * padT + grad_in.size(2) - 1
                    - oStartY + iStartY;
  DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
  atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
// Launches the 2D reflection-padding backward kernel.
// pad[4] is the top (H-before) and pad[6] the left (W-before) amount.
// NOTE(review): kernel accumulates via atomicAdd — assumes grad_in is
// zero-initialized by the caller; confirm.
template <typename DType>
inline void image_pad_reflect_grad(Tensor<gpu, 4, DType> grad_in,
                                   const Tensor<gpu, 4, DType> &grad_out,
                                   const mxnet::TShape &pad) {
  const int padT = pad[4];
  const int padL = pad[6];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256).
  int xGridSize = (grad_out.size(2) * grad_out.size(3) + kBaseThreadNum - 1) /
                  kBaseThreadNum;
  dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
  hipLaunchKernelGGL(( image_2d_pad_reflect_grad_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     grad_in, grad_out, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_reflect_grad_kernel);
}
////////////////////////////////////////////////////////////////////////////////
// Special Case: 3d image (pad depth + width + height)
// Case 1: Replication Padding
// single_image_3_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/VolumetricReplicationPadding.cu
// Forward pass of 3D edge (replication) padding on NCDHW tensors: each
// output voxel copies the nearest source voxel, with coordinates clamped
// to the source volume (clamp-to-edge in Z, Y and X).
// Launch layout: x covers the D*H*W output voxels, blockIdx.y = channel,
// blockIdx.z = batch sample.
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_kernel(Tensor<gpu, 5, DType> dst,
                                         const Tensor<gpu, 5, DType> src,
                                         const int padF, const int padT,
                                         const int padL) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Guard the grid tail.
  if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
    return;
  }
  // Decompose the flat id into (Z, Y, X) output coordinates.
  int outputPointX = outputPointId % dst.size(4);
  int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
  int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int iStartZ = max(0, -padF);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);
  int oStartZ = max(0, padF);
  // Clamp into the padded interior, then shift into source coordinates.
  int inputPointX =
      min(max(padL, outputPointX), src.size(4) + padL - 1) - oStartX + iStartX;
  int inputPointY =
      min(max(padT, outputPointY), src.size(3) + padT - 1) - oStartY + iStartY;
  int inputPointZ =
      min(max(padF, outputPointZ), src.size(2) + padF - 1) - oStartZ + iStartZ;
  DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
  dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
// Launches the 3D edge-padding forward kernel on an NCDHW tensor.
// `pad` stores (before, after) pairs per axis, so pad[4]/pad[6]/pad[8]
// are the front (D), top (H) and left (W) amounts respectively.
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 5, DType> dst,
                           const Tensor<gpu, 5, DType> &src,
                           const mxnet::TShape &pad) {
  const int padF = pad[4];
  const int padT = pad[6];
  const int padL = pad[8];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256,
  // inconsistent with dimBlock).
  int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) +
                   kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  hipLaunchKernelGGL(( image_3d_pad_edge_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     dst, src, padF, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_edge_kernel);
}
// Backward pass of 3D edge (replication) padding: each output-gradient
// voxel is accumulated into the source voxel it was replicated from
// (coordinates clamped back into the source volume). atomicAdd is needed
// because border voxels receive many contributions; caller is expected
// to zero grad_in first (NOTE(review): confirm at call site).
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_grad_kernel(
    Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
    const int padF, const int padT, const int padL) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Guard the grid tail.
  if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
    return;
  }
  // Decompose the flat id into (Z, Y, X) output coordinates.
  int outputPointX = outputPointId % grad_out.size(4);
  int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
  int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int iStartZ = max(0, -padF);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);
  int oStartZ = max(0, padF);
  // Clamp into the padded interior, then shift into source coordinates.
  int inputPointX = min(max(padL, outputPointX), grad_in.size(4) + padL - 1) -
                    oStartX + iStartX;
  int inputPointY = min(max(padT, outputPointY), grad_in.size(3) + padT - 1) -
                    oStartY + iStartY;
  int inputPointZ = min(max(padF, outputPointZ), grad_in.size(2) + padF - 1) -
                    oStartZ + iStartZ;
  DType valueToCopy =
      grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
  atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
            valueToCopy);
}
// Launches the 3D edge-padding backward kernel on an NCDHW tensor.
// pad[4]/pad[6]/pad[8] are the front (D), top (H) and left (W) amounts.
// NOTE(review): kernel accumulates via atomicAdd — assumes grad_in is
// zero-initialized by the caller; confirm.
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 5, DType> grad_in,
                                const Tensor<gpu, 5, DType> &grad_out,
                                const mxnet::TShape &pad) {
  const int padF = pad[4];
  const int padT = pad[6];
  const int padL = pad[8];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256).
  int xGridSize =
      (grad_out.size(2) * grad_out.size(3) * grad_out.size(4) +
       kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
  hipLaunchKernelGGL(( image_3d_pad_edge_grad_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     grad_in, grad_out, padF, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_edge_grad_kernel);
}
// Case 2: Constant Padding
// Forward pass of 3D constant padding: interior output voxels copy the
// matching source voxel, border voxels are filled with `constant`. The
// border test is branchless: each check* term is 0 exactly when the voxel
// lies beyond that face, so their product vanishes for any padded voxel.
// Launch layout: x covers dst D*H*W voxels, blockIdx.y = channel,
// blockIdx.z = batch sample.
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_kernel(Tensor<gpu, 5, DType> dst,
                                             const Tensor<gpu, 5, DType> src,
                                             const int padF, const int padT,
                                             const int padL,
                                             const DType constant) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  // Guard the grid tail.
  if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
    return;
  }
  // cast sizes to int to use in min/max
  int Nz = src.size(2);
  int Ny = src.size(3);
  int Nx = src.size(4);
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Decompose the flat id into (Z, Y, X) output coordinates.
  int outputPointX = outputPointId % dst.size(4);
  int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
  int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
  // Zero exactly when the voxel is beyond the respective face.
  int checkFront = max(0, outputPointZ - padF + 1);
  int checkBack = max(0, padF + Nz - outputPointZ);
  int checkTop = max(0, outputPointY - padT + 1);
  int checkBottom = max(0, padT + Ny - outputPointY);
  int checkLeft = max(0, outputPointX - padL + 1);
  int checkRight = max(0, padL + Nx - outputPointX);
  // Clamp to a valid source voxel; the load is masked out when padding.
  int inputPointZ = min(max(outputPointZ - padF, 0), Nz - 1);
  int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
  int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
  // 1 if need padding, 0 if not
  int need_pad = !(checkFront * checkBack * checkTop * checkBottom * checkLeft *
                   checkRight);
  DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
  dst[batch][plane][outputPointZ][outputPointY][outputPointX] =
      valueToCopy * (!need_pad) + need_pad * constant;
}
// Launches the 3D constant-padding forward kernel on an NCDHW tensor.
// pad[4]/pad[6]/pad[8] are the front (D), top (H) and left (W) amounts;
// `constant` is the border fill value.
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 5, DType> dst,
                               const Tensor<gpu, 5, DType> &src,
                               const mxnet::TShape &pad, const DType constant) {
  const int padF = pad[4];
  const int padT = pad[6];
  const int padL = pad[8];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256).
  int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) +
                   kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  hipLaunchKernelGGL(( image_3d_pad_constant_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     dst, src, padF, padT, padL, constant);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_constant_kernel);
}
// Backward pass of 3D constant padding: every interior input voxel takes
// its gradient from the matching (shifted) output-gradient voxel;
// gradients on the padded border are discarded.
// Launch layout: x covers the input D*H*W voxels, blockIdx.y = channel,
// blockIdx.z = batch sample.
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_grad_kernel(
    Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
    const int padF, const int padT, const int padL) {
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  const int width = grad_in.size(4);
  const int height = grad_in.size(3);
  // Guard the grid tail.
  if (idx >= grad_in.size(2) * height * width) {
    return;
  }
  const int x = idx % width;
  const int y = (idx / width) % height;
  const int z = idx / (height * width);
  // Shift by the front/top/left pads to locate the output voxel.
  grad_in[blockIdx.z][blockIdx.y][z][y][x] =
      grad_out[blockIdx.z][blockIdx.y][z + padF][y + padT][x + padL];
}
// Launches the 3D constant-padding backward kernel (a plain crop: no
// atomics, so grad_in needs no prior zeroing).
// pad[4]/pad[6]/pad[8] are the front (D), top (H) and left (W) amounts.
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 5, DType> grad_in,
                                    const Tensor<gpu, 5, DType> &grad_out,
                                    const mxnet::TShape &pad) {
  const int padF = pad[4];
  const int padT = pad[6];
  const int padL = pad[8];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256).
  int xGridSize =
      (grad_in.size(2) * grad_in.size(3) * grad_in.size(4) +
       kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
  hipLaunchKernelGGL(( image_3d_pad_constant_grad_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     grad_in, grad_out, padF, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_constant_grad_kernel);
}
// Case 3: Reflection Padding
// Forward pass of 3D reflection padding. Source indices are computed
// branchlessly per axis: __sad(a, b, 0) == |a - b|, and
//   |o - pad| - |o - (size + pad - 1)| - o + 2*pad + size - 1
// evaluates to o for interior points and to the mirrored coordinate
// (edge voxel not repeated) for border points; subtracting oStart and
// adding iStart shifts into source coordinates.
// Launch layout: x covers dst D*H*W voxels, blockIdx.y = channel,
// blockIdx.z = batch sample.
template <int n_bits, typename DType>
__global__ void image_3d_pad_reflect_kernel(Tensor<gpu, 5, DType> dst,
                                            const Tensor<gpu, 5, DType> src,
                                            const int padF, const int padT,
                                            const int padL) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Guard the grid tail.
  if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
    return;
  }
  // Decompose the flat id into (Z, Y, X) output coordinates.
  int outputPointX = outputPointId % dst.size(4);
  int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
  int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int iStartZ = max(0, -padF);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);
  int oStartZ = max(0, padF);
  // Branchless reflection per axis (see header comment).
  int inputPointX = __sad(outputPointX, padL, 0)
                    - __sad(outputPointX, src.size(4) + padL - 1, 0)
                    - outputPointX
                    + 2 * padL + src.size(4) - 1
                    - oStartX + iStartX;
  int inputPointY = __sad(outputPointY, padT, 0)
                    - __sad(outputPointY, src.size(3) + padT - 1, 0)
                    - outputPointY
                    + 2 * padT + src.size(3) - 1
                    - oStartY + iStartY;
  int inputPointZ = __sad(outputPointZ, padF, 0)
                    - __sad(outputPointZ, src.size(2) + padF - 1, 0)
                    - outputPointZ
                    + 2 * padF + src.size(2) - 1
                    - oStartZ + iStartZ;
  DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
  dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
// Launches the 3D reflection-padding forward kernel on an NCDHW tensor.
// pad[4]/pad[6]/pad[8] are the front (D), top (H) and left (W) amounts.
template <typename DType>
inline void image_pad_reflect(Tensor<gpu, 5, DType> dst,
                              const Tensor<gpu, 5, DType> &src,
                              const mxnet::TShape &pad) {
  const int padF = pad[4];
  const int padT = pad[6];
  const int padL = pad[8];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256).
  int xGridSize = (dst.size(2) * dst.size(3) * dst.size(4) +
                   kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(dst.stream_);
  hipLaunchKernelGGL(( image_3d_pad_reflect_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     dst, src, padF, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_reflect_kernel);
}
// Backward pass of 3D reflection padding: each output-gradient voxel is
// accumulated into the source voxel it was reflected from, using the same
// branchless __sad-based mirror arithmetic as the forward kernel
// (__sad(a, b, 0) == |a - b|). atomicAdd is required because mirrored
// source voxels receive multiple contributions; caller is expected to
// zero grad_in first (NOTE(review): confirm at call site).
template <int n_bits, typename DType>
__global__ void image_3d_pad_reflect_grad_kernel(
    Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
    const int padF, const int padT, const int padL) {
  int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
  int plane = blockIdx.y;
  int batch = blockIdx.z;
  // Guard the grid tail.
  if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
    return;
  }
  // Decompose the flat id into (Z, Y, X) output coordinates.
  int outputPointX = outputPointId % grad_out.size(4);
  int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
  int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
  int iStartX = max(0, -padL);
  int iStartY = max(0, -padT);
  int iStartZ = max(0, -padF);
  int oStartX = max(0, padL);
  int oStartY = max(0, padT);
  int oStartZ = max(0, padF);
  // Branchless mirror back into source coordinates, per axis.
  int inputPointX = __sad(outputPointX, padL, 0)
                    - __sad(outputPointX, grad_in.size(4) + padL - 1, 0)
                    - outputPointX
                    + 2 * padL + grad_in.size(4) - 1
                    - oStartX + iStartX;
  int inputPointY = __sad(outputPointY, padT, 0)
                    - __sad(outputPointY, grad_in.size(3) + padT - 1, 0)
                    - outputPointY
                    + 2 * padT + grad_in.size(3) - 1
                    - oStartY + iStartY;
  int inputPointZ = __sad(outputPointZ, padF, 0)
                    - __sad(outputPointZ, grad_in.size(2) + padF - 1, 0)
                    - outputPointZ
                    + 2 * padF + grad_in.size(2) - 1
                    - oStartZ + iStartZ;
  DType valueToCopy =
      grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
  atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
            valueToCopy);
}
/* int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, grad_in.size(3) + padL - 1, 0)
- outputPointX
+ 2 * padL + grad_in.size(3) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, grad_in.size(2) + padT - 1, 0)
- outputPointY
+ 2 * padT + grad_in.size(2) - 1
- oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);*/
// Launches the 3D reflection-padding backward kernel on an NCDHW tensor.
// pad[4]/pad[6]/pad[8] are the front (D), top (H) and left (W) amounts.
// NOTE(review): kernel accumulates via atomicAdd — assumes grad_in is
// zero-initialized by the caller; confirm.
template <typename DType>
inline void image_pad_reflect_grad(Tensor<gpu, 5, DType> grad_in,
                                   const Tensor<gpu, 5, DType> &grad_out,
                                   const mxnet::TShape &pad) {
  const int padF = pad[4];
  const int padT = pad[6];
  const int padL = pad[8];
  dim3 dimBlock(kBaseThreadNum);
  // Ceiling division by the actual block size (was a hard-coded 256).
  int xGridSize =
      (grad_out.size(2) * grad_out.size(3) * grad_out.size(4) +
       kBaseThreadNum - 1) / kBaseThreadNum;
  dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
  CheckLaunchParam(dimGrid, dimBlock, "Pad");
  hipStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
  hipLaunchKernelGGL(( image_3d_pad_reflect_grad_kernel<kBaseThreadBits,
                       DType>), dim3(dimGrid), dim3(dimBlock), 0, stream,
                     grad_in, grad_out, padF, padT, padL);
  MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_reflect_grad_kernel);
}
////////////////////////////////////////////////////////////////////////////////
} // namespace cuda
// Dispatches the forward padding operation to the mode-specific GPU
// implementation (resolves to the 4D or 5D overloads above via `dim`).
// `constant_value` is only consulted in kConstant mode.
// NOTE(review): an unrecognized `mode` silently leaves dst untouched —
// confirm callers validate the mode beforehand.
template <int dim, typename DType>
void pad_image(Tensor<gpu, dim, DType> dst, const Tensor<gpu, dim, DType> src,
               const mxnet::TShape pad, int mode, const DType constant_value) {
  switch (mode) {
    case mxnet::op::pad_enum::kEdge:
      cuda::image_pad_edge(dst, src, pad);
      break;
    case mxnet::op::pad_enum::kConstant:
      cuda::image_pad_constant(dst, src, pad, constant_value);
      break;
    case mxnet::op::pad_enum::kReflect:
      cuda::image_pad_reflect(dst, src, pad);
      break;
  }
}
// Dispatches the backward padding operation to the mode-specific GPU
// implementation (resolves to the 4D or 5D overloads above via `dim`).
// NOTE(review): an unrecognized `mode` silently leaves grad_in untouched —
// confirm callers validate the mode beforehand.
template <int dim, typename DType>
void pad_image_grad(Tensor<gpu, dim, DType> grad_in,
                    const Tensor<gpu, dim, DType> grad_out,
                    const mxnet::TShape pad, int mode) {
  switch (mode) {
    case mxnet::op::pad_enum::kEdge:
      cuda::image_pad_edge_grad(grad_in, grad_out, pad);
      break;
    case mxnet::op::pad_enum::kConstant:
      cuda::image_pad_constant_grad(grad_in, grad_out, pad);
      break;
    case mxnet::op::pad_enum::kReflect:
      cuda::image_pad_reflect_grad(grad_in, grad_out, pad);
      break;
  }
}
} // namespace mshadow
////////////////////////////////////////////////////////////////////////////////
namespace mxnet {
namespace op {
// GPU operator factory: instantiates PadOp for the requested real dtype
// (the macro expands a switch over the supported floating-point types).
template <>
Operator *CreateOp<gpu>(PadParam param, int dtype) {
  Operator *op = NULL;
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PadOp<gpu, DType>(param); })
  return op;
}
} // namespace op
} // namespace mxnet
| b111f88002acc0202eb5672d1028e35327ea2544.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file pad.cu
* \brief
* \author Sebastian Bodenstein
*/
#include <algorithm>
#include "./pad-inl.h"
#include "../common/cuda_utils.h"
namespace mshadow {
namespace cuda {
////////////////////////////////////////////////////////////////////////////////
// Special Case: 2d image (so only pad width + height)
// Case 1: Replication Padding
// single_image_2d_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReplicationPadding.cu
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX =
min(max(padL, outputPointX), src.size(3) + padL - 1) - oStartX + iStartX;
int inputPointY =
min(max(padT, outputPointY), src.size(2) + padT - 1) - oStartY + iStartY;
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_edge_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(dst, src,
padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_edge_kernel);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_edge_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = min(max(padL, outputPointX), grad_in.size(3) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), grad_in.size(2) + padT - 1) -
oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_2d_pad_edge_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_edge_grad_kernel);
}
// Case 2: Constant Padding
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
// cast sizes to int to use in min/max
int Ny = src.size(2);
int Nx = src.size(3);
int plane = blockIdx.y;
int batch = blockIdx.z;
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int checkT = max(0, outputPointY - padT + 1);
int checkB = max(0, padT + Ny - outputPointY);
int checkL = max(0, outputPointX - padL + 1);
int checkR = max(0, padL + Nx - outputPointX);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkT * checkB * checkL * checkR);
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_constant_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
dst, src, padT, padL, constant);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_constant_kernel);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_constant_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int inPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
int pixel_num = grad_in.size(2) * grad_in.size(3);
if (inPointId >= pixel_num) {
return;
}
int inPointX = inPointId % grad_in.size(3);
int inPointY = inPointId / grad_in.size(3);
int outPointX = inPointX + padL;
int outPointY = inPointY + padT;
grad_in[batch][plane][inPointY][inPointX] =
grad_out[batch][plane][outPointY][outPointX];
}
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_in.size(2) * grad_in.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
image_2d_pad_constant_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_constant_grad_kernel);
}
// Case 3: Reflection Padding
// adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/SpatialReflectionPadding.cu
template <int n_bits, typename DType>
__global__ void image_2d_pad_reflect_kernel(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> src,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= dst.size(2) * dst.size(3)) {
return;
}
int outputPointX = outputPointId % dst.size(3);
int outputPointY = outputPointId / dst.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, src.size(3) + padL - 1, 0)
- outputPointX
+ 2 * padL + src.size(3) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, src.size(2) + padT - 1, 0)
- outputPointY
+ 2 * padT + src.size(2) - 1
- oStartY + iStartY;
DType valueToCopy = src[batch][plane][inputPointY][inputPointX];
dst[batch][plane][outputPointY][outputPointX] = valueToCopy;
}
template <typename DType>
inline void image_pad_reflect(Tensor<gpu, 4, DType> dst,
const Tensor<gpu, 4, DType> &src,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (dst.size(2) * dst.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, dst.size(1), dst.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_2d_pad_reflect_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(dst, src,
padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_reflect_kernel);
}
template <int n_bits, typename DType>
__global__ void image_2d_pad_reflect_grad_kernel(
Tensor<gpu, 4, DType> grad_in, const Tensor<gpu, 4, DType> grad_out,
const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, grad_in.size(3) + padL - 1, 0)
- outputPointX
+ 2 * padL + grad_in.size(3) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, grad_in.size(2) + padT - 1, 0)
- outputPointY
+ 2 * padT + grad_in.size(2) - 1
- oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);
}
template <typename DType>
inline void image_pad_reflect_grad(Tensor<gpu, 4, DType> grad_in,
const Tensor<gpu, 4, DType> &grad_out,
const mxnet::TShape &pad) {
const int padT = pad[4];
const int padL = pad[6];
dim3 dimBlock(kBaseThreadNum);
int xGridSize = (grad_out.size(2) * grad_out.size(3) + 256 - 1) / 256;
dim3 dimGrid(xGridSize, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(dimGrid, dimBlock, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_2d_pad_reflect_grad_kernel<kBaseThreadBits,
DType><<<dimGrid, dimBlock, 0, stream>>>(
grad_in, grad_out, padT, padL);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_2d_pad_reflect_grad_kernel);
}
////////////////////////////////////////////////////////////////////////////////
// Special Case: 3d image (pad depth + width + height)
// Case 1: Replication Padding
// single_image_3_edge adapted from Torch
// https://github.com/torch/cunn/blob/master/lib/THCUNN/VolumetricReplicationPadding.cu
// Forward 3-D edge (replication) padding: every dst voxel copies the nearest
// src voxel, obtained by clamping each coordinate into the valid input range.
// Adapted from Torch's VolumetricReplicationPadding.
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int plane = blockIdx.y;
const int batch = blockIdx.z;
if (tid >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
// Unflatten the 1-D thread id into (z, y, x) output coordinates.
const int ox = tid % dst.size(4);
const int oy = (tid / dst.size(4)) % dst.size(3);
const int oz = tid / (dst.size(3) * dst.size(4));
// Start offsets; the max(0, .) forms also cover negative pads (cropping).
const int ix0 = max(0, -padL);
const int iy0 = max(0, -padT);
const int iz0 = max(0, -padF);
const int ox0 = max(0, padL);
const int oy0 = max(0, padT);
const int oz0 = max(0, padF);
// Clamp each output coordinate into the padded input extent, then shift
// back into input space.
const int ix = min(max(padL, ox), src.size(4) + padL - 1) - ox0 + ix0;
const int iy = min(max(padT, oy), src.size(3) + padT - 1) - oy0 + iy0;
const int iz = min(max(padF, oz), src.size(2) + padF - 1) - oz0 + iz0;
dst[batch][plane][oz][oy][ox] = src[batch][plane][iz][iy][ix];
}
// Launches the 3-D replication-padding forward kernel: one thread per dst
// voxel, grid.y = channels, grid.z = batch.
template <typename DType>
inline void image_pad_edge(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad) {
const int pad_front = pad[4];
const int pad_top = pad[6];
const int pad_left = pad[8];
dim3 threads(kBaseThreadNum);
const int blocks_x = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 blocks(blocks_x, dst.size(1), dst.size(0));
CheckLaunchParam(blocks, threads, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_edge_kernel<kBaseThreadBits,
DType><<<blocks, threads, 0, stream>>>(
dst, src, pad_front, pad_top, pad_left);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_edge_kernel);
}
// Backward pass for 3-D edge (replication) padding.
// Each thread takes one grad_out voxel and accumulates it into the clamped
// source voxel it was replicated from. Border voxels of grad_in receive
// contributions from many output voxels, hence the atomicAdd; grad_in is
// presumably zero-initialised by the caller — not visible here, TODO confirm.
template <int n_bits, typename DType>
__global__ void image_3d_pad_edge_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Guard the ragged tail of the flattened D*H*W thread range.
if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
return;
}
// Unflatten the 1-D id into (z, y, x) output coordinates.
int outputPointX = outputPointId % grad_out.size(4);
int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
// Start offsets; the max(0, .) forms also cover negative pads (cropping).
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
// Clamp each output coordinate into the padded input extent, then shift
// back into input space — mirrors the forward kernel's index mapping.
int inputPointX = min(max(padL, outputPointX), grad_in.size(4) + padL - 1) -
oStartX + iStartX;
int inputPointY = min(max(padT, outputPointY), grad_in.size(3) + padT - 1) -
oStartY + iStartY;
int inputPointZ = min(max(padF, outputPointZ), grad_in.size(2) + padF - 1) -
oStartZ + iStartZ;
DType valueToCopy =
grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
// Launches the 3-D replication-padding backward kernel: one thread per
// grad_out voxel, grid.y = channels, grid.z = batch.
template <typename DType>
inline void image_pad_edge_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int pad_front = pad[4];
const int pad_top = pad[6];
const int pad_left = pad[8];
dim3 threads(kBaseThreadNum);
const int blocks_x =
(grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256;
dim3 blocks(blocks_x, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(blocks, threads, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_3d_pad_edge_grad_kernel<kBaseThreadBits,
DType><<<blocks, threads, 0, stream>>>(
grad_in, grad_out, pad_front, pad_top, pad_left);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_edge_grad_kernel);
}
// Case 2: Constant Padding
// Forward 3-D constant padding: voxels inside the original volume copy their
// source value, voxels in the pad border get `constant`. The pad test and the
// final select are branch-free to avoid warp divergence.
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL,
const DType constant) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
// Guard the ragged tail of the flattened D*H*W thread range.
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
// cast sizes to int to use in min/max
int Nz = src.size(2);
int Ny = src.size(3);
int Nx = src.size(4);
int plane = blockIdx.y;
int batch = blockIdx.z;
// Unflatten the 1-D id into (z, y, x) output coordinates.
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
// Each check* is 0 exactly when the point lies in the corresponding pad
// band (e.g. checkFront == 0 iff outputPointZ < padF), positive otherwise.
int checkFront = max(0, outputPointZ - padF + 1);
int checkBack = max(0, padF + Nz - outputPointZ);
int checkTop = max(0, outputPointY - padT + 1);
int checkBottom = max(0, padT + Ny - outputPointY);
int checkLeft = max(0, outputPointX - padL + 1);
int checkRight = max(0, padL + Nx - outputPointX);
// Clamp to a valid source voxel so the read below is always in bounds,
// even for pad-region threads (whose value is then discarded).
int inputPointZ = min(max(outputPointZ - padF, 0), Nz - 1);
int inputPointX = min(max(outputPointX - padL, 0), Nx - 1);
int inputPointY = min(max(outputPointY - padT, 0), Ny - 1);
// 1 if need padding, 0 if not
int need_pad = !(checkFront * checkBack * checkTop * checkBottom * checkLeft *
checkRight);
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
// Branch-free select between the source value and the pad constant.
dst[batch][plane][outputPointZ][outputPointY][outputPointX] =
valueToCopy * (!need_pad) + need_pad * constant;
}
// Launches the 3-D constant-padding forward kernel: one thread per dst voxel,
// grid.y = channels, grid.z = batch. `constant` is the fill value.
template <typename DType>
inline void image_pad_constant(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad, const DType constant) {
const int pad_front = pad[4];
const int pad_top = pad[6];
const int pad_left = pad[8];
dim3 threads(kBaseThreadNum);
const int blocks_x = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 blocks(blocks_x, dst.size(1), dst.size(0));
CheckLaunchParam(blocks, threads, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_constant_kernel<kBaseThreadBits,
DType><<<blocks, threads, 0, stream>>>(
dst, src, pad_front, pad_top, pad_left, constant);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_constant_kernel);
}
// Backward pass for 3-D constant padding: gradients that fell in the padded
// border are dropped; each grad_in voxel simply reads the grad_out voxel
// shifted by the pad offsets. One thread per grad_in voxel.
template <int n_bits, typename DType>
__global__ void image_3d_pad_constant_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int plane = blockIdx.y;
const int batch = blockIdx.z;
// Guard the ragged tail of the flattened input range.
if (tid >= grad_in.size(2) * grad_in.size(3) * grad_in.size(4)) {
return;
}
// Unflatten the 1-D thread id into (z, y, x) input coordinates.
const int ix = tid % grad_in.size(4);
const int iy = (tid / grad_in.size(4)) % grad_in.size(3);
const int iz = tid / (grad_in.size(3) * grad_in.size(4));
grad_in[batch][plane][iz][iy][ix] =
grad_out[batch][plane][iz + padF][iy + padT][ix + padL];
}
// Launches the 3-D constant-padding backward kernel. Unlike the other
// backward launchers, the grid is sized over grad_in (one thread per input
// voxel), since the mapping is one-to-one.
template <typename DType>
inline void image_pad_constant_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int pad_front = pad[4];
const int pad_top = pad[6];
const int pad_left = pad[8];
dim3 threads(kBaseThreadNum);
const int blocks_x =
(grad_in.size(2) * grad_in.size(3) * grad_in.size(4) + 256 - 1) / 256;
dim3 blocks(blocks_x, grad_in.size(1), grad_in.size(0));
CheckLaunchParam(blocks, threads, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_in.stream_);
image_3d_pad_constant_grad_kernel<kBaseThreadBits,
DType><<<blocks, threads, 0, stream>>>(
grad_in, grad_out, pad_front, pad_top, pad_left);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_constant_grad_kernel);
}
// Case 3: Reflection Padding
// Forward 3-D reflection padding: every dst voxel copies the src voxel
// obtained by mirroring its coordinates about the pad boundaries.
// One thread per dst voxel; grid.y = channels, grid.z = batch.
template <int n_bits, typename DType>
__global__ void image_3d_pad_reflect_kernel(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> src,
const int padF, const int padT,
const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Guard the ragged tail of the flattened D*H*W thread range.
if (outputPointId >= dst.size(2) * dst.size(3) * dst.size(4)) {
return;
}
// Unflatten the 1-D id into (z, y, x) output coordinates.
int outputPointX = outputPointId % dst.size(4);
int outputPointY = (outputPointId / dst.size(4)) % dst.size(3);
int outputPointZ = outputPointId / (dst.size(3) * dst.size(4));
// Start offsets; the max(0, .) forms also cover negative pads (cropping).
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
// Branch-free reflection (Torch ReflectionPad trick): __sad(a, b, 0) is
// |a - b|, so  |x - pad| - |x - (N + pad - 1)| - x + 2*pad + N - 1
// mirrors x about both pad boundaries without divergent branches.
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, src.size(4) + padL - 1, 0)
- outputPointX
+ 2 * padL + src.size(4) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, src.size(3) + padT - 1, 0)
- outputPointY
+ 2 * padT + src.size(3) - 1
- oStartY + iStartY;
int inputPointZ = __sad(outputPointZ, padF, 0)
- __sad(outputPointZ, src.size(2) + padF - 1, 0)
- outputPointZ
+ 2 * padF + src.size(2) - 1
- oStartZ + iStartZ;
DType valueToCopy = src[batch][plane][inputPointZ][inputPointY][inputPointX];
dst[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
// Launches the 3-D reflection-padding forward kernel: one thread per dst
// voxel, grid.y = channels, grid.z = batch.
template <typename DType>
inline void image_pad_reflect(Tensor<gpu, 5, DType> dst,
const Tensor<gpu, 5, DType> &src,
const mxnet::TShape &pad) {
const int pad_front = pad[4];
const int pad_top = pad[6];
const int pad_left = pad[8];
dim3 threads(kBaseThreadNum);
const int blocks_x = (dst.size(2) * dst.size(3) * dst.size(4) + 256 - 1) / 256;
dim3 blocks(blocks_x, dst.size(1), dst.size(0));
CheckLaunchParam(blocks, threads, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(dst.stream_);
image_3d_pad_reflect_kernel<kBaseThreadBits,
DType><<<blocks, threads, 0, stream>>>(
dst, src, pad_front, pad_top, pad_left);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_reflect_kernel);
}
// Backward pass for 3-D reflection padding.
// Each thread takes one grad_out voxel and accumulates it into the input
// voxel it was reflected from (same index arithmetic as the forward kernel).
// Mirrored positions receive several contributions, hence the atomicAdd;
// grad_in is presumably zero-initialised by the caller — TODO confirm.
template <int n_bits, typename DType>
__global__ void image_3d_pad_reflect_grad_kernel(
Tensor<gpu, 5, DType> grad_in, const Tensor<gpu, 5, DType> grad_out,
const int padF, const int padT, const int padL) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Guard the ragged tail of the flattened D*H*W thread range.
if (outputPointId >= grad_out.size(2) * grad_out.size(3) * grad_out.size(4)) {
return;
}
// Unflatten the 1-D id into (z, y, x) output coordinates.
int outputPointX = outputPointId % grad_out.size(4);
int outputPointY = (outputPointId / grad_out.size(4)) % grad_out.size(3);
int outputPointZ = outputPointId / (grad_out.size(3) * grad_out.size(4));
// Start offsets; the max(0, .) forms also cover negative pads (cropping).
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int iStartZ = max(0, -padF);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int oStartZ = max(0, padF);
// Branch-free reflection via __sad (|a - b|); see the forward kernel for
// the derivation of this index mapping.
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, grad_in.size(4) + padL - 1, 0)
- outputPointX
+ 2 * padL + grad_in.size(4) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, grad_in.size(3) + padT - 1, 0)
- outputPointY
+ 2 * padT + grad_in.size(3) - 1
- oStartY + iStartY;
int inputPointZ = __sad(outputPointZ, padF, 0)
- __sad(outputPointZ, grad_in.size(2) + padF - 1, 0)
- outputPointZ
+ 2 * padF + grad_in.size(2) - 1
- oStartZ + iStartZ;
DType valueToCopy =
grad_out[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
/* int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= grad_out.size(2) * grad_out.size(3)) {
return;
}
int outputPointX = outputPointId % grad_out.size(3);
int outputPointY = outputPointId / grad_out.size(3);
int iStartX = max(0, -padL);
int iStartY = max(0, -padT);
int oStartX = max(0, padL);
int oStartY = max(0, padT);
int inputPointX = __sad(outputPointX, padL, 0)
- __sad(outputPointX, grad_in.size(3) + padL - 1, 0)
- outputPointX
+ 2 * padL + grad_in.size(3) - 1
- oStartX + iStartX;
int inputPointY = __sad(outputPointY, padT, 0)
- __sad(outputPointY, grad_in.size(2) + padT - 1, 0)
- outputPointY
+ 2 * padT + grad_in.size(2) - 1
- oStartY + iStartY;
DType valueToCopy = grad_out[batch][plane][outputPointY][outputPointX];
atomicAdd(&grad_in[batch][plane][inputPointY][inputPointX], valueToCopy);*/
// Launches the 3-D reflection-padding backward kernel: one thread per
// grad_out voxel, grid.y = channels, grid.z = batch.
template <typename DType>
inline void image_pad_reflect_grad(Tensor<gpu, 5, DType> grad_in,
const Tensor<gpu, 5, DType> &grad_out,
const mxnet::TShape &pad) {
const int pad_front = pad[4];
const int pad_top = pad[6];
const int pad_left = pad[8];
dim3 threads(kBaseThreadNum);
const int blocks_x =
(grad_out.size(2) * grad_out.size(3) * grad_out.size(4) + 256 - 1) / 256;
dim3 blocks(blocks_x, grad_out.size(1), grad_out.size(0));
CheckLaunchParam(blocks, threads, "Pad");
cudaStream_t stream = Stream<gpu>::GetStream(grad_out.stream_);
image_3d_pad_reflect_grad_kernel<kBaseThreadBits,
DType><<<blocks, threads, 0, stream>>>(
grad_in, grad_out, pad_front, pad_top, pad_left);
MSHADOW_CUDA_POST_KERNEL_CHECK(image_3d_pad_reflect_grad_kernel);
}
////////////////////////////////////////////////////////////////////////////////
} // namespace cuda
// Forward padding dispatch: selects the edge / constant / reflection launcher
// for a 4-D (spatial) or 5-D (volumetric) tensor according to `mode`.
// `constant_value` is only used in the kConstant case. An unrecognised mode
// silently does nothing (no default case).
template <int dim, typename DType>
void pad_image(Tensor<gpu, dim, DType> dst, const Tensor<gpu, dim, DType> src,
const mxnet::TShape pad, int mode, const DType constant_value) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge(dst, src, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant(dst, src, pad, constant_value);
break;
case mxnet::op::pad_enum::kReflect:
cuda::image_pad_reflect(dst, src, pad);
break;
}
}
// Backward padding dispatch: mirrors pad_image, routing to the matching
// gradient launcher for the given mode. An unrecognised mode silently does
// nothing (no default case).
template <int dim, typename DType>
void pad_image_grad(Tensor<gpu, dim, DType> grad_in,
const Tensor<gpu, dim, DType> grad_out,
const mxnet::TShape pad, int mode) {
switch (mode) {
case mxnet::op::pad_enum::kEdge:
cuda::image_pad_edge_grad(grad_in, grad_out, pad);
break;
case mxnet::op::pad_enum::kConstant:
cuda::image_pad_constant_grad(grad_in, grad_out, pad);
break;
case mxnet::op::pad_enum::kReflect:
cuda::image_pad_reflect_grad(grad_in, grad_out, pad);
break;
}
}
} // namespace mshadow
////////////////////////////////////////////////////////////////////////////////
namespace mxnet {
namespace op {
// GPU factory specialisation for the Pad operator: instantiates PadOp for the
// requested real dtype via MSHADOW_REAL_TYPE_SWITCH. Caller owns the result.
template <>
Operator *CreateOp<gpu>(PadParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PadOp<gpu, DType>(param); })
return op;
}
} // namespace op
} // namespace mxnet
|
02239e1b60f5cff799fe433ad0dd7afe2b971250.hip | // !!! This is a file automatically generated by hipify!!!
// Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4096
// Compute-throughput microbenchmark kernel: each thread loads two array
// values and runs a long chain of dependent multiply-adds over four
// registers, then writes all four results (so the compiler cannot dead-code
// eliminate the loop). One thread per element; guarded for xIndex < SIZE.
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
// Mirrored load: SIZE-1-xIndex stays within [0, SIZE-1].
// (The original SIZE-xIndex read A[SIZE] — one past the end — when
// xIndex == 0.)
rb=A[SIZE-1-xIndex];
rc=A[xIndex];
rd=A[SIZE-1-xIndex];
// rb=A[xIndex];
#pragma unroll 4096
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rc+rb;
rb=rb*rd+rc;
rc=rc*ra+rd;
rd=rd*rb+ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
// Benchmark driver: fills an int vector, runs simpleKernel `outer_reps`
// times (argv[1], default 1), and reports the average time and effective
// bandwidth measured with HIP events.
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
hipEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size);
// allocate host memory
int *h_iA = (int *) malloc(mem_size);
int *h_oC1 = (int *) malloc(mem_size);
int *h_oC2 = (int *) malloc(mem_size);
int *h_oC3 = (int *) malloc(mem_size);
int *h_oC4 = (int *) malloc(mem_size);
// initalize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (int) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
int *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
hipMalloc((void **) &d_iA, mem_size);
// hipMalloc((void **) &d_iB, mem_size);
hipMalloc((void **) &d_oC1, mem_size);
hipMalloc((void **) &d_oC2, mem_size);
hipMalloc((void **) &d_oC3, mem_size);
hipMalloc((void **) &d_oC4, mem_size);
// copy host data to device
hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice);
// hipMemcpy(d_iB, h_iB, mem_size, hipMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d  TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
hipEventCreate(&start);
hipEventCreate(&stop);
// take measurements for loop over kernel launches
hipEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
hipLaunchKernelGGL(( simpleKernel<int>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float kernelTime;
hipEventElapsedTime(&kernelTime, start, stop);
// take measurements for loop inside kernel
hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost);
// h_oC1 holds ints: the original "%f" conversion with an int argument is
// undefined behavior and printed garbage; "%d" matches the type.
printf("teste: %d\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
hipFree(d_iA);
// hipFree(d_iB);
hipFree(d_oC1);
hipFree(d_oC2);
hipFree(d_oC3);
hipFree(d_oC4);
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 02239e1b60f5cff799fe433ad0dd7afe2b971250.cu | // Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4096
// Compute-throughput microbenchmark kernel: each thread loads two array
// values and runs a long chain of dependent multiply-adds over four
// registers, then writes all four results (so the compiler cannot dead-code
// eliminate the loop). One thread per element; guarded for xIndex < SIZE.
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
// Mirrored load: SIZE-1-xIndex stays within [0, SIZE-1].
// (The original SIZE-xIndex read A[SIZE] — one past the end — when
// xIndex == 0.)
rb=A[SIZE-1-xIndex];
rc=A[xIndex];
rd=A[SIZE-1-xIndex];
// rb=A[xIndex];
#pragma unroll 4096
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rc+rb;
rb=rb*rd+rc;
rc=rc*ra+rd;
rd=rd*rb+ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
// Benchmark driver: fills an int vector, runs simpleKernel `outer_reps`
// times (argv[1], default 1), and reports the average time and effective
// bandwidth measured with CUDA events.
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
cudaEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size);
// allocate host memory
int *h_iA = (int *) malloc(mem_size);
int *h_oC1 = (int *) malloc(mem_size);
int *h_oC2 = (int *) malloc(mem_size);
int *h_oC3 = (int *) malloc(mem_size);
int *h_oC4 = (int *) malloc(mem_size);
// initalize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (int) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
int *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
cudaMalloc((void **) &d_iA, mem_size);
// cudaMalloc((void **) &d_iB, mem_size);
cudaMalloc((void **) &d_oC1, mem_size);
cudaMalloc((void **) &d_oC2, mem_size);
cudaMalloc((void **) &d_oC3, mem_size);
cudaMalloc((void **) &d_oC4, mem_size);
// copy host data to device
cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_iB, h_iB, mem_size, cudaMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d  TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// take measurements for loop over kernel launches
cudaEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
simpleKernel<int><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float kernelTime;
cudaEventElapsedTime(&kernelTime, start, stop);
// take measurements for loop inside kernel
cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost);
// h_oC1 holds ints: the original "%f" conversion with an int argument is
// undefined behavior and printed garbage; "%d" matches the type.
printf("teste: %d\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
cudaFree(d_iA);
// cudaFree(d_iB);
cudaFree(d_oC1);
cudaFree(d_oC2);
cudaFree(d_oC3);
cudaFree(d_oC4);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
f0395104924180ebda8ff626e446ba86d7d62428.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include "support.h"
#ifndef pi
#define pi 3.14159265358979323846
#endif
/**************************************************************
KERNEL CODE
**************************************************************/
// Naive filtered-backprojection accumulation: one thread per pixel of the
// n x n reconstructed image, looping over all projection angles and adding
// the nearest detector sample for each angle.
// Assumes img_out_d was zero-initialised by the caller (the kernel uses +=)
// and that the computed sino_index always lands in [0, num_detectors) per
// angle — neither is checked here; TODO confirm at the call site.
__global__ void backprojection(float *sinoData_d, float dist, unsigned int num_angles, unsigned int num_detectors, float *img_out_d ){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n;
// Image side length: largest even square inscribed in the detector circle.
n = 2 * floorf(num_detectors / (2 * sqrtf(2)));
float cos_theta;
float sin_theta;
float angle_rad;
int img_out_index;
unsigned sino_index;
for(int angle = 0; angle < num_angles; ++angle){
// Angles are taken in 1-degree steps; convert to radians.
angle_rad = pi * angle / 180;
cos_theta = cosf(angle_rad);
sin_theta = sinf(angle_rad);
// Bounds guard: threads outside the n x n image skip the accumulation.
if((row < n) && (col < n)){
img_out_index = row * n + col;
// Project the (centred) pixel onto the detector axis for this angle
// and round to the nearest detector bin.
sino_index = static_cast<unsigned int>(rintf((cos_theta * (col - n/2.0f) + sin_theta * (row - n/2.0f)) /
dist + num_detectors/2.0f)) + num_detectors * angle;
img_out_d[img_out_index] += sinoData_d[sino_index];
}
}
}
//This kernel examines what happens when independent calcaulations are removed
//from the loop. Calculations not dependent on loop(img_out_index = row * n + col;
//This kernel examines what happens when independent calcaulations are removed
//from the loop. Calculations not dependent on loop(img_out_index = row * n + col;
// Variant of `backprojection` with the loop-invariant output index hoisted
// out of the angle loop. The hoisted index may be computed for out-of-range
// threads, but it is only used inside the (row < n && col < n) guard, so no
// out-of-bounds access occurs.
// NOTE(review): floor() here is the double-precision overload while the
// first kernel uses floorf(); the values agree for these magnitudes but the
// mix is inconsistent.
__global__ void backprojection_reduceloopcalc(float *sinoData_d, float dist, unsigned int num_angles, unsigned int num_detectors, float *img_out_d){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n;
n = 2 * floor(num_detectors / (2 * sqrtf(2)));
float cos_theta;
float sin_theta;
float angle_rad;
int img_out_index;
unsigned int sino_index;
img_out_index = row * n + col;
for(int angle = 0; angle < num_angles; ++angle){
angle_rad = pi * angle / 180;
cos_theta = cosf(angle_rad);
sin_theta = sinf(angle_rad);
if((row < n) && (col < n)){
sino_index = static_cast<unsigned int>(rintf((cos_theta * (col - n/2.0f) + sin_theta * (row - n/2.0f)) /
dist + num_detectors/2.0f)) + num_detectors * angle;
img_out_d[img_out_index] += sinoData_d[sino_index];
}
}
}
//This kernel reduces number of accesses to global memory
//Declare an array variable in local memory
// Variant of `backprojection` that accumulates into a register and writes
// global memory once per thread instead of once per angle.
// Fix: the original stored img_out_d[img_out_index] unconditionally, so
// threads with row >= n or col >= n (the ragged grid tail) wrote out of
// bounds. Such threads now exit before touching memory.
__global__ void backprojection_reduce_numaccess(float *sinoData_d, float dist, unsigned int num_angles, unsigned int num_detectors, float *img_out_d){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// Image side length: largest even square inscribed in the detector circle.
unsigned int n = 2 * floorf(num_detectors / (2 * sqrtf(2)));
// Threads outside the n x n image must not write anything.
if ((row >= n) || (col >= n)) {
return;
}
int img_out_index = row * n + col;
// Per-thread accumulator kept in a register; one global store at the end.
float pixel = 0;
for(int angle = 0; angle < num_angles; ++angle){
float angle_rad = pi * angle / 180;
float cos_theta = cosf(angle_rad);
float sin_theta = sinf(angle_rad);
// Project the (centred) pixel onto the detector axis for this angle
// and round to the nearest detector bin.
unsigned int sino_index = static_cast<unsigned int>(rintf((cos_theta * (col - n/2.0f) + sin_theta * (row - n/2.0f)) /
dist + num_detectors/2.0f)) + num_detectors * angle;
pixel += sinoData_d[sino_index];
}
img_out_d[img_out_index] = pixel;
}
| f0395104924180ebda8ff626e446ba86d7d62428.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include "support.h"
#ifndef pi
#define pi 3.14159265358979323846
#endif
/**************************************************************
KERNEL CODE
**************************************************************/
// Naive filtered-backprojection accumulation: one thread per pixel of the
// n x n reconstructed image, looping over all projection angles and adding
// the nearest detector sample for each angle.
// Assumes img_out_d was zero-initialised by the caller (the kernel uses +=)
// and that the computed sino_index always lands in [0, num_detectors) per
// angle — neither is checked here; TODO confirm at the call site.
__global__ void backprojection(float *sinoData_d, float dist, unsigned int num_angles, unsigned int num_detectors, float *img_out_d ){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n;
// Image side length: largest even square inscribed in the detector circle.
n = 2 * floorf(num_detectors / (2 * sqrtf(2)));
float cos_theta;
float sin_theta;
float angle_rad;
int img_out_index;
unsigned sino_index;
for(int angle = 0; angle < num_angles; ++angle){
// Angles are taken in 1-degree steps; convert to radians.
angle_rad = pi * angle / 180;
cos_theta = cosf(angle_rad);
sin_theta = sinf(angle_rad);
// Bounds guard: threads outside the n x n image skip the accumulation.
if((row < n) && (col < n)){
img_out_index = row * n + col;
// Project the (centred) pixel onto the detector axis for this angle
// and round to the nearest detector bin.
sino_index = static_cast<unsigned int>(rintf((cos_theta * (col - n/2.0f) + sin_theta * (row - n/2.0f)) /
dist + num_detectors/2.0f)) + num_detectors * angle;
img_out_d[img_out_index] += sinoData_d[sino_index];
}
}
}
//This kernel examines what happens when independent calcaulations are removed
//from the loop. Calculations not dependent on loop(img_out_index = row * n + col;
//This kernel examines what happens when independent calcaulations are removed
//from the loop. Calculations not dependent on loop(img_out_index = row * n + col;
// Variant of `backprojection` with the loop-invariant output index hoisted
// out of the angle loop. The hoisted index may be computed for out-of-range
// threads, but it is only used inside the (row < n && col < n) guard, so no
// out-of-bounds access occurs.
// NOTE(review): floor() here is the double-precision overload while the
// first kernel uses floorf(); the values agree for these magnitudes but the
// mix is inconsistent.
__global__ void backprojection_reduceloopcalc(float *sinoData_d, float dist, unsigned int num_angles, unsigned int num_detectors, float *img_out_d){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int n;
n = 2 * floor(num_detectors / (2 * sqrtf(2)));
float cos_theta;
float sin_theta;
float angle_rad;
int img_out_index;
unsigned int sino_index;
img_out_index = row * n + col;
for(int angle = 0; angle < num_angles; ++angle){
angle_rad = pi * angle / 180;
cos_theta = cosf(angle_rad);
sin_theta = sinf(angle_rad);
if((row < n) && (col < n)){
sino_index = static_cast<unsigned int>(rintf((cos_theta * (col - n/2.0f) + sin_theta * (row - n/2.0f)) /
dist + num_detectors/2.0f)) + num_detectors * angle;
img_out_d[img_out_index] += sinoData_d[sino_index];
}
}
}
//This kernel reduces number of accesses to global memory
//Declare an array variable in local memory
// Variant of `backprojection` that accumulates into a register and writes
// global memory once per thread instead of once per angle.
// Fix: the original stored img_out_d[img_out_index] unconditionally, so
// threads with row >= n or col >= n (the ragged grid tail) wrote out of
// bounds. Such threads now exit before touching memory.
__global__ void backprojection_reduce_numaccess(float *sinoData_d, float dist, unsigned int num_angles, unsigned int num_detectors, float *img_out_d){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// Image side length: largest even square inscribed in the detector circle.
unsigned int n = 2 * floorf(num_detectors / (2 * sqrtf(2)));
// Threads outside the n x n image must not write anything.
if ((row >= n) || (col >= n)) {
return;
}
int img_out_index = row * n + col;
// Per-thread accumulator kept in a register; one global store at the end.
float pixel = 0;
for(int angle = 0; angle < num_angles; ++angle){
float angle_rad = pi * angle / 180;
float cos_theta = cosf(angle_rad);
float sin_theta = sinf(angle_rad);
// Project the (centred) pixel onto the detector axis for this angle
// and round to the nearest detector bin.
unsigned int sino_index = static_cast<unsigned int>(rintf((cos_theta * (col - n/2.0f) + sin_theta * (row - n/2.0f)) /
dist + num_detectors/2.0f)) + num_detectors * angle;
pixel += sinoData_d[sino_index];
}
img_out_d[img_out_index] = pixel;
}
|
f8f109966b9941d8f348b0f88eb6233887f92f77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Element-wise vector addition: C[i] = A[i] + B[i] for every i in [0, n).
// One thread per element; out-of-range threads exit early.
__global__ void vecAddKernel(float* A, float* B, float* C, int n)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n) {
return;
}
C[idx] = A[idx] + B[idx];
}
// Host wrapper for vecAddKernel: copies A and B to the device, launches one
// thread per element in 256-thread blocks, and copies the sum back into C.
// NOTE(review): no error checking on the hipMalloc/hipMemcpy/launch calls —
// failures are silent; consider checking the returned status codes.
void vecAdd(float* A, float* B, float* C, int n) {
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
///
hipMalloc((void **) &d_A, size);
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_B, size);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_C, size);
// ceil(n/256.0) blocks covers the ragged tail; kernel guards i < n.
hipLaunchKernelGGL(( vecAddKernel), dim3(ceil(n/256.0)), dim3(256), 0, 0, d_A, d_B, d_C, n);
hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
///Free device memory for A, B, C
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
// Demo driver: builds two 10000-element vectors, adds them on the GPU via
// vecAdd, and prints every element of the result.
int main(int argc, char const *argv[]) {
float *A, *B, *C;
int n = 10000;
A = new float[n];
B = new float[n];
C = new float[n];
// Initialise inputs; C is cleared so stale memory cannot masquerade as
// a result if the copy-back fails.
for (int i = 0; i < n; i++) {
A[i] = i / 100.0;
B[i] = i;
C[i] = 0;
}
vecAdd (A, B, C, n);
for (int i = 0; i < n; i++) {
printf("%f\n", C[i]);
}
// Release host buffers (previously leaked: new[] without delete[]).
delete[] A;
delete[] B;
delete[] C;
return 0;
}
| f8f109966b9941d8f348b0f88eb6233887f92f77.cu | #include <stdio.h>
// Element-wise vector addition: C[i] = A[i] + B[i] for every i in [0, n).
// One thread per element; the i < n guard covers the ragged grid tail.
__global__
void vecAddKernel(float* A, float* B, float* C, int n)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if(i < n) C[i] = A[i] + B[i];
}
// Host wrapper for vecAddKernel: copies A and B to the device, launches one
// thread per element in 256-thread blocks, and copies the sum back into C.
// NOTE(review): no error checking on the cudaMalloc/cudaMemcpy/launch calls —
// failures are silent; consider checking the returned cudaError_t values.
void vecAdd(float* A, float* B, float* C, int n) {
int size = n * sizeof(float);
float *d_A, *d_B, *d_C;
///
cudaMalloc((void **) &d_A, size);
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_B, size);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_C, size);
// ceil(n/256.0) blocks covers the ragged tail; kernel guards i < n.
vecAddKernel<<<ceil(n/256.0), 256>>>(d_A, d_B, d_C, n);
cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
///Free device memory for A, B, C
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
// Demo driver: builds two 10000-element vectors, adds them on the GPU via
// vecAdd, and prints every element of the result.
int main(int argc, char const *argv[]) {
float *A, *B, *C;
int n = 10000;
A = new float[n];
B = new float[n];
C = new float[n];
// Initialise inputs; C is cleared so stale memory cannot masquerade as
// a result if the copy-back fails.
for (int i = 0; i < n; i++) {
A[i] = i / 100.0;
B[i] = i;
C[i] = 0;
}
vecAdd (A, B, C, n);
for (int i = 0; i < n; i++) {
printf("%f\n", C[i]);
}
// Release host buffers (previously leaked: new[] without delete[]).
delete[] A;
delete[] B;
delete[] C;
return 0;
}
|
8bff497e80d8c6dae74999df994e0cd265805b40.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amg_solver.h>
#include <amg_config.h>
#include <amg.h>
#include <basic_types.h>
#include <misc.h>
#include <assert.h>
#include <util.h>
using std::string;
namespace amgx
{
// Dumps the full AMG configuration when the "print_config" parameter is set
// to 1 in the given solver scope; otherwise a no-op.
template< class T_Config >
void AMG_Solver<T_Config>::process_config(AMG_Config &in_cfg, std::string solver_scope)
{
if (in_cfg.getParameter<int>("print_config", solver_scope) == 1)
{
in_cfg.printAMGConfig();
}
}
// Shared initialisation for the constructors: reads the configured solver,
// allocates it via the factory, records which config scope controls AMG
// structure reuse, and (optionally) creates the timing events.
template< class T_Config >
void AMG_Solver<T_Config>::init()
{
std::string solver_value, solver_scope;
m_cfg->template getParameter<std::string>("solver", solver_value, "default", solver_scope);
process_config(*m_cfg, solver_scope);
// pass thread manager to solver
solver = SolverFactory<T_Config>::allocate(*m_cfg, "default", "solver", m_resources->get_tmng());
structure_reuse_levels_scope = "";
// Reusing structure
if (solver_value == "AMG") // AMG is used as main solver
{
structure_reuse_levels_scope = solver_scope;
}
else
{
// AMG might instead appear as the preconditioner of the main solver;
// in that case structure reuse is controlled by the preconditioner scope.
std::string preconditioner_value, preconditioner_scope;
m_cfg->template getParameter<std::string>("preconditioner", preconditioner_value, solver_scope, preconditioner_scope);
if (preconditioner_value == "AMG")
{
structure_reuse_levels_scope = preconditioner_scope;
}
else
{
// No AMG anywhere in the solver chain: structure reuse is disabled.
structure_reuse_levels_scope = "";
}
}
// Timing events are only created when timing was requested, so the
// destructor is presumably expected to destroy them under the same flag.
if ( m_with_timings )
{
hipEventCreate(&m_setup_start);
hipEventCreate(&m_setup_stop);
hipEventCreate(&m_solve_start);
hipEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_Solver<T_Config>::AMG_Solver(Resources *res, AMG_Configuration *cfg) : m_with_timings(false), m_resources(res), m_cfg_self(false)
{
if (cfg)
{
m_cfg = cfg->getConfigObject();
}
else
{
m_cfg = res->getResourcesConfig();
}
init();
}
template< class T_Config >
AMG_Solver<T_Config>::AMG_Solver(Resources *res, AMG_Configuration &cfg) : m_with_timings(false), m_resources(res), m_cfg_self(true)
{
m_cfg = new AMG_Config;
*m_cfg = *(cfg.getConfigObject());
init();
}
template< class T_Config >
AMG_Solver<T_Config>::AMG_Solver(const AMG_Solver<T_Config> &amg_solver)
{
solver = amg_solver.solver;
m_resources = amg_solver.getResources();
m_cfg = amg_solver.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = amg_solver.m_ptrA;
m_with_timings = amg_solver.m_with_timings;
if ( m_with_timings )
{
hipEventCreate(&m_setup_start);
hipEventCreate(&m_setup_stop);
hipEventCreate(&m_solve_start);
hipEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_Solver<T_Config> &AMG_Solver<T_Config>::operator=(const AMG_Solver<T_Config> &amg_solver)
{
solver = amg_solver.solver;
m_resources = amg_solver.getResources();
m_cfg = amg_solver.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = amg_solver.m_ptrA;
m_with_timings = amg_solver.m_with_timings;
if ( m_with_timings )
{
hipEventCreate(&m_setup_start);
hipEventCreate(&m_setup_stop);
hipEventCreate(&m_solve_start);
hipEventCreate(&m_solve_stop);
}
return *this;
}
template< class T_Config >
AMG_Solver<T_Config>::~AMG_Solver()
{
if (m_cfg_self)
{
delete m_cfg;
}
if ( solver->decr_ref_count() )
{
delete solver;
if ( !m_with_timings )
{
return;
}
std::cerr << std::endl;
float elapsed_time = 0.0f;
hipEventElapsedTime(&elapsed_time, m_setup_start, m_setup_stop);
std::cerr << "AMG_Solver::setup time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
hipEventElapsedTime(&elapsed_time, m_solve_start, m_solve_stop);
std::cerr << "AMG_Solver::solve time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
hipEventDestroy(m_setup_start);
hipEventDestroy(m_setup_stop);
hipEventDestroy(m_solve_start);
hipEventDestroy(m_solve_stop);
}
}
template < class T_Config >
const AMG_Solver<T_Config>::PODVector_h &AMG_Solver<T_Config>::get_residual( int res_num ) const
{
return solver->get_residual(res_num);
}
/****************************************************
* Sets A as the matrix for the AMG system
****************************************************/
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::setup( Matrix<T_Config> &A)//&A0)
{
bool reuse_fine_matrix = (getStructureReuseLevels() > 0) && A.is_matrix_setup();
bool reuse_all = (getStructureReuseLevels() == -1) && A.is_matrix_setup();
if (reuse_all)
{
solver->reset_setup_timer();
return AMGX_OK;
}
if ( m_with_timings )
{
hipEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
AMGX_ERROR e = solver->setup_no_throw(A, reuse_fine_matrix);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
hipEventRecord(m_setup_stop);
hipEventSynchronize(m_setup_stop);
}
return e;
}
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::resetup( Matrix<T_Config> &A)//&A0 )
{
if ( m_with_timings )
{
hipEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
AMGX_ERROR e = solver->setup_no_throw(A, true);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
hipEventRecord(m_setup_stop);
hipEventSynchronize(m_setup_stop);
}
return e;
}
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::setup_capi( std::shared_ptr<Matrix<T_Config>> pA0)
{
m_ptrA = pA0;
return setup(*m_ptrA);
}
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::resetup_capi( std::shared_ptr<Matrix<T_Config>> pA0)
{
m_ptrA = pA0;
return resetup(*m_ptrA);
}
/****************************************************
* Solves the AMG system Ax=b
***************************************************/
template<class T_Config>
AMGX_ERROR AMG_Solver<T_Config>::solve( Vector<T_Config> &b, Vector<T_Config> &x, AMGX_STATUS &status, bool xIsZero )
{
if ( m_with_timings )
{
hipEventRecord(m_solve_start);
}
AMGX_ERROR e = solver->solve_no_throw( b, x, status, xIsZero );
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
hipEventRecord(m_solve_stop);
hipEventSynchronize(m_solve_stop);
}
return e;
}
template<class T_Config>
int AMG_Solver<T_Config>::get_num_iters()
{
return solver->get_num_iters();
}
template< class T_Config >
int AMG_Solver<T_Config>::getStructureReuseLevels()
{
int lvls = 0;
if (structure_reuse_levels_scope != "") // AMG is used as main solver
{
lvls = m_cfg->template getParameter<int>("structure_reuse_levels", structure_reuse_levels_scope);
}
return lvls;
}
/****************************************
* Explict instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class AMG_Solver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
| 8bff497e80d8c6dae74999df994e0cd265805b40.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <amg_solver.h>
#include <amg_config.h>
#include <amg.h>
#include <basic_types.h>
#include <misc.h>
#include <assert.h>
#include <util.h>
using std::string;
namespace amgx
{
template< class T_Config >
void AMG_Solver<T_Config>::process_config(AMG_Config &in_cfg, std::string solver_scope)
{
if (in_cfg.getParameter<int>("print_config", solver_scope) == 1)
{
in_cfg.printAMGConfig();
}
}
template< class T_Config >
void AMG_Solver<T_Config>::init()
{
std::string solver_value, solver_scope;
m_cfg->template getParameter<std::string>("solver", solver_value, "default", solver_scope);
process_config(*m_cfg, solver_scope);
// pass thread manager to solver
solver = SolverFactory<T_Config>::allocate(*m_cfg, "default", "solver", m_resources->get_tmng());
structure_reuse_levels_scope = "";
// Reusing structure
if (solver_value == "AMG") // AMG is used as main solver
{
structure_reuse_levels_scope = solver_scope;
}
else
{
// AMG might be used as preconditioner of the main solver std::string preconditioner_value, preconditioner_scope;
std::string preconditioner_value, preconditioner_scope;
m_cfg->template getParameter<std::string>("preconditioner", preconditioner_value, solver_scope, preconditioner_scope);
if (preconditioner_value == "AMG")
{
structure_reuse_levels_scope = preconditioner_scope;
}
else
{
structure_reuse_levels_scope = "";
}
}
if ( m_with_timings )
{
cudaEventCreate(&m_setup_start);
cudaEventCreate(&m_setup_stop);
cudaEventCreate(&m_solve_start);
cudaEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_Solver<T_Config>::AMG_Solver(Resources *res, AMG_Configuration *cfg) : m_with_timings(false), m_resources(res), m_cfg_self(false)
{
if (cfg)
{
m_cfg = cfg->getConfigObject();
}
else
{
m_cfg = res->getResourcesConfig();
}
init();
}
template< class T_Config >
AMG_Solver<T_Config>::AMG_Solver(Resources *res, AMG_Configuration &cfg) : m_with_timings(false), m_resources(res), m_cfg_self(true)
{
m_cfg = new AMG_Config;
*m_cfg = *(cfg.getConfigObject());
init();
}
template< class T_Config >
AMG_Solver<T_Config>::AMG_Solver(const AMG_Solver<T_Config> &amg_solver)
{
solver = amg_solver.solver;
m_resources = amg_solver.getResources();
m_cfg = amg_solver.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = amg_solver.m_ptrA;
m_with_timings = amg_solver.m_with_timings;
if ( m_with_timings )
{
cudaEventCreate(&m_setup_start);
cudaEventCreate(&m_setup_stop);
cudaEventCreate(&m_solve_start);
cudaEventCreate(&m_solve_stop);
}
}
template< class T_Config >
AMG_Solver<T_Config> &AMG_Solver<T_Config>::operator=(const AMG_Solver<T_Config> &amg_solver)
{
solver = amg_solver.solver;
m_resources = amg_solver.getResources();
m_cfg = amg_solver.getConfig();
m_cfg_self = false;
solver->incr_ref_count();
m_ptrA = amg_solver.m_ptrA;
m_with_timings = amg_solver.m_with_timings;
if ( m_with_timings )
{
cudaEventCreate(&m_setup_start);
cudaEventCreate(&m_setup_stop);
cudaEventCreate(&m_solve_start);
cudaEventCreate(&m_solve_stop);
}
return *this;
}
template< class T_Config >
AMG_Solver<T_Config>::~AMG_Solver()
{
if (m_cfg_self)
{
delete m_cfg;
}
if ( solver->decr_ref_count() )
{
delete solver;
if ( !m_with_timings )
{
return;
}
std::cerr << std::endl;
float elapsed_time = 0.0f;
cudaEventElapsedTime(&elapsed_time, m_setup_start, m_setup_stop);
std::cerr << "AMG_Solver::setup time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
cudaEventElapsedTime(&elapsed_time, m_solve_start, m_solve_stop);
std::cerr << "AMG_Solver::solve time: " << 1.0e-3 * elapsed_time << "s" << std::endl;
cudaEventDestroy(m_setup_start);
cudaEventDestroy(m_setup_stop);
cudaEventDestroy(m_solve_start);
cudaEventDestroy(m_solve_stop);
}
}
template < class T_Config >
const AMG_Solver<T_Config>::PODVector_h &AMG_Solver<T_Config>::get_residual( int res_num ) const
{
return solver->get_residual(res_num);
}
/****************************************************
* Sets A as the matrix for the AMG system
****************************************************/
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::setup( Matrix<T_Config> &A)//&A0)
{
bool reuse_fine_matrix = (getStructureReuseLevels() > 0) && A.is_matrix_setup();
bool reuse_all = (getStructureReuseLevels() == -1) && A.is_matrix_setup();
if (reuse_all)
{
solver->reset_setup_timer();
return AMGX_OK;
}
if ( m_with_timings )
{
cudaEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
AMGX_ERROR e = solver->setup_no_throw(A, reuse_fine_matrix);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
cudaEventRecord(m_setup_stop);
cudaEventSynchronize(m_setup_stop);
}
return e;
}
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::resetup( Matrix<T_Config> &A)//&A0 )
{
if ( m_with_timings )
{
cudaEventRecord(m_setup_start);
}
// postpone free syncs, use device pool
memory::setAsyncFreeFlag(true);
AMGX_ERROR e = solver->setup_no_throw(A, true);
m_resources->get_tmng()->wait_threads();
thrust::global_thread_handle::joinDevicePools();
// reset settings to normal
memory::setAsyncFreeFlag(false);
// free postponed objects
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
cudaEventRecord(m_setup_stop);
cudaEventSynchronize(m_setup_stop);
}
return e;
}
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::setup_capi( std::shared_ptr<Matrix<T_Config>> pA0)
{
m_ptrA = pA0;
return setup(*m_ptrA);
}
template< class T_Config >
AMGX_ERROR AMG_Solver<T_Config>::resetup_capi( std::shared_ptr<Matrix<T_Config>> pA0)
{
m_ptrA = pA0;
return resetup(*m_ptrA);
}
/****************************************************
* Solves the AMG system Ax=b
***************************************************/
template<class T_Config>
AMGX_ERROR AMG_Solver<T_Config>::solve( Vector<T_Config> &b, Vector<T_Config> &x, AMGX_STATUS &status, bool xIsZero )
{
if ( m_with_timings )
{
cudaEventRecord(m_solve_start);
}
AMGX_ERROR e = solver->solve_no_throw( b, x, status, xIsZero );
thrust::global_thread_handle::cudaFreeWait();
if ( m_with_timings )
{
cudaEventRecord(m_solve_stop);
cudaEventSynchronize(m_solve_stop);
}
return e;
}
template<class T_Config>
int AMG_Solver<T_Config>::get_num_iters()
{
return solver->get_num_iters();
}
template< class T_Config >
int AMG_Solver<T_Config>::getStructureReuseLevels()
{
int lvls = 0;
if (structure_reuse_levels_scope != "") // AMG is used as main solver
{
lvls = m_cfg->template getParameter<int>("structure_reuse_levels", structure_reuse_levels_scope);
}
return lvls;
}
/****************************************
* Explict instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class AMG_Solver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
fe1228798636873bba5544c6f2ab97f501cfc140.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeisai_maxblock.cu, normal z -> s, Wed Jan 2 14:18:54 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_s
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
__global__ void
magma_sselect_insert_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *col,
float *val,
magma_index_t *rowMT,
magma_index_t *colMT,
float *valMT,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
magma_index_t select = selection[j];
// return if no match for this thread block
if( select != p ){
return;
}
magma_index_t count = sizes[j];
if( i<count ){
colMT[ rowMT[j]+i ] = col[ row[j]+i ];
valMT[ rowMT[j]+i ] = val[ row[j]+i ];
}
}// kernel
__global__ void
magma_sselect_rowptr_kernel(
magma_int_t n,
magma_index_t *sizes,
magma_index_t *rowMT )
{
// unfortunately sequential...
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i == 0 ){
magma_index_t count = 0;
rowMT[0] = 0;
magma_index_t j=0;
for( j=0; j<n; j++ ){
count = count+sizes[j];
rowMT[j+1] = count;
}
}
}// kernel
__global__ void
magma_sselect_pattern_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
magma_index_t diff = row[i+1] - row[i];
if( diff <= WRP ){
selection[ i ] = p;
sizes[i] = diff;
}
}
}// kernel
/**
Purpose
-------
This routine maximizes the pattern for the ISAI preconditioner. Precisely,
it computes L, L^2, L^3, L^4, L^5 and then selects the columns of M_L
such that the nonzer-per-column are the lower max than the
implementation-specific limit (32).
The input is the original matrix (row-major)
The output is already col-major.
Arguments
---------
@param[in,out]
L magma_s_matrix
Incomplete factor.
@param[in,out]
MT magma_s_matrix*
SPAI preconditioner structure, CSR col-major.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_sgeisai_maxblock(
magma_s_matrix L,
magma_s_matrix *MT,
magma_queue_t queue )
{
magma_int_t info = 0;
int bs1 = 512;
int bs2 = 1;
int bs3 = 1;
int gs1 = magma_ceildiv( L.num_rows, bs1 );
int gs2 = 1;
int gs3 = 1;
dim3 block( bs1, bs2, bs3 );
dim3 grid( gs1,gs2,gs3 );
dim3 block0( 1, 1, 1 );
dim3 grid0( 1, 1, 1 );
int blocksize1 = WARP_SIZE;
int blocksize2 = 1;
int dimgrid1 = min( int( sqrt( float( L.num_rows ))), 65535 );
int dimgrid2 = min(magma_ceildiv( L.num_rows, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( L.num_rows, dimgrid1*dimgrid2 );
dim3 block2( blocksize1, blocksize2, 1 );
dim3 grid2( dimgrid1, dimgrid2, dimgrid3 );
magma_s_matrix L2={Magma_CSR}, L3={Magma_CSR},
L4={Magma_CSR}, L5={Magma_CSR}, T={Magma_CSR};
magma_index_t *selections_d = NULL, *sizes_d = NULL;
CHECK( magma_index_malloc( &selections_d, L.num_rows ) );
CHECK( magma_index_malloc( &sizes_d, L.num_rows ) );
magma_int_t nonzeros;
// generate all pattern that may be considered
// pattern L
CHECK( magma_s_mtransfer( L, &T, Magma_DEV, Magma_DEV, queue ) );
// pattern L^2
CHECK( magma_s_spmm( MAGMA_S_ONE, L, T, &L2, queue ) );
// pattern L^3
CHECK( magma_s_spmm( MAGMA_S_ONE, T, L2, &L3, queue ) );
// pattern L^4
CHECK( magma_s_spmm( MAGMA_S_ONE, T, L3, &L4, queue ) );
// pattern L^5
CHECK( magma_s_spmm( MAGMA_S_ONE, T, L4, &L5, queue ) );
// check for pattern L
hipLaunchKernelGGL(( magma_sselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 1, L.drow, selections_d, sizes_d );
// check for pattern L2
hipLaunchKernelGGL(( magma_sselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 2, L2.drow, selections_d, sizes_d );
// check for pattern L3
hipLaunchKernelGGL(( magma_sselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 3, L3.drow, selections_d, sizes_d );
// check for pattern L4
hipLaunchKernelGGL(( magma_sselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 4, L4.drow, selections_d, sizes_d );
// check for pattern L5
hipLaunchKernelGGL(( magma_sselect_pattern_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
L.num_rows, 5, L5.drow, selections_d, sizes_d );
//now allocate the roptr for MT
CHECK( magma_index_malloc( &MT->drow, L.num_rows+1 ) );
// global nonzero count + generate rowptr
hipLaunchKernelGGL(( magma_sselect_rowptr_kernel), dim3(grid0), dim3(block0), 0, queue->cuda_stream() ,
L.num_rows, sizes_d, MT->drow );
hipMemcpy( &nonzeros, MT->drow+L.num_rows, sizeof(magma_index_t), hipMemcpyDeviceToHost);
//now allocate the memory needed
CHECK( magma_index_malloc( &MT->dcol, nonzeros ) );
CHECK( magma_smalloc( &MT->dval, nonzeros ) );
// fill in some info
MT->memory_location = Magma_DEV;
MT->storage_type = Magma_CSR;
MT->num_rows = L.num_rows;
MT->num_cols = L.num_cols;
MT->nnz = nonzeros;
MT->true_nnz = nonzeros;
MT->fill_mode = T.fill_mode;
// now insert the data needed
hipLaunchKernelGGL(( magma_sselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 1,
L.drow, L.dcol, L.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_sselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 2,
L2.drow, L2.dcol, L2.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_sselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 3,
L3.drow, L3.dcol, L3.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_sselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 4,
L4.drow, L4.dcol, L4.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
hipLaunchKernelGGL(( magma_sselect_insert_kernel), dim3(grid2), dim3(block2), 0, queue->cuda_stream() ,
L.num_rows, 5,
L5.drow, L5.dcol, L5.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
cleanup:
magma_free( sizes_d );
magma_free( selections_d );
magma_smfree( &T, queue );
magma_smfree( &L2, queue );
magma_smfree( &L3, queue );
magma_smfree( &L4, queue );
magma_smfree( &L5, queue );
return info;
}
| fe1228798636873bba5544c6f2ab97f501cfc140.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgeisai_maxblock.cu, normal z -> s, Wed Jan 2 14:18:54 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_s
#define REAL
#define BLOCKSIZE 32
#define WARP_SIZE 32
#define WRP 32
#define WRQ 4
__global__ void
magma_sselect_insert_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *col,
float *val,
magma_index_t *rowMT,
magma_index_t *colMT,
float *valMT,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = threadIdx.x;
int j = blockIdx.y * gridDim.x + blockIdx.x;
magma_index_t select = selection[j];
// return if no match for this thread block
if( select != p ){
return;
}
magma_index_t count = sizes[j];
if( i<count ){
colMT[ rowMT[j]+i ] = col[ row[j]+i ];
valMT[ rowMT[j]+i ] = val[ row[j]+i ];
}
}// kernel
__global__ void
magma_sselect_rowptr_kernel(
magma_int_t n,
magma_index_t *sizes,
magma_index_t *rowMT )
{
// unfortunately sequential...
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i == 0 ){
magma_index_t count = 0;
rowMT[0] = 0;
magma_index_t j=0;
for( j=0; j<n; j++ ){
count = count+sizes[j];
rowMT[j+1] = count;
}
}
}// kernel
__global__ void
magma_sselect_pattern_kernel(
magma_int_t n,
magma_int_t p,
magma_index_t *row,
magma_index_t *selection,
magma_index_t *sizes )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < n ){
magma_index_t diff = row[i+1] - row[i];
if( diff <= WRP ){
selection[ i ] = p;
sizes[i] = diff;
}
}
}// kernel
/**
Purpose
-------
This routine maximizes the pattern for the ISAI preconditioner. Precisely,
it computes L, L^2, L^3, L^4, L^5 and then selects the columns of M_L
such that the nonzer-per-column are the lower max than the
implementation-specific limit (32).
The input is the original matrix (row-major)
The output is already col-major.
Arguments
---------
@param[in,out]
L magma_s_matrix
Incomplete factor.
@param[in,out]
MT magma_s_matrix*
SPAI preconditioner structure, CSR col-major.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_sgeisai_maxblock(
magma_s_matrix L,
magma_s_matrix *MT,
magma_queue_t queue )
{
magma_int_t info = 0;
int bs1 = 512;
int bs2 = 1;
int bs3 = 1;
int gs1 = magma_ceildiv( L.num_rows, bs1 );
int gs2 = 1;
int gs3 = 1;
dim3 block( bs1, bs2, bs3 );
dim3 grid( gs1,gs2,gs3 );
dim3 block0( 1, 1, 1 );
dim3 grid0( 1, 1, 1 );
int blocksize1 = WARP_SIZE;
int blocksize2 = 1;
int dimgrid1 = min( int( sqrt( float( L.num_rows ))), 65535 );
int dimgrid2 = min(magma_ceildiv( L.num_rows, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( L.num_rows, dimgrid1*dimgrid2 );
dim3 block2( blocksize1, blocksize2, 1 );
dim3 grid2( dimgrid1, dimgrid2, dimgrid3 );
magma_s_matrix L2={Magma_CSR}, L3={Magma_CSR},
L4={Magma_CSR}, L5={Magma_CSR}, T={Magma_CSR};
magma_index_t *selections_d = NULL, *sizes_d = NULL;
CHECK( magma_index_malloc( &selections_d, L.num_rows ) );
CHECK( magma_index_malloc( &sizes_d, L.num_rows ) );
magma_int_t nonzeros;
// generate all pattern that may be considered
// pattern L
CHECK( magma_s_mtransfer( L, &T, Magma_DEV, Magma_DEV, queue ) );
// pattern L^2
CHECK( magma_s_spmm( MAGMA_S_ONE, L, T, &L2, queue ) );
// pattern L^3
CHECK( magma_s_spmm( MAGMA_S_ONE, T, L2, &L3, queue ) );
// pattern L^4
CHECK( magma_s_spmm( MAGMA_S_ONE, T, L3, &L4, queue ) );
// pattern L^5
CHECK( magma_s_spmm( MAGMA_S_ONE, T, L4, &L5, queue ) );
// check for pattern L
magma_sselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 1, L.drow, selections_d, sizes_d );
// check for pattern L2
magma_sselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 2, L2.drow, selections_d, sizes_d );
// check for pattern L3
magma_sselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 3, L3.drow, selections_d, sizes_d );
// check for pattern L4
magma_sselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 4, L4.drow, selections_d, sizes_d );
// check for pattern L5
magma_sselect_pattern_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( L.num_rows, 5, L5.drow, selections_d, sizes_d );
//now allocate the roptr for MT
CHECK( magma_index_malloc( &MT->drow, L.num_rows+1 ) );
// global nonzero count + generate rowptr
magma_sselect_rowptr_kernel<<< grid0, block0, 0, queue->cuda_stream() >>>
( L.num_rows, sizes_d, MT->drow );
cudaMemcpy( &nonzeros, MT->drow+L.num_rows, sizeof(magma_index_t), cudaMemcpyDeviceToHost);
//now allocate the memory needed
CHECK( magma_index_malloc( &MT->dcol, nonzeros ) );
CHECK( magma_smalloc( &MT->dval, nonzeros ) );
// fill in some info
MT->memory_location = Magma_DEV;
MT->storage_type = Magma_CSR;
MT->num_rows = L.num_rows;
MT->num_cols = L.num_cols;
MT->nnz = nonzeros;
MT->true_nnz = nonzeros;
MT->fill_mode = T.fill_mode;
// now insert the data needed
magma_sselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 1,
L.drow, L.dcol, L.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_sselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 2,
L2.drow, L2.dcol, L2.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_sselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 3,
L3.drow, L3.dcol, L3.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_sselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 4,
L4.drow, L4.dcol, L4.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
magma_sselect_insert_kernel<<< grid2, block2, 0, queue->cuda_stream() >>>
( L.num_rows, 5,
L5.drow, L5.dcol, L5.dval,
MT->drow, MT->dcol, MT->dval,
selections_d, sizes_d );
cleanup:
magma_free( sizes_d );
magma_free( selections_d );
magma_smfree( &T, queue );
magma_smfree( &L2, queue );
magma_smfree( &L3, queue );
magma_smfree( &L4, queue );
magma_smfree( &L5, queue );
return info;
}
|
73bb88490b8fd6d3bb6bf1af8d4082e3fece084b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
float h_A[]= {
0.5203431404205534, 0.8397212236917517, 0.8480297885975157, 0.5826219921812311, 0.8835936178913075, 0.5035784748336407, 0.7515095002498209, 0.9251304241177449, 0.7090255192089898, 0.8358676530410938, 0.8610267321433007, 0.5111123121975225, 0.5228948919205396, 0.8433140045336898, 0.8026350145159813, 0.5784592072770343, 0.5786629850526193, 0.7016442448995324, 0.9064564934513507, 0.8288443418591398, 0.6276902458280357, 0.6203700901002256, 0.6468332074041738, 0.7369908913233524, 0.636306788337418, 0.5253791181005871, 0.8769324735620941, 0.5420684137293498, 0.6508777416335889, 0.8821156914037118, 0.8976250606466047, 0.6087595765813151, 0.8708529435738905, 0.9156211084511412, 0.6490906569126473, 0.7461930584917886, 0.7079483669140391, 0.5892851638022565, 0.6432305871550201, 0.5707314888427635, 0.8474908995534844, 0.5726122299939299, 0.6585698746709817, 0.6302103230255871, 0.7657838012516989, 0.8087807480983893, 0.9210323533924294, 0.8955543439765705, 0.5583321562952835, 0.708213489759663, 0.8416640038215653, 0.6330836032968999, 0.7201827739505046, 0.9321913054218232, 0.7020747442540791, 0.7053848528741964, 0.9771554556504422, 0.5609698632907133, 0.5315478943284, 0.7876828456394924, 0.5579599312495511, 0.6366342251557487, 0.8364642688013271, 0.9236731402122698, 0.6253979889733297, 0.6825800606759242, 0.9238888155829234, 0.6074425006711761, 0.6908413724574975, 0.5104678654098072, 0.9802203729125396, 0.8828337394804235, 0.5250817946344204, 0.70397495668948, 0.7729071667971227, 0.735975147985976, 0.5653131245613776, 0.7575126172072388, 0.5751377499739115, 0.8024426496853787, 0.8915080767630985, 0.6212695660026939, 0.8087143486195272, 0.8107431743984268, 0.8025250072478772, 0.5300264243571515, 0.5829735010271118, 0.6818140734353173, 0.7745569279648881, 0.9947518710683161, 0.904275093601373, 0.6972181232397444, 0.8240464942563854, 0.5231453145673379, 0.5091364629855013, 0.885059504946083, 0.6222865977997725, 0.9496123412793068, 0.9742295591013548, 0.6775681294467919, 
0.8748045298794692, 0.6359858312355736, 0.6876187317033112, 0.9236274294753131, 0.690211699853871, 0.6228848727884637, 0.8577516942663482, 0.6732975029748434, 0.8801466872283896, 0.917260529958412, 0.9826311419772767, 0.7193558438522033, 0.8324480784589878, 0.9328799835470047, 0.7339317804794636, 0.644083753667136, 0.7272132590971723, 0.5918541967807439, 0.6383669203464157, 0.7971867704819374, 0.5418019895417568, 0.960757842379981, 0.8538155006150716, 0.5170548275754467, 0.5486801645884092, 0.6716797662333205, 0.5082173052270811, 0.5250332622756126, 0.5860733912750284, 0.9633063812374363, 0.589115057309223, 0.9210013787595408, 0.777349725023708, 0.8321791480369274, 0.8086622981246018, 0.8576255427406076, 0.8025111002257476, 0.963913613664537, 0.8985864318102288, 0.7136332256952942, 0.6260023334705365, 0.7623378153743448, 0.692904374970135, 0.56478856896642, 0.5780435356751124, 0.9221311782392646, 0.850647007139613, 0.9546493052685845, 0.742091024204724, 0.9923063172877842, 0.7162066763553048, 0.9752569385794214, 0.9847684066286679, 0.8000828899754004, 0.6483585309598155, 0.5145761504298918, 0.7728986587297918, 0.571893701449103, 0.5570382411123982, 0.8142794188165889, 0.5640749450201907, 0.5703354728556698, 0.6727772585781636, 0.5373639670721617, 0.6493939056546429, 0.7816280453440945, 0.7852313971353728, 0.6669601081226659, 0.7152824330414025, 0.9087963461149877, 0.5629025948317353, 0.5226951240721222, 0.8884100828912305, 0.5945879418911111, 0.7165184567397249, 0.6815164198963927, 0.9578082873907765, 0.5370408889397291, 0.6872951422633078, 0.7700489996601028, 0.6647774966937952, 0.6105783333401239, 0.80106307832013, 0.7886757785212628, 0.9073398643564696, 0.6949801819937675, 0.8826323485122318, 0.9274975684571378, 0.8464947144708868, 0.7141601849435762, 0.8111790174913063, 0.5763076605057995, 0.5339216582459398, 0.8130581142835516, 0.7959929545403218, 0.8030484675140027, 0.574959029701118, 0.5236677290527036, 0.6572891248604242, 0.8446096183430976, 
0.5656027968607965, 0.7093239835556415, 0.6587263537731841, 0.5713833321692854, 0.5612106629534684, 0.9700281238199759, 0.5514705706060269, 0.6388550920157673, 0.8519688924448466, 0.8786574280045296, 0.6038599787818402, 0.9416941138117794, 0.6358612241110977, 0.8883704966811234, 0.6996190723416105, 0.9956088931169134, 0.8478241997193303, 0.6901642397367104, 0.8128114248134564, 0.6810427891619766, 0.8204095406291232, 0.9621985629562867, 0.856367981697269, 0.7653269594599583, 0.5987679760230213, 0.589995794204615, 0.5911120106298475, 0.6026776533838003, 0.9977219637801666, 0.8214772892477158, 0.689449436241151, 0.9138267458928123, 0.7884456857313399, 0.6362271185118022, 0.9542787691026922, 0.9951864189710133, 0.8463769665741032, 0.7688950457313333, 0.5665342455748519, 0.8295428719744601, 0.6057766083356046, 0.5577148007093304, 0.8745160877131563, 0.9079123759373582, 0.7551401106583036, 0.7487469164800666, 0.5141307413066468, 0.9692700283133449, 0.5812865011517745, 0.8202942948141845, 0.9570295538375908, 0.5176189345807687, 0.7591666083004392, 0.5341912971642238, 0.5853285510634902, 0.7604518143749598, 0.9710591468406042, 0.9543487942046693, 0.8138051275076188, 0.5818889405578049, 0.5664982816595141, 0.585963715221935, 0.8343838991230255, 0.8454397011375565, 0.6160931399511154, 0.8539195715572916, 0.8895639195108132, 0.9429636689318324, 0.611495721841149, 0.6117046330257163, 0.9894967203901341, 0.7521397422548455, 0.9617920890879745, 0.8009040359282134, 0.8365388313621254, 0.7086223197973793, 0.6761395667914698, 0.7104839326502712, 0.5818943252981688, 0.5711469610029383, 0.7867090520924566, 0.8116272798680475, 0.9233863740820099, 0.6839733274237645, 0.5084015908056441, 0.7755782097457264, 0.8218920257799069, 0.7618632525227906, 0.8478914900695964, 0.876673622985092, 0.8177494265091705, 0.7983190918419649, 0.9216389192842236, 0.5644462757298133, 0.8991894436630357, 0.5496826013438256, 0.7100998144584764, 0.7036120049829749, 0.850097458501377, 0.9066459789622109, 
0.9755218742966907, 0.7268819248339657, 0.877474368407867, 0.5347710153725337, 0.5063588460695527, 0.6599220403085961, 0.9224493886278775, 0.8207356018895275, 0.9796445415394676, 0.6853064775009721, 0.6211235849246309, 0.5546449642118234, 0.950916063558439, 0.5724712842120043, 0.8992875345051112, 0.5408635564762417, 0.8096707297810115, 0.7793897589622241, 0.7551693658343366, 0.7324034479293573, 0.9044752427090397, 0.8512337140305412, 0.6887054806128257, 0.9596882208483619, 0.6516344173214931, 0.7919901948829373, 0.9225737257139351, 0.813415864872998, 0.948992769860517, 0.6853207450650097, 0.6701230112371092, 0.958573924128074, 0.9279378405436631, 0.9703550136169539, 0.9211311966032043, 0.9318534087532111, 0.712245872911764, 0.987116829582855, 0.9677560184550125, 0.8749252080414578, 0.7891982787906784, 0.8265633654097493, 0.8623298338573049, 0.8089995809884563, 0.9648457716265586, 0.7080087719897789, 0.8839873939905675, 0.8217783159712708, 0.9949091522142777, 0.7365734095311951, 0.7144160224657027, 0.8295522696043087, 0.8844038175624909, 0.6034257843559996, 0.584688355583855, 0.7701831476290281, 0.8723744152040341, 0.5712998213415161, 0.7708436787458288, 0.9270974731027568, 0.7623013580233775, 0.9681766390827937, 0.7951787637858218, 0.6354765612265592, 0.8628034252257315, 0.7004969580372409, 0.5971448500277081, 0.7619132207055563, 0.8062732827126173, 0.6293800642528911, 0.6679001053583536, 0.7447432286970059, 0.5883280432499318, 0.6648886888462285, 0.9919098275818704, 0.6454721883305665, 0.8551260384330852, 0.96892819365192, 0.9992647790919129, 0.6517765592493302, 0.7385113259323808, 0.6928824332974053, 0.7192432989972123, 0.7644417318845556, 0.8520422094382372, 0.792226326599531, 0.8449640749860251, 0.9158095681658234, 0.9372374558159082, 0.5814827992915361, 0.5743005008535625, 0.5858684451407912, 0.6358496207796487, 0.9100675401447933, 0.5843955612726414, 0.7620604959569013, 0.8253898593314432, 0.9462721086213828, 0.9185749740064597, 0.8721959907170634, 
0.6503909461233254, 0.6656877572362729, 0.5068126262248784, 0.8528155530512893, 0.9430928401512835, 0.6652012098221841, 0.7980382553310637, 0.6273770214950631, 0.6983865965558143, 0.6021520412298705, 0.7788983545594592, 0.7339058967770653, 0.8082232533609396, 0.6238319790201647, 0.9669195265967436, 0.9822457311069832, 0.6062665729723207, 0.605216102712256, 0.8751600168892995, 0.8182337065093676, 0.5575531987449622, 0.7175055291593618, 0.7836170131470848, 0.9373162797355481, 0.790982537136969, 0.6200243135528545, 0.7983454917746677, 0.7633836164226173, 0.8408622775531398, 0.7888714595295054, 0.7519640156999186, 0.6877016343353599, 0.7906722981731363, 0.6417579796264768, 0.7768827041803019, 0.8820497380987047, 0.5895481786846886, 0.7650256857143349, 0.9920061517944667, 0.6081742699246736, 0.999197396932882, 0.5706831128690832, 0.5725763873415564, 0.8770996842554502, 0.5302838000687682, 0.792217275929586, 0.7743367639798118, 0.8836728200536803, 0.868564117843624, 0.5893825309103851, 0.7961192891254443, 0.9562198406824782, 0.9109815756403485, 0.506328441635111, 0.6802003594876924, 0.8512701149541887, 0.5616015838442878, 0.8493476108595746, 0.6613029658953269, 0.5652684746110833, 0.9691783735778201, 0.5221179694587872, 0.9073163381173976, 0.627246243137737, 0.7179346451971368, 0.9945991025261163, 0.7645935460039308, 0.5387800159027462, 0.7658588707077243, 0.5189598402873941, 0.96484976190008, 0.6044921226861688, 0.8254201321049424, 0.9660598615390925, 0.5690463818491074, 0.6031633992192171, 0.9637500601197808, 0.608328549379155, 0.7506157563737685, 0.7440058344249522, 0.5597773243005001, 0.6286460123274297, 0.8666956030511437, 0.6723504472167229, 0.5249480941695845, 0.9486637302754023, 0.7807664692988496, 0.970570206862918, 0.5813759071441842, 0.8961155368864477, 0.5266578596168554, 0.9348410245884027, 0.9593770243571289, 0.9975634388826823, 0.6110666561252437, 0.9769972330870186, 0.7227803923048159, 0.8194819673577332, 0.8882969732676528, 0.7076315991703114, 
0.5530414851344183, 0.744347676524571, 0.594497450346031, 0.5591802736385729, 0.5432897109831825, 0.5588675251201456, 0.8687796197534428, 0.9528396011669785, 0.7842315256776919, 0.9353180793492037, 0.5977285190594618, 0.8288261446626921, 0.795319843998712, 0.8679706848266565, 0.683725407060469, 0.9473139150709045, 0.7285700890082529, 0.8612448099030573, 0.9186458569433607, 0.781836666887874, 0.9110081528811045, 0.948389847009768, 0.8308827260968211, 0.7641714160526736, 0.5687695870054921, 0.8397198568422363, 0.9776849291591985, 0.9075101417317111, 0.9385805519159498, 0.5911416791309676, 0.6515923321532042, 0.6204125407521794, 0.6908862920849703, 0.802377365133198, 0.9114836101096723, 0.669925370291955, 0.7152170617824956, 0.8865751447470764, 0.7089869660264706, 0.5299252129320293, 0.8958854025051104, 0.6027214832699652, 0.9417300091115473, 0.6569422494786454, 0.8686788225945012, 0.5516246645361033, 0.5267589569344293, 0.6748196347720861, 0.6733393954764071, 0.8563800673545152, 0.8364634037231913, 0.9848307893663228, 0.9361242607992672, 0.7001626997126114, 0.8722978356997989, 0.6509092902366528, 0.9466928314712872, 0.5538922012286112, 0.8198884365801742, 0.7502687536246485, 0.8388016588299139, 0.939758462227133, 0.9732580822057626, 0.9941705338170446, 0.6110457522678975, 0.983343860325391, 0.5983059871049331, 0.8796802234461037, 0.9042623410368853, 0.8227489680122313, 0.9855761017031992, 0.8065499566899692, 0.6760744452929882, 0.5968162525271443, 0.5362903914807273, 0.736778604469406, 0.9636213485840475, 0.73191619963761, 0.5596476318819541, 0.8676141025564168, 0.5704183069893747, 0.6782030982823384, 0.799281446606053, 0.655857432454934, 0.5568168652542784, 0.7278040947211806, 0.7346887841472738, 0.6359725283230153, 0.6515078466727823, 0.5000686452476554, 0.7908049012464531, 0.5538837548867168, 0.7290990791843106, 0.9968074875787645, 0.6621444770788969, 0.9242414620330964, 0.8103095239105493, 0.8076833947743068, 0.8891472167746592, 0.604244834572546, 
0.8470412770702933, 0.9843473158504954, 0.5779948405024532, 0.9278792321925344, 0.8519513163665182, 0.7658329817372078, 0.9737243125202961, 0.7355063041440411, 0.5340082023618995, 0.7496018230860987, 0.8991691245729068, 0.7267891371726054, 0.8933747452345512, 0.6198291005354284, 0.7764771371611462, 0.95562138710989, 0.6943045011892621, 0.5860425062432495, 0.8380134193478634, 0.6675149862515524, 0.7943271752843486, 0.8565755505161533, 0.5337251728967052, 0.899775733254998, 0.9286892345821323, 0.7550667305125661, 0.9045708782250996, 0.7551171884322699, 0.7772017952349304, 0.9726039974345146, 0.6852302529609383, 0.6188860087590363, 0.8164218744728308, 0.5516002642868958, 0.5125858499711231, 0.9934170797470344, 0.5400047081978151, 0.8446993577299244, 0.5443629182994096, 0.6008909335499308, 0.7937995343911937, 0.7809853005446483, 0.619705821628179, 0.8673803800999491, 0.5089047303597505, 0.7346947389771202, 0.895759030394984, 0.5883166364737163, 0.8019206609828426, 0.645807661031081, 0.9610067940476148, 0.6409373016906703, 0.5890632020189965, 0.5966432605714889, 0.8663420606635275, 0.6542472383647144, 0.5900506320816454, 0.6948881547507954, 0.8710179824980828, 0.7556334981519002, 0.922689865671378, 0.7953903534160092, 0.9357811372754385, 0.5099131653040377, 0.6142064927601363, 0.849004728308443, 0.7527168273772946, 0.7721132544244116, 0.9377253317229208, 0.760303132322157, 0.6275790340770389, 0.7865815347988849, 0.5958143008521499, 0.5930424002663507, 0.9228704000858796, 0.8643501970699207, 0.8219069398799044, 0.6497902591794578, 0.7737100682187628, 0.6999300417816896, 0.7199568269121205, 0.6329478428188232, 0.5387372514485208, 0.6145019539842349, 0.9471977255280037, 0.5199033069466388, 0.8595412017263903, 0.5758344555922172, 0.8176914509990199, 0.9019793287487988, 0.7523398354364187, 0.9826369566098909, 0.600129996906428, 0.9805473361239292, 0.9681203966263335, 0.823761364408897, 0.8275846982160338, 0.6254797225174762, 0.5621820483438537, 0.6926967817645722, 
0.5942799523328575, 0.6473912625559249, 0.884188210984612, 0.7806743225891769, 0.7052676421041517, 0.5679980982618972, 0.5549557322601153, 0.7828117435760831, 0.9856942766414128, 0.5207944705952222, 0.9712851623839505, 0.8842022877875596, 0.8735204031002208, 0.7918783413003059, 0.662970182852041, 0.839085822204849, 0.6421558749155013, 0.7840711701534384, 0.7633362657995222, 0.9336332957912697, 0.6337901621853318, 0.559335602118759, 0.7160860742589144, 0.607429856150081, 0.8442134994303342, 0.8568819891476316, 0.6076491270548179, 0.7677322784468138, 0.7364180079235931, 0.7340231528229857, 0.9155256481708671, 0.7802545028415374, 0.9708617845349503, 0.693970340953143, 0.6270365582671611, 0.7471410569409986, 0.5212891662487005, 0.9258701002048482, 0.9788316737053031, 0.9814830128701484, 0.5931470483738288, 0.5473486966538252, 0.5154634437282494, 0.6692743914563233, 0.5823832319505364, 0.7726970116813722, 0.6050018087721545, 0.5385756705190033, 0.6220741267978971, 0.595447701432347, 0.567171882777969, 0.9827564877905901, 0.8639183983269502, 0.8449803779056758, 0.5984158081515518, 0.6672755381043416, 0.6129770251222896, 0.9519509324820585, 0.9443548927537069, 0.930026037935639, 0.632000113744978, 0.9763414509344327, 0.7144332723103214, 0.8789558575127457, 0.5366363845582192, 0.7290195480683012, 0.9578821360381251, 0.6424413127919231, 0.8431090518127221, 0.7003620897303027, 0.579065628043969, 0.5039640265178852, 0.7757588325588183, 0.9764508815691322, 0.9907032216167453, 0.5055892914881991, 0.767900071488195, 0.6996478231356794, 0.7420722563955765, 0.891922984006845, 0.9250226396834449, 0.5462276719142225, 0.7035003974009588, 0.5336917630533915, 0.7152082166392502, 0.7251742708445106, 0.7509989581193668, 0.9301433175089546, 0.7438051383514331, 0.6060079714083072, 0.6238150169464615, 0.738771292286822, 0.8442030650473165, 0.6791100011209934, 0.9734617437765596, 0.8422036949379736, 0.7004438096133097, 0.8482822223172912, 0.9212111675610329, 0.9972072339377982, 
0.7582100937690708, 0.8761769909199051, 0.5058702092029187, 0.9332384173215658, 0.5302358045311276, 0.5458152894096422, 0.9768560645703076, 0.9749536332589055, 0.8835146700709409, 0.7256720430818793, 0.8052736690490532, 0.8697131565435992, 0.5114453488127082, 0.7252488608263322, 0.7430935587154945, 0.6875257493248554, 0.602212475128676, 0.6415222954332838, 0.815475766522928, 0.7911746541874881, 0.8777085823554018, 0.555809635806753, 0.6652193238271213, 0.6188384724418123, 0.9778558882118971, 0.7451939526787021, 0.6431924220740588, 0.5947552938090861, 0.6195761649751095, 0.9404660324660787, 0.8944618542043291, 0.8445948267896739, 0.9072348325132218, 0.7451458680084253, 0.6011995378884523, 0.6650616963954223, 0.8180782556503694, 0.7999011737920103, 0.6483912726794674, 0.7411734343608426, 0.8839845360039944, 0.8477541892068341, 0.959515068629005, 0.7278713012213751, 0.8312621970657555, 0.587896671988565, 0.7251999331080521, 0.8772997253147354, 0.7079438760844243, 0.8524693583481937, 0.889994998727714, 0.6780076452302286, 0.9455824324753717, 0.7568994353636314, 0.5889174432805003, 0.9338588557696317, 0.7894634349625331, 0.6438385747510796, 0.9760094080269457, 0.9343700804645332, 0.7294289522269981, 0.756148419408781, 0.8390369692684783, 0.821394106739417, 0.7044517012025596, 0.5720261446341461, 0.6430402528106016, 0.5324854298967597, 0.9924998594171842, 0.9694243150877305, 0.6809347788774234, 0.5579201961590581, 0.6999653149017011, 0.845552480119644, 0.8124155327932256, 0.5132477205266918, 0.9563086688409959, 0.8915275784557943, 0.5906105302720789, 0.696041279114745, 0.5772529733136507, 0.8401603197558671, 0.5298858951687688, 0.6551254216975797, 0.561539912635022, 0.7474132224923782, 0.9871534125926272, 0.7905444225496306, 0.5129888217453529, 0.9121231531462695, 0.5462322459149856, 0.8765891094628455, 0.9923245183813351, 0.565333700093506, 0.8322372200402326, 0.5970147354131371, 0.8385331467611374, 0.724919431763428, 0.6236745205371024, 0.5970401247566768, 
0.8186057783949316, 0.9804893535857764, 0.796460057382139, 0.5416327666319141, 0.7319673422386923, 0.5368046005825864, 0.5813699144487263, 0.5655763076059662, 0.9381705167896895, 0.838818996591592, 0.6950337666512321, 0.5762144692251068, 0.7198391186689008, 0.918656201320061, 0.8970031717431219, 0.8990761943986982, 0.5596584811013245, 0.5854678366731532, 0.5503784438570838, 0.7838309580586269, 0.9174478468090642, 0.9567038430007015, 0.7300472761105503, 0.9805471290407534, 0.817393489086906, 0.9522644870533603, 0.5442715500804288, 0.8658475372258978, 0.7337407975994914, 0.7194094451562902, 0.6393270683825731, 0.8266150915832722, 0.776657754768442, 0.6016735055321512, 0.6699759829932318, 0.5044946224839104, 0.5457717824126062, 0.753046656136402, 0.9444728266588731, 0.8095631104735315, 0.6098818192457014, 0.7901678007478921, 0.803997916333908, 0.5956739831469664, 0.6722588859631944, 0.5502851200461796, 0.9763601222981373, 0.8770240639126494, 0.5114049169088932, 0.7129608373789912, 0.5601351195970135, 0.5827438429918719, 0.8735380556415737, 0.7340878564402242, 0.8139492015896806, 0.6780680805361561, 0.7537507191726938, 0.9351783567916053, 0.8278971121633522, 0.5272513487781207, 0.6724917319567842, 0.5509020943961771, 0.7047944146439766, 0.6540969846404678, 0.524538414954916, 0.5451320741322174, 0.5733207694321104, 0.6590975478103342, 0.69790888332778, 0.8986211718900362, 0.8829463265040003, 0.9129021685650457, 0.8958406651819171, 0.6460508981267963, 0.9964506222912196, 0.7958157178320893, 0.6291682692770957, 0.8921978317555672, 0.5160922633160063, 0.7166444224995214, 0.5237136055351799, 0.8334728843512316, 0.8237705225832834, 0.923501782095046, 0.5986235672130813, 0.6430617379905922, 0.7006217410479396, 0.7562152239639102, 0.6578790714274985, 0.9318747248096103, 0.8576669598377575, 0.6002176602870912, 0.5073553267851922, 0.8825043644842356, 0.8744301101760703, 0.9225245194866969, 0.8420770356372534, 0.5479676417285753, 0.6164366820898666, 0.7570384765291585, 
0.8257340703121155, 0.7599362235141864, 0.5065550755875443, 0.7380035749004414, 0.5837168272814646, 0.6135643512092867, 0.8498237722800006, 0.9213172097178708, 0.9458553266982935, 0.9026281564998072, 0.5022506703544083, 0.9699448623456928, 0.6906709371440081, 0.8473972560954269, 0.6295943998307648, 0.8717680057294135, 0.9747166801369745, 0.5595044534715949, 0.9511126666445299, 0.9047119932639807, 0.9868744132207312, 0.6559957457330656, 0.8113904544495354, 0.7735628470083056, 0.9358904300033136, 0.7571914346024271, 0.9927957185128167, 0.6306507204676662, 0.7514330850924232, 0.6417818178409717, 0.9255151640148536, 0.7562847358809509, 0.8668699321394504, 0.6852489483773003, 0.8793157467785429, 0.6458319929114544, 0.573929955491548, 0.7492309230921541, 0.8939810061112827, 0.5861806368619327, 0.5162598059985956, 0.7082458911107726, 0.6180028273348575, 0.6450299782519979, 0.9171716302889824, 0.8124390634206304, 0.8275334925349394, 0.8080074658854361, 0.9979156687121333, 0.9846907222936097, 0.5777377738164655, 0.9231122011052622, 0.5590950577432684, 0.7026427375016112, 0.8315577656527, 0.7474472802187326, 0.6035979685278109, 0.6556446642242835, 0.512785035149196, 0.8460816833681668, 0.9747423803243317, 0.7911232795239492, 0.5499217528076299, 0.8285126474646913, 0.8341278006246383, 0.9130590406938148, 0.7777862504029252, 0.663509015406857, 0.9842260846094704, 0.7018571281052195, 0.7045679683522672, 0.6917859734937342, 0.6254901246620483, 0.7465028122150963, 0.8040013361338898, 0.9175367755819696, 0.5830897551637492, 0.5509766501317258, 0.9489881822846149, 0.7796932708958798, 0.9370362731305919, 0.8994230993470766, 0.99307507568641, 0.5447097778014195, 0.9769962585061944, 0.8266346883644631, 0.5006712795808547, 0.8957451252078883, 0.9103499975463555, 0.7827508711999019, 0.9538458760446562, 0.64893573038114, 0.8191827795354086, 0.7441820351724975, 0.7703527947733897, 0.9257734869983234, 0.8084752343550106, 0.7319367175455773, 0.690029648477877, 0.5298633136548956, 
0.5462724830988968, 0.6580553241738174, 0.9912722094165247, 0.6221057168621316, 0.789668623884626, 0.7355361871866353, 0.8138839066198622, 0.6507915482542166, 0.8629638215062921, 0.5271060942861767, 0.6244008402794485, 0.676498032052538, 0.72895474706462, 0.9774352430075932, 0.7952096178581665, 0.9944249185146229, 0.7178654814494182, 0.9966663510767555, 0.5214105099237232, 0.795117494837063, 0.976591460461985, 0.9593948411586474, 0.5714591462452955, 0.7789510499431094, 0.9680486028928, 0.7918913269437908, 0.8295129971642379, 0.5046989812488651, 0.8056414814274389, 0.7103202468683657, 0.5058791343340472, 0.5144334209978454, 0.6686289125401228, 0.5720259506565545, 0.6820763026610477, 0.862621130758511, 0.7414720760565694, 0.6632457997141316, 0.98037855777554, 0.7387772594157296, 0.5754523689998166, 0.8714126543220012, 0.6287204937866904, 0.5409004439400739, 0.7647289300562861, 0.5666768548486394, 0.6199871465094358, 0.8809590304186179, 0.6414647530675899, 0.5392445793738048, 0.994028744227816, 0.792657915026832, 0.5291318885989175, 0.8105458957315578, 0.9294693058347192, 0.8876442052290534, 0.6409453507345393, 0.5178722153238687, 0.8091991388981701, 0.5132989585464276, 0.6370437361677792, 0.64074517651477, 0.9160892983765707, 0.7569012920080975, 0.9648549325283411, 0.9591808036271094, 0.5907486683689356, 0.5708671656887871, 0.8798709294357394, 0.9324726231796894, 0.9346174441053393, 0.7590646354884953, 0.9583402474246262, 0.7916971009135567, 0.9053724536101235, 0.653645779538449, 0.978053575352319, 0.5905165484315604, 0.6501455241998173, 0.716945359094298, 0.8086038963741343, 0.971032504597825, 0.5569190666561104, 0.5622913232568565, 0.5889896952809448, 0.9064351508826164, 0.6553937222789008, 0.5803407848927453, 0.9216387422938602, 0.5097355452536785, 0.7382970260693771, 0.796178468300532, 0.7869681164169223, 0.669142315230784, 0.6358696041894161, 0.56160595121595, 0.6191941414758719, 0.7520839165492743, 0.5188603111689393, 0.9073452763987215, 0.8397788818196545, 
0.9152440603244163, 0.7387305034625775, 0.5202766128977998, 0.8990228233116269, 0.5528583375153835, 0.9023655914742497, 0.5513659977991408, 0.8924468217644991, 0.9211174564972089, 0.8167206519662946, 0.9386936753517023, 0.537177040505386, 0.8095915835469716, 0.5494556288325593, 0.7397163713107153, 0.592580091608851, 0.9322811074608024, 0.5864061669696488, 0.5177363028093162, 0.5896303541646862, 0.9535658193091422, 0.6783439163074698, 0.948062309226783, 0.5810296051252538, 0.5429262489107709, 0.8814978162792311, 0.6844196975032989, 0.6516988103811462, 0.5454739125992032, 0.6647767690754505, 0.5306614772176993, 0.5730298070624753, 0.753074121934197, 0.9994990098815091, 0.5001134097940839, 0.967706796622857, 0.9131248875676283, 0.7202335282743132, 0.7598766618114785, 0.9072583434469452, 0.8019033181439346, 0.8998610527948911, 0.7237393666145113, 0.9881148912926256, 0.7524237752089138, 0.6866976812095869, 0.971337230347452, 0.9453357056020155, 0.9489069828905826, 0.8060978547878884, 0.5699920137465729, 0.8943752208296756, 0.5201287694746346, 0.7948663960416289, 0.998928753232134, 0.9640429499363918, 0.5411170322093675, 0.8721944945920755, 0.9063624324890218, 0.6080338706442721, 0.5411734296769266, 0.7690143186218528, 0.8098463516421018, 0.8739269796659299, 0.6436066746222682, 0.7321156345453628, 0.9298384686771402, 0.5986178633012758, 0.9140287587645872, 0.7775066770384842, 0.6484813334101405, 0.9382939490781668, 0.6801459127555628, 0.7288336250022417, 0.5175154238625103, 0.9448561870824893, 0.8970631486623235, 0.7938322720547379, 0.7881568132332419, 0.8752071864550053, 0.7698984938180014, 0.5757975209333235, 0.572610779694349, 0.5877912269574644, 0.7259311101020345, 0.9159395593163915, 0.7549315181573566, 0.5069913013553122, 0.8304194039249647, 0.902969844968913, 0.837905670355026, 0.7333185333082317, 0.9080195400979283, 0.6477242597564518, 0.59182126792435, 0.976323629691564, 0.9679570658287533, 0.6624532714275224, 0.7397691314684631, 0.8653918050567403, 
0.8136708840536383, 0.6443767605478452, 0.5003259966173008, 0.7564286110380961, 0.6631366214305634, 0.6208496703239124, 0.6473661782639125, 0.8331037302511687, 0.5358238316142165, 0.7129487398093723, 0.6328191979697364, 0.6358841837354785, 0.5960847616932881, 0.6989320094243776, 0.9671908966334245, 0.9863464556088242, 0.5101681584929578, 0.9269844998520675, 0.6216344570774411, 0.5371060462986181, 0.9389528776117206, 0.7961345537598273, 0.9440279947430749, 0.79376775545835, 0.8632157331829153, 0.5759165008363596, 0.6033378491305945, 0.5576388296790791, 0.7868137971513787, 0.6315162896814759, 0.6632668234801937, 0.697778935149439, 0.5982477870771645, 0.7983356430499949, 0.8123280397025884, 0.6649746536716156, 0.9480545853279083, 0.6108979007726567, 0.9821021914059636, 0.9921916303542122, 0.6830811305609689, 0.6861169161593145, 0.7933920503130898, 0.7063808339023825, 0.8190125501046603, 0.6791940736043598, 0.977908978561784, 0.5363417427793209, 0.8910605747514241, 0.7366970461112186, 0.6127000729916179, 0.7448180968760343, 0.7584215489833537, 0.772363098340479, 0.6548653025836059, 0.8084957598223872, 0.5084163734360586, 0.6613664475123517, 0.7636424310356631, 0.635530448906945, 0.5584286905976932, 0.9759127255448106, 0.5285626383961926, 0.6954377492382187, 0.8438875795374488, 0.6380730782810475, 0.9120241130716307, 0.9879028214120056, 0.7299182532245484, 0.9636559392126565, 0.5775256070598442, 0.9058561429531095, 0.7835105131542026, 0.7778949121382984, 0.8747370334933514, 0.6864536701997771, 0.9201138610921709, 0.7373234810426442, 0.8056138290890525, 0.6792556652786215, 0.9487775938553176, 0.9654069746092575, 0.751151311962357, 0.7856629741360925, 0.7418765751257317, 0.9447007330580532, 0.9960699493204659, 0.5632599610254823, 0.7961105849322638, 0.7561806634975203, 0.8917677731737067, 0.5444041935492676, 0.9680873925532133, 0.7825106026278843, 0.9963855108145998, 0.8137349108470591, 0.8588974259100213, 0.9345011214170066, 0.8832711013901591, 0.7428337512159123, 
0.8068810781018854, 0.8898621151564587, 0.5853777440177184, 0.7766007006332871, 0.9100566272471972, 0.8634232003759482, 0.8600211328969949, 0.8098689362147675, 0.640986273883789, 0.7000778014732041, 0.9658578755803624, 0.5450041308232897, 0.7246297856115569, 0.9923669714973984, 0.8187335315931912, 0.7068450752244306, 0.6539609356965435, 0.7188767866327692, 0.7330970300170034, 0.5779021006800484, 0.6799566516541508, 0.8646892160389315, 0.5011508056405751, 0.7313375460563163, 0.9878268598988857, 0.737445465578654, 0.830990828143181, 0.995176504713642, 0.6090403134558111, 0.9036906900452604, 0.9777524102655442, 0.6674360570850806, 0.6513888894715258, 0.9402774145032986, 0.7540571873612272, 0.5655249012218047, 0.9000114854176331, 0.9298876781821885, 0.8268963092949935, 0.8369630320240591, 0.8151817559014589, 0.8166608053894842, 0.510718415948004, 0.9161009222717525, 0.699818136187525, 0.6115870198600464, 0.8589313515295047, 0.5857461468832403, 0.600336892901308, 0.5801693404562553, 0.7082993303561882, 0.7825331769045305, 0.5424452682386324, 0.6485707484519981, 0.7642829661737807, 0.8616582659619376, 0.9296054448207317, 0.8233497204858753, 0.7170629556144286, 0.7604702357886233, 0.868092882466613, 0.7531394293491692, 0.7549533767687124, 0.8703838378877566, 0.7170964400819775, 0.8563437770318768, 0.9335954996864932, 0.8005457753157033, 0.8298916891564339, 0.6338327507842966, 0.6124640058254995, 0.7883743010868898, 0.9177814730511179, 0.7717297824719384, 0.742281951079655, 0.9794595976689229, 0.6819427587682136, 0.5178531723077794, 0.8778721043332283, 0.7703050666099576, 0.5965435270332897, 0.512991686509092, 0.808690985035728, 0.6284958975951214, 0.8234123803598199, 0.6957619482099913, 0.93624039892848, 0.6791465074318582, 0.9616574370376214, 0.5534894026632293, 0.5232072683602569, 0.5635017131704805, 0.9959620378208955, 0.7600154418513103, 0.8630110612560064, 0.597703284949842, 0.8051622999134341, 0.7304623437641053, 0.7810179107302051, 0.5117552518665003, 
0.7863461203788792, 0.505780885269259, 0.9648751454613649, 0.6105000309802745, 0.863370136304894, 0.9674521047472769, 0.5748832641456463, 0.910821955116339, 0.6588169520389486, 0.8697709879920212, 0.660178856153621, 0.7421520474121703, 0.6251836364030954, 0.7890660965345964, 0.5764343064965345, 0.6154355661711252, 0.8463589051235896, 0.8127677973625811, 0.5999723306354792, 0.643658797083226, 0.8991500365140219, 0.5158415261559002, 0.7267007722214085, 0.7175562904399988, 0.7896545492186782, 0.7887674189454119, 0.5694702342108424, 0.7446773984592003, 0.7573253009288823, 0.7634188514719037, 0.7664712124318107, 0.6900872627721285, 0.8002547127713775, 0.6224343759328825, 0.8045667123177545, 0.5525819792359106, 0.6509661691810584, 0.6712575020482134, 0.5532775679467444, 0.6747020937862472, 0.8839480923183163, 0.599421456700505, 0.5956648220736389, 0.5587368183256761, 0.6131471953395963, 0.6191181551356093, 0.8182348891901736, 0.7113231565135372, 0.9514073541913952, 0.6559549622625626, 0.7817250975133876, 0.8650068770325149, 0.8706520992937342, 0.6973964142973288, 0.8190359725375034, 0.5888049890244229, 0.9538604439871857, 0.7414758509616142, 0.7089257614286317, 0.9717670097709377, 0.8638766368085564, 0.5316259878848997, 0.9739551051962426, 0.8346712042312796, 0.734893300422926, 0.7610745213372647, 0.7028347014672934, 0.8452971481015641, 0.6112816852241229, 0.5506251394165497, 0.6600821361794398, 0.7155354055153877, 0.8717135894905568, 0.709204403877712, 0.6734705121129304, 0.6111904645577256, 0.6320601617336148, 0.9323276487835788, 0.5108234159480649, 0.8412188755832721, 0.7132403881174709, 0.8529284922438392, 0.8412098084563937, 0.6624280333946915, 0.5580704342624663, 0.7938888215453289, 0.6075076995338687, 0.6096495184715669, 0.7957887155866106, 0.7668087861136399, 0.7374664259621262, 0.9279982783209729, 0.9159721070415282, 0.9082026953690611, 0.9769768245181893, 0.748636605072422, 0.5462080458166753, 0.7218676709915296, 0.6566638985617975, 0.7096989161606116, 
0.9937125122253547, 0.9744896995873676, 0.6963222569464942, 0.5134578239415891, 0.5611934021611436, 0.9022381010957163, 0.9857481919748241, 0.9345279184711544, 0.572818988442697, 0.8876579626989434, 0.7389345235267571, 0.9380219727897852, 0.835237891655137, 0.8561365733454694, 0.894440668833046, 0.6426931275700198, 0.9471877091444312, 0.6451864275721664, 0.8196268218637554, 0.7203566595547943, 0.6744324358730331, 0.9797179106863233, 0.8669890104619313, 0.9284242845667803, 0.8109465400083249, 0.7541978654200987, 0.9880652067260531, 0.5419287542154259, 0.5146517765004776, 0.7559330575015037, 0.5380532029158753, 0.66663632864479, 0.9620513246400995, 0.7822057849878608, 0.8942416817572902, 0.9923740222115582, 0.9578506379082291, 0.7930448760525064, 0.7714950205391576, 0.931787679697216, 0.7872830670393858, 0.8016210628831963, 0.6658091820063575, 0.951022408017939, 0.6555568506056542, 0.6979369831393718, 0.6038654995949195, 0.513427136022502, 0.774748979801714, 0.8200444365986466, 0.9046295431206678, 0.7061613114161873, 0.8989245742778331, 0.5514180337962352, 0.7484579442273982, 0.7348978082538278, 0.948332940868498, 0.764892188797945, 0.5324953201292884, 0.8171481188141136, 0.5578163250881172, 0.6599847581133069, 0.8846191218518282, 0.6328651822440847, 0.8199755701895026, 0.7633297178575498, 0.5648510694898907, 0.791963814568683, 0.5663846097729586, 0.5685912474456254, 0.8554238330955537, 0.8011166304163029, 0.5473285441450836, 0.7866516514781688, 0.9228914443485154, 0.5759404363545362, 0.8705876951669993, 0.9023079046707028, 0.729682708011439, 0.7437691304032445, 0.6367839525280053, 0.5548727903907651, 0.8196091131601864, 0.9603512009512339, 0.5807169062570352, 0.862928199438639, 0.806541409824657, 0.5637517718570656, 0.9797157392645801, 0.7138540629236845, 0.5391028090179104, 0.7497321307017504, 0.7576744657369276, 0.9569691384053224, 0.6819267404755329, 0.9265020553412391, 0.6355467548901937, 0.9143092431037201, 0.9544447639413376, 0.803161993947278, 
0.9472904512282327, 0.8672325478957397, 0.5542686621327595, 0.5732180629073106, 0.5354203006176698, 0.5228215246957268, 0.7995229362161893, 0.8869200404285922, 0.8083191948372388, 0.6838978608092179, 0.790063874344282, 0.623558150387501, 0.5769500659022602, 0.5245999518649531, 0.8543820455856732, 0.6071237898552959, 0.9531872976576119, 0.9159355303921055, 0.5664830767899578, 0.9628878924802804, 0.5251990786198633, 0.6311637760170613, 0.977634447226198, 0.5378683468147285, 0.9608440984584818, 0.626806864616311, 0.814555188649581, 0.7242154949377054, 0.5742998365556391, 0.6624953099373272, 0.8829403111418559, 0.7628751318502733, 0.9735498577266157, 0.7252919185466171, 0.849975118508629, 0.6418030320284317, 0.5541173227974752, 0.8366895959248848, 0.6088627836798127, 0.7469754805208385, 0.7072488831379831, 0.9783490023985337, 0.6955468981855961, 0.5129714749832937, 0.8101484147898315, 0.7455784354493711, 0.9594194086276243, 0.9962259232255255, 0.9651164229636484, 0.6928095343572437, 0.538962326254238, 0.7332876737553184, 0.8663188728314561, 0.6065994370074994, 0.5116571844802178, 0.7304222733695209, 0.7161710074223175, 0.8390901346847497, 0.9313752578330527, 0.5076349391737252, 0.6038296794079221, 0.9799096227202628, 0.6767080551225046, 0.9940941671739913, 0.9972578750361397, 0.6457847060714412, 0.5464609522041712, 0.6732862003451926, 0.6401210000483359, 0.5414962282064197, 0.9592243031953914, 0.6922995375396317, 0.6661803192118264, 0.7658385829932386, 0.6432554539621163, 0.9858775406278485, 0.6877980284219982, 0.5077793465393678, 0.8310664471880758, 0.9168808968953375, 0.8827939244131747, 0.669849810441075, 0.8710691152541598, 0.597125321757807, 0.7071095353986014, 0.8961302137027857, 0.7030317685718277, 0.6052857912038123, 0.6800624171180985, 0.7636685321366439, 0.9194471454957212, 0.6301171094511084, 0.6474237896179962, 0.917171659235374, 0.8980330928977023, 0.5334943377616065, 0.527165707240147, 0.6605350733096556, 0.6245343661710043, 0.6056582465625473, 
0.8722062573609313, 0.7646046167979292, 0.5700127525965581, 0.8760302332637804, 0.5776497931908584, 0.5825121066014792, 0.8441556760603387, 0.7119620731612893, 0.8138825085183061, 0.9084379788114474, 0.7288039962994917, 0.8760343015557351, 0.9040477866819276, 0.9710647026425021, 0.7671134694593194, 0.7376027099014548, 0.8777811222771567, 0.8329881409190278, 0.6375598569085397, 0.8778782241219102, 0.7867926265536085, 0.9661382060756657, 0.769732609194673, 0.6122009435519457, 0.965484059109234, 0.5697053433248236, 0.5925089213130612, 0.9983995615500316, 0.9461762256563041, 0.5539106616733447, 0.576498309668352, 0.562178206681162, 0.6027280367333165, 0.6386337388250825, 0.9325566399695127, 0.549018799308062, 0.5159524348862905, 0.7044120379756602, 0.9694674305103554, 0.5152765860708828, 0.7230101508688378, 0.8767745313490868, 0.7680451757775026, 0.7452701147615081, 0.8741774042860948, 0.640259206970355, 0.9964835235673875, 0.5966216044909063, 0.9976628296702508, 0.9450316428703989, 0.8816241358772796, 0.5619799863865178, 0.6542911021680827, 0.5736669642193328, 0.6929318552150825, 0.6357558630850796, 0.5222364661283267, 0.5452984019142897, 0.5662811514963241, 0.7885760941228825, 0.6132444735065704, 0.7278754122240811, 0.6202802576406735, 0.5784020082378134, 0.6975336752592799, 0.9788099835257953, 0.6390760767778116, 0.5596679993842447, 0.9387202859533681, 0.8284231094100161, 0.695040610788033, 0.7081870462888727, 0.9813246918127878, 0.8866733911793414, 0.7725253548554022, 0.7476581494257717, 0.6543700687955671, 0.677940844146041, 0.7951794878216183, 0.9070873681077269, 0.6615211207648771, 0.849399192161766, 0.9111623958347791, 0.7747943508693702, 0.9355658051727225, 0.9990063786906356, 0.8403540207686433, 0.8351410977048668, 0.5596878256732586, 0.9184599051431153, 0.5295462024756541, 0.7262022462845206, 0.6634737515076814, 0.5025941198041411, 0.5256343548311591, 0.8278410218795647, 0.9458364551830196, 0.8708833548780529, 0.8855776252237395, 0.6776600060042937, 
0.7123767970078283, 0.5087456545430726, 0.6483171224704899, 0.6743131496848528, 0.5052896230245789, 0.737218555120611, 0.6236932256553874, 0.6393318210500594, 0.503418389249122, 0.6982973247345567, 0.5096285552797022, 0.6923698604982349, 0.8195848683255782, 0.7609425159266053, 0.7938850849991217, 0.8530056978066796, 0.5312629360077543, 0.6393838873521702, 0.9656459908783785, 0.9178617572632475, 0.5529206710412817, 0.5076010457136242, 0.5906523691755314, 0.7415616809307972, 0.5856268042039123, 0.6051137991246114, 0.5345255583453699, 0.9720222993010386, 0.8639900469576218, 0.8829632979219306, 0.9229798767274846, 0.6250813224484044, 0.7299816092567051, 0.8104322257394847, 0.8486143307136544, 0.5197937460773023, 0.9919311192096378, 0.8470759221668724, 0.5936833785206501, 0.8371404681372597, 0.6578134127881287, 0.7220364525710066, 0.5963576779091398, 0.7367996521674112, 0.9434881667047679, 0.7344942299287895, 0.5384192703907209, 0.5114988274939078, 0.9617802668775373, 0.8571770651813574, 0.6878438685123178, 0.7435931731276658, 0.668747528184958, 0.9446609110796875, 0.5285727118039819, 0.6702947230316831, 0.8574076647251584, 0.821022847217759, 0.8541277237062677, 0.8882292054365788, 0.6575279464466643, 0.8177809896957382, 0.5734773788432004, 0.5313140631534015, 0.9772616789066856, 0.9069095432771609, 0.9435219899765006, 0.7983822461490123, 0.5863902468043174, 0.9405163947070144, 0.5919870738451853, 0.5724388677129268, 0.8943350606626034, 0.7713101215478038, 0.9697261996544944, 0.610961770948301, 0.9193363695698751, 0.959583629171213, 0.8813425382792119, 0.9061120309737645, 0.8176107770217361, 0.7371684952429556, 0.7960439928013694, 0.569525920808873, 0.8574202936217823, 0.528697901847905, 0.6136142061997423, 0.6873377339410665, 0.86963510190823, 0.6808670734879264, 0.6979565039816433, 0.6044661738795543, 0.8535838461375633, 0.8913959681736382, 0.8459587926814323, 0.640538548770478, 0.6167233493551189, 0.6628203273741466, 0.6133152101678102, 0.5501619725854809, 
0.584585891186839, 0.9655411754490364, 0.6780061717231618, 0.6133906184080238, 0.594994069122587, 0.6081189490148329, 0.8081679874667397, 0.9478494300514286, 0.7104319218246943, 0.6000451994333695, 0.8137680216086305, 0.9163882428969182, 0.533311717284424, 0.6391480478845921, 0.5406251528597419, 0.7011124544184029, 0.5111674964330938, 0.526870308075373, 0.6727867889497823, 0.73187435913709, 0.6274096798264364, 0.910582724572623, 0.8310655537152647, 0.6058357511383758, 0.5918703424244817, 0.6809011268485952, 0.5450451919472226, 0.8525496258475271, 0.9493405691351027, 0.5112613585461061, 0.9650275792489972, 0.5226584109017349, 0.949620101753273, 0.6690386388837253, 0.9275570893121332, 0.7672580361639545, 0.5255647625227428, 0.865573406080496, 0.8805592295716941, 0.9547633534256459, 0.8228863645322694, 0.8082278233307036, 0.7677753113193886, 0.5809337918662758, 0.838905717753517, 0.6493935606460551, 0.673967385107581, 0.9989890324340597, 0.7487454251672918, 0.7911689027052096, 0.8509912782088329, 0.6829055206879997, 0.8482790412707396, 0.8929051511721974, 0.9051358559135799, 0.5574812060020334, 0.734883745878369, 0.8343533544502393, 0.6598573002488348, 0.7919616485442904, 0.7279682654754621, 0.6534298632997673, 0.8880661449117893, 0.8293151125834908, 0.9330120918222602, 0.6903324138643983, 0.6371944990875766, 0.8453380568277065, 0.601740795323724, 0.6632248020491309, 0.9952108360765033, 0.7478543950189396, 0.9667305273764651, 0.6851613807177774, 0.6269312104250713, 0.8320344661759531, 0.910330839781929, 0.8608051659404599, 0.5990185985491058, 0.7563616309560731, 0.8557572600615417, 0.6580889123210236, 0.7854032746248968, 0.5805352938364533, 0.6123926625199749, 0.5005843603745732, 0.8601418199371167, 0.7998868394418279, 0.6465389494444781, 0.876781188278911, 0.5656376802144873, 0.74397557897045, 0.9535138755198167, 0.7748996193998652, 0.842334794174034, 0.737399038037885, 0.5276181800423503, 0.57794659696356, 0.8005941790852902, 0.763151219947436, 0.6618397871272007, 
0.669851320836627, 0.8357466787630119, 0.5716290630006965, 0.6648423307934803, 0.9769182941410308, 0.8459743041152992, 0.9593565351219565, 0.753315750857677, 0.7174673790920385, 0.7723776814581321, 0.7158038327750256, 0.9910939926896215, 0.5035589962269182, 0.6524404971878588, 0.567243765143006, 0.7130404937925108, 0.9559797380455091, 0.8944734449823866, 0.8665059762324254, 0.9320960517458838, 0.55434804382629, 0.6206311358527425, 0.7593235047988101, 0.6141005172328056, 0.9203405422453191, 0.7606708713255045, 0.857587339348113, 0.6250566238529002, 0.8040487288490686, 0.7505337895448285, 0.95932314803638, 0.785848099200076, 0.8848544571849364, 0.9781240101141977, 0.6466637889152779, 0.6193647868800967, 0.6431110248099525, 0.6481058844716772, 0.781069284931083, 0.7086820662788142, 0.6463685714015612, 0.9543485334418889, 0.6454543746746059, 0.5791425277139527, 0.9925985539573949, 0.6709651581515013, 0.8974376344520111, 0.5059726491433705, 0.9003839755643528, 0.5162958478713187, 0.871795455634396, 0.6896390410965261, 0.5824445053459193, 0.8072006679630147, 0.8865878959107507, 0.5335108055651641, 0.5764742707580659, 0.7954256402419004, 0.6342804023849034, 0.9449321663748589, 0.6727236712838394, 0.8716539207359272, 0.8799764273664472, 0.8863467585298413, 0.8037967812343405, 0.8997995305818131, 0.9912403615909395, 0.528881340361478, 0.9085314954829039, 0.5660571221319128, 0.9261539658157423, 0.7264969337403178, 0.911976555003035, 0.7537899327965893, 0.574827767478872, 0.8961539187730245, 0.9210645491080596, 0.5212822480734314, 0.8472938030294361, 0.8766471033518755, 0.5984232713416611, 0.6258039052394054, 0.7603694258605671, 0.7373748279474959, 0.8432476810850273, 0.592861421118258, 0.5746507676757471, 0.8888920112712375, 0.9387404014396082, 0.8931648947206053, 0.8584652869135685, 0.7971942132469696, 0.737569610818419, 0.6973039083680002, 0.5407068171851774, 0.5801972274634357, 0.7190893855851099, 0.6592808948523268, 0.8103092649121717, 0.6642744310201197, 
0.7349050892850548, 0.80466661379532, 0.6086606211500099, 0.7916173856842905, 0.6486411861881305, 0.9425093764010632, 0.8003557176576109, 0.7419992098435242, 0.6137367477823389, 0.7751174644817276, 0.71996734801993, 0.5401224486864129, 0.5475402952711411, 0.5014619236445146, 0.933680098944397, 0.814312035018963, 0.5964913822125227, 0.5519853224012385, 0.7414317323785847, 0.645638314053669, 0.807764542789521, 0.7198208899973466, 0.6547552502937455, 0.7014249791211024, 0.7586010191687566, 0.9803700529616336, 0.9118368460240308, 0.8899090258114873, 0.8333802225774634, 0.9179092075118642, 0.5368114863059599, 0.5743791516580754, 0.8351697713718571, 0.6750976565519731, 0.5431724380062546, 0.9451147560507491, 0.9480303740984193, 0.6679113598777755, 0.7828708528509385, 0.7010786148607424, 0.8154181456349906, 0.6817691722709603, 0.8343285526121014, 0.8169586731129361, 0.7078200484692788, 0.5530980230832969, 0.5404532709774785, 0.9830642849404332, 0.9017876442373725, 0.7388359714249984, 0.6718956799451967, 0.5017501914855163, 0.8818033983714011, 0.54228442662125, 0.828193779489183, 0.8680948590213118, 0.9984724980820026, 0.9666140130052019, 0.8065875576140225, 0.8036625037560062, 0.76090716021359, 0.9138969358358265, 0.731713716564278, 0.6627811739970371, 0.6994207983722146, 0.6809396054127974, 0.7570370026346759, 0.9917598479638204, 0.8251510407383245, 0.8063429910811128, 0.7429506093230392, 0.7041390489183952, 0.6173126323921273, 0.8672053682832521, 0.8986117610966773, 0.8883996150042157, 0.5248602648450891, 0.59913196660872, 0.9586735810812557, 0.6136326177416296, 0.6770886890199436, 0.9225872252874547, 0.8212078824195077, 0.7799697378531039, 0.7374126816295539, 0.5077452728974081, 0.7862291156808021, 0.9880499640176947, 0.8093112032360109, 0.8545281361674899, 0.6070914984688917, 0.8797526634246682, 0.7940477626426605, 0.6238721218983896, 0.5953739406388523, 0.6706389126386847, 0.6240250459346209, 0.8073425663316451, 0.70934333896997, 0.9198808276290269, 
0.9335874009486275, 0.9571981435653902, 0.8578702638103793, 0.5064336577554474, 0.8365956363437586, 0.8570729843104329, 0.6908616589752838, 0.8935136471001419, 0.5577753159602633, 0.6590971167389044, 0.96186358039419, 0.95232523360429, 0.5722042735897315, 0.7332465168372113, 0.6543165781658764, 0.7855822626995026, 0.731833779763986, 0.6424485121404478, 0.9291583562683646, 0.9433625783323077, 0.7761056817875004, 0.9297636813489742, 0.8494230386957763, 0.6656566634192898, 0.5213154013499302, 0.83129494273893, 0.720127345892152, 0.5070348614590474, 0.875928798552973, 0.7711290371527866, 0.9733989823175562, 0.7210170201484603, 0.9923820901992415, 0.9551546289442778, 0.604122420912772, 0.9904178094673084, 0.9628923978738297, 0.7260562706598744, 0.5620353895550882, 0.7922171349164244, 0.7113053174799837, 0.6182151257442057, 0.6871591369734193, 0.8240497827180642, 0.9631464853175777, 0.7332887276951332, 0.8680080705143477, 0.5494784321323192, 0.890450649398673, 0.9720829584910329, 0.5168157385554499, 0.6882147383745605, 0.8118669762636845, 0.8950926696454509, 0.7407586855293162, 0.8043253940427225, 0.7302200358133455, 0.9072053650499967, 0.9698749467912425, 0.5121668498138384, 0.6038904854121041, 0.5832900927366415, 0.9038499613695358, 0.651318523839245, 0.948896684425093, 0.8253979212176197, 0.8102718224705656, 0.8763922579603345, 0.6420037746551381, 0.9868470972105456, 0.7285892342455416, 0.5274045910077813, 0.692947733575934, 0.6465996776171321, 0.5365644048168339, 0.8505185095133506, 0.5163562783006876, 0.6659430726640723, 0.7264001988819355, 0.5404085962227889, 0.8812599143757118, 0.5007561884096584, 0.85396336904042, 0.5641544295041958, 0.6098203686452498, 0.5416357048934515, 0.6858034603772085, 0.568763468288051, 0.7427310323036529, 0.6512454198630359, 0.7637638202137378, 0.8273257636881075, 0.8526862564304313, 0.742839691629497, 0.5687346403800076, 0.7036001973180223, 0.8000499701341492, 0.510424495807041, 0.9672001075279146, 0.9128623403132652, 
0.6384304414878882, 0.5292022610268451, 0.8974573738220816, 0.5164743736059437, 0.9659557380422552, 0.8858255853950756, 0.6230455316143237, 0.7552055540992535, 0.5370254416239015, 0.9558612904828476, 0.7555777970040585, 0.7494173091579286, 0.7446436863689547, 0.6913841571507724, 0.9053675014738637, 0.9428861945680782, 0.6878692720672449, 0.8707907187438365, 0.6910807678245845, 0.6120954121326903, 0.5753399032101388, 0.5238409229568036, 0.9465849813960344, 0.5036996590971701, 0.7402705868338402, 0.8369184757697296, 0.6357456826406034, 0.708663489591698, 0.9659273696864199, 0.7402693202676498, 0.8018265329951759, 0.9584158898203003, 0.6778873784920787, 0.9925140042370935, 0.7880492420380076, 0.7180493593025692, 0.9344649526772344, 0.5679685597239943, 0.7197673728144123, 0.9841198425323539, 0.9749995662108167, 0.7907418816106373, 0.8343608059595846, 0.7538420673180902, 0.837849661516519, 0.8943866416060273, 0.6833686756163105, 0.5545535664363768, 0.6083895752324489, 0.9612495251431834, 0.7452042824431017, 0.6970265092851099, 0.6869248604196775, 0.736887615973997, 0.9801903621880037, 0.5674372072717332, 0.995151161682938, 0.6838576889364709, 0.8881819466711849, 0.8369156865797658, 0.8892256083878314, 0.8145852851818145, 0.8232025656237039, 0.8572966751747113, 0.9619391727442874, 0.5130578299924513, 0.6924103838392954, 0.9806096086633649, 0.710539125741118, 0.7646892402933144, 0.8524785867693976, 0.6203399210251034, 0.8042567192055384, 0.5773715628062346, 0.8687641497642751, 0.7444691171614937, 0.8327025119768735, 0.9680384297247804, 0.584221188423965, 0.6324979197537872, 0.5397678581285941, 0.7718456625577197, 0.8408772029527464, 0.6201950684974449, 0.7396963585795839, 0.9963818773644646, 0.9103900104861473, 0.6079105824034906, 0.9965262588913502, 0.5919618231632785, 0.5250505088491675, 0.9377619067146375, 0.7610966075403073, 0.9498260263756824, 0.9516637792383911, 0.76458077690962, 0.5798306923369025, 0.9705480921219592, 0.7277050391706027, 0.6100034946889643, 
0.5968536347028064, 0.7918331561563468, 0.8495311038799545, 0.7617171576746999, 0.9439196478295522, 0.5713348837748964, 0.9455486421713666, 0.7408993606233821, 0.8343042371600045, 0.9097056014441427, 0.6454969747011028, 0.7990725319685082, 0.6718082673357111, 0.6453653001555453, 0.6034289157433365, 0.7082433977223146, 0.8442576554673502, 0.9225094785269083, 0.5126030105424411, 0.7821151660154726, 0.6623011967796426, 0.8818141660498975, 0.7760254260526372, 0.5220354819069188, 0.9766238667444141, 0.7373446275995156, 0.9196168972414087, 0.7481288242021495, 0.7483860139646886, 0.6143767300832164, 0.5012345785842749, 0.8982698641449123, 0.7853073270704793, 0.7177682372972836, 0.6233219451956471, 0.6110564814267664, 0.5865474100149786, 0.5508157392218831, 0.8480994616128568, 0.8515269992728436, 0.7819380824872577, 0.7006273499829608, 0.5305972504061935, 0.8473596065192744, 0.7219554209367318, 0.9888695058758203, 0.703947670475153, 0.8492444323812557, 0.5491207580454174, 0.5928689168861226, 0.735035524547721, 0.6237957264429901, 0.7500766843946711, 0.6799844526966411, 0.6076720941109628, 0.8013080062571293, 0.5432086125033181, 0.6224784263290659, 0.9813748846648338, 0.547962312170595, 0.7761161039169, 0.7607004472367178, 0.7888943611177186, 0.9891855369670826, 0.5306749173155756, 0.9679418391869681, 0.6660170610113625, 0.5280979596132882, 0.7361285014887602, 0.9897036704999317, 0.9713309808090773, 0.9601050165897966, 0.7903359672590702, 0.8570406957473842, 0.6079397733334317, 0.9636462627152644, 0.9160137774275994, 0.7512384998308115, 0.9272251607194808, 0.7632606491180376, 0.9144122143079596, 0.5719151578285369, 0.9925917343477639, 0.5212108381396887, 0.66314840068968, 0.9534029229502723, 0.8164309097674982, 0.8696774254217876, 0.7551709249645053, 0.6491889503455475, 0.9740136643041606, 0.7007426999390631, 0.5561031966198862, 0.9282424973924519, 0.5672684476461248, 0.89572606279721, 0.5147669511684094, 0.8249070513292425, 0.6519562156521876, 0.8333626016899562, 
0.7284395731874121, 0.8674952964338116, 0.7608153410368906, 0.759450013074444, 0.6342772057291081, 0.8827867877907789, 0.555807227306301, 0.6074036665499111, 0.8308397267145953, 0.9423411428726904, 0.571860970467106, 0.9998969518571865, 0.6109782570576742, 0.5383816804356143, 0.6246842676052222, 0.7122228905653083, 0.506345735927026, 0.6594282404996585, 0.9400272237030404, 0.9465047296223252, 0.8895590693382458, 0.7607148587815604, 0.9154690818162379, 0.7854689820371437, 0.6080681067065412, 0.7701245189923087, 0.916880716039187, 0.6211435375019867, 0.9301658432136903, 0.6569808782474639, 0.559064551117254, 0.5344102451337053, 0.5103843689009846, 0.752777964052218, 0.7598825734748795, 0.6787814876108215, 0.5872057125154584, 0.6303843043565696, 0.6838289201192034, 0.5634272565918939, 0.8034786477726406, 0.680843169271353, 0.7807397761705497, 0.7714856844226079, 0.7796249999219234, 0.9405097352157825, 0.8431684145519265, 0.6248718712802102, 0.9153579442716941, 0.8558521485249484, 0.6416572068871333, 0.7112041728084362, 0.8988706742205738, 0.6431889065241378, 0.9813704755815785, 0.9789166804522087, 0.5045748047245011, 0.9408863473080353, 0.5112666671853732, 0.9896021966015169, 0.6378918591633955, 0.7566531991761563, 0.916333039610631, 0.9649922022717198, 0.9003131122747117, 0.9287888783928592, 0.7775427527945384, 0.8783106693148752, 0.9629294361456726, 0.738786311440947, 0.7045350315219203, 0.7271010086404397, 0.9641021816809925, 0.6416080514068141, 0.8344663009508233, 0.9348629021468533, 0.9302849663284447, 0.6613744261629295, 0.7723082394734793, 0.9818661613485128, 0.8322854809657457, 0.8965273243626244, 0.9290531883414379, 0.6903233725520828, 0.8015178853022147, 0.9807808862769756, 0.8633518226169598, 0.6589297783465558, 0.8787925127563637, 0.6704230482662132, 0.9399191994190643, 0.6962884927220451, 0.8260604209171543, 0.6327721694429773, 0.6011494003224086, 0.665163162224706, 0.8739879005551768, 0.8343629715883405, 0.5977174596873973, 0.9863150302171255, 
0.9569219178442696, 0.9356254876310081, 0.5315888966403847, 0.6200855056432282, 0.590864185946136, 0.53658100274591, 0.715029834252066, 0.7270492923041673, 0.8797108478817689, 0.5709397199391697, 0.9106261788691045, 0.6343319583598195, 0.5858548567134482, 0.5757885169411893, 0.8832267988817176, 0.9945935042457059, 0.7973678388545082, 0.9359080044740966, 0.7095990418386695, 0.8094394831469017, 0.9049497793936964, 0.7510879174816284, 0.5205648842236267, 0.5749567915647196, 0.7189347044586077, 0.8727093923540378, 0.8682924448095202, 0.6001712502495308, 0.8069099200470592, 0.9072975836191894, 0.5564007137818066, 0.8221973648962009, 0.75811495001263, 0.8283369917818326, 0.9026499418080398, 0.9162019589565324, 0.5856906634376196, 0.96290282515169, 0.6008785654610242, 0.7122861281221822, 0.7018951403406387, 0.5282215296969814, 0.5578217851845879, 0.5165635632933487, 0.9164730278184412, 0.5045621178284894, 0.9461511580531841, 0.6068905606389845, 0.9863458161197843, 0.7161346121716731, 0.9420345627213724, 0.7293616604338414, 0.5211030040691735, 0.9037762449361731, 0.5082005056475791, 0.9311672047865949, 0.5118076346198535, 0.6523131597082291, 0.8538326490922968, 0.599350064450785, 0.7305312828350665, 0.925160723141524, 0.9780028372420322, 0.5749382708951943, 0.5744802655553339, 0.9406106411235238, 0.747792752108919, 0.7523086758783895, 0.8674767859693182, 0.6526456804805915, 0.8645087541828086, 0.5107994042842043, 0.8854043709691495, 0.958494624518552, 0.7872287106286882, 0.709201907507504, 0.6689384239108431, 0.9991551909402256, 0.5085025441130884, 0.5794588933433766, 0.5844837986090494, 0.6118031672175229, 0.911925276551961, 0.5736404990616155, 0.8232233559692514, 0.7299189323857512, 0.9969113187013892, 0.5892632963688285, 0.7528298268060762, 0.659175100502537, 0.6237049106925392, 0.8850607539374011, 0.9536694598997102, 0.7002091909729588, 0.8242758088830147, 0.6104542499130519, 0.8639513345333475, 0.8932450140280509, 0.5485682085483337, 0.6399108063911134, 
0.7881044906425451, 0.8725803379005208, 0.8975808221055315, 0.938024276532454, 0.6650501390887882, 0.6992489421917586, 0.9701161379573577, 0.8599364579667778, 0.6513557052888841, 0.9915736174126177, 0.8191557354971084, 0.6206899512451707, 0.9144592371083711, 0.6383982355686058, 0.741998947467668, 0.8361978542311979, 0.84962878557439, 0.8590893348449309, 0.8596948532676587, 0.8701705153540853, 0.6315857115733519, 0.7509560513868097, 0.8941838379864693, 0.6738557513680036, 0.7363345522186565, 0.672052271231676, 0.8835233179254007, 0.6693432135784432, 0.5757130057314513, 0.8261955044083621, 0.5852020417597226, 0.8302818717309136, 0.9088301485556398, 0.9139506453417019, 0.8030039258273112, 0.618524315825521, 0.6865353449085283, 0.8739817584818519, 0.632769668659569, 0.6908149334450482, 0.6927237548965595, 0.8628200125916192, 0.8590412425410114, 0.7018430606968868, 0.9213824742628438, 0.7414004639245292, 0.598748538220057, 0.5528009909138447, 0.8484798768635864, 0.9256293163759332, 0.9197746208621969, 0.9749468949822566, 0.5411807588434523, 0.7525987563092837, 0.7866265793640316, 0.994409479562365, 0.5859833552138833, 0.7994878427045353, 0.6020975590131978, 0.7042492438448726, 0.6217799424632134, 0.5354625671995132, 0.8080557016068661, 0.9492856675465233, 0.5122307198483405, 0.8405930446919273, 0.6437919164558843, 0.5151918310331657, 0.549755747767048, 0.7472361786316583, 0.5562919862074065, 0.6770545343037007, 0.6843565342179296, 0.7890844377971458, 0.6445744836302529, 0.725916741866188, 0.9048313143293408, 0.7409348047064588, 0.8969056961463021, 0.9361298970976466, 0.9624538334749448, 0.7272326020944075, 0.8364402130963664, 0.8495401714333635, 0.7685651768948998, 0.59582819481912, 0.7840758839882993, 0.7019776847463882, 0.5187659065280869, 0.8710485595144026, 0.7180722295954325, 0.6068836901570684, 0.9641235489549258, 0.9651090042828976, 0.6379599785616985, 0.9441078289126331, 0.7372223177570529, 0.5371721906866889, 0.6747897539401984, 0.5236276272868565, 
0.6321644118773123, 0.9624420156066794, 0.9291293214959884, 0.7111834587029189, 0.9050451603278367, 0.7549433639829841, 0.7465902593661318, 0.5117452863953508, 0.8803625394965805, 0.6118376209350819, 0.5574962578021898, 0.9624029637154977, 0.7725756405209483, 0.9382983310613413, 0.6797603981898187, 0.8337443535599166, 0.5011364745935383, 0.9872448730543374, 0.9171690725986543, 0.76183285947949, 0.7182726794529586, 0.6007925176619024, 0.5527609929665667, 0.6481070189990576, 0.5923636300765208, 0.8160503818425161, 0.9365102746553231, 0.7736981245224566, 0.7605648722619249, 0.9333626932365386, 0.9429610555191514, 0.5043671038711677, 0.5476375593488494, 0.5310417732620699, 0.5774709694161161, 0.7385516946790663, 0.5877739457577427, 0.9256266139759437, 0.7962619015551782, 0.8754419641703729, 0.8765639391957611, 0.654655614945783, 0.9231079357863452, 0.8093617469980275, 0.8588536646513388, 0.5591409388183455, 0.741028531502632, 0.7312108590993045, 0.5736171872670586, 0.7187736157572833, 0.6015910393121435, 0.534899529057655, 0.5767132051203742, 0.8905323441159252, 0.6922778194543302, 0.8453038270329627, 0.8669898704269228, 0.5428270939327391, 0.8661444394915716, 0.8830107157278955, 0.9876224128038178, 0.5811299277648962, 0.7781082673772666, 0.5097534793480623, 0.7569348680852335, 0.6546845271066593, 0.86870427625371, 0.9616573844450222, 0.6305346824435085, 0.6405519580314807, 0.9200128288876654, 0.7648767813417923, 0.5531160630750107, 0.9220135124141662, 0.6929278667161934, 0.5584234960279145, 0.5387445417642096, 0.686403833662957, 0.7659965646883253, 0.5841663299763712, 0.8812106990087814, 0.988240266704844, 0.7277822889748686, 0.6816443009469657, 0.9201240441023447, 0.6721073479997417, 0.7471859813903733, 0.9464619717742093, 0.5309995436902474, 0.9614579971518027, 0.9887745003271984, 0.7254534708646083, 0.8333981559377882, 0.9356436616740794, 0.5725651809884322, 0.838048478558742, 0.5764073651419267, 0.921812457449674, 0.940849632853658, 0.9396440691268897, 
0.8577122482380009, 0.717610942755937, 0.512712909920964, 0.6469906333971966, 0.986267505187467, 0.7880970967187115, 0.6983222586747296, 0.7357727349744225, 0.5664123365399178, 0.5972293300899021, 0.6372399359254891, 0.6902656000162729, 0.9856134643247783, 0.5161614789501192, 0.5198535566155821, 0.5526549599157097, 0.9007760272218424, 0.8633098539816491, 0.9043213574466145, 0.8017890864570727, 0.8057948263693244, 0.7953305125438946, 0.8941531905803687, 0.806127569893911, 0.669253525834529, 0.931567674007615, 0.5898262666106988, 0.7664128203562691, 0.6898371281098495, 0.9402131172476212, 0.5462007161232376, 0.9037662490342141, 0.9833009791944044, 0.502334176557468, 0.7651728070828656, 0.5561600844770741, 0.9170540416236816, 0.8904206703882754, 0.8489861234077738, 0.889188792948248, 0.8125805560867925, 0.7092042592242037, 0.5764236086928209, 0.7427072202163683, 0.8733994740765748, 0.8845297565439765, 0.7341032469184819, 0.5826668524448593, 0.6783470874106791, 0.6700269846325348, 0.6089757562849996, 0.9577350224777994, 0.7940954322166751, 0.5775897570998443, 0.6153981228436263, 0.6105658739460591, 0.8033255490555549, 0.739868340261403, 0.6984376231430982, 0.693582907818113, 0.6869706669139439, 0.9303894726374518, 0.9180504565164317, 0.8105272859805797, 0.8889374703572381, 0.8552879374946907, 0.7774253530234071, 0.7458637027518951, 0.7776549998002066, 0.8338397116087878, 0.7686071110511765, 0.8332084800751365, 0.7447386506371092, 0.7249771186928371, 0.7932199758840237, 0.6299717672171834, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 654, 656, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 752, 754, 756, 758, 761, 763, 765, 767, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 836, 
838, 840, 842, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 868, 870, 873, 875, 880, 882, 884, 886, 888, 890, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 918, 920, 923, 925, 928, 930, 933, 935, 938, 940, 942, 944, 947, 949, 952, 954, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1040, 1042, 1044, 1046, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1082, 1084, 1086, 1088, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1142, 1144, 1147, 1149, 1152, 1154, 1157, 1159, 1165, 1167, 1170, 1172, 1175, 1177, 1180, 1182, 1185, 1187, 1189, 1191, 1193, 1195, 1198, 1200, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 1362, 1364, 1366, 1368, 1370, 1372, 1374, 1376, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1396, 1398, 1400, 1402, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1430, 1432, 1434, 1436, 1438, 1440, 1443, 1445, 1448, 1450, 1456, 1458, 1461, 1463, 1467, 1469, 1471, 1473, 1475, 1477, 1480, 1482, 1485, 1487, 1490, 1492, 1495, 1497, 1500, 1502, 1505, 1507, 1510, 1512, 1514, 1516, 1518, 1520, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1560, 1562, 1564, 1566, 1569, 1571, 1574, 1576, 1581, 1583, 1586, 1588, 1591, 1593, 1596, 1598, 
1601, 1603, 1606, 1608, 1611, 1613, 1616, 1618, 1621, 1623, 1626, 1628, 1631, 1633, 1156, 1156, 1164, 1162, 1161, 1164, 1162, 1161, 1590, 1455, 1453, 409, 408, 1590, 1455, 1453, 1455, 1453, 1578, 1573, 1585, 1504, 1509, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 409, 408, 1453, 1455, 1455, 1453, 1504, 1509, 1479, 1489, 1479, 1489, 1635, 1630, 1585, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1378, 1504, 1509, 1504, 1509, 1378, 1504, 1509, 1504, 1509, 1578, 1573, 1585, 1590, 1578, 1573, 1585, 1590, 1630, 1635, 1635, 1630, 1578, 1573, 1585, 1590, 946, 958, 1455, 1453, 1489, 1489, 1504, 1509, 1504, 1509, 1578, 1573, 1578, 1573, 1585, 1590, 1578, 1573, 1578, 1573, 1585, 1590, 1580, 1305, 858, 858, 845, 845, 879, 879, 958, 946, 946, 958, 1141, 1141, 1091, 1091, 1164, 1162, 1164, 1162, 1164, 1162, 1164, 1162, 1455, 1453, 1479, 1479, 1479, 1489, 1479, 1489, 1455, 1453, 1305, 1509, 1509, 1504, 1504, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1635, 1630, 1635, 1630, 1580, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 
3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3508, 3510, 3512, 3514, 3516, 3518, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3578, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, 3826, 3828, 3830, 3832, 3834, 3836, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3854, 3856, 3858, 3860, 3862, 3864, 3866, 3868, 3870, 3872, 3874, 3876, 3878, 3880, 3882, 3884, 3886, 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 
3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 877, 872, 4099, 858, 927, 922, 956, 951, 653, 653, 892, 1146, 1151, 1151, 1146, 1162, 1179, 1174, 658, 658, 927, 922, 4117, 956, 951, 927, 922, 1151, 1146, 1146, 1151, 658, 653, 1202, 1197, 4483, 4486, 1202, 1197, 1465, 1460, 1484, 1479, 1452, 1447, 4490, 4492, 1484, 1499, 1494, 4141, 4495, 1452, 1447, 4497, 1465, 1460, 1484, 1509, 4146, 4499, 1452, 1447, 1453, 1455, 1465, 1460, 4224, 1479, 1489, 1499, 1494, 4502, 1452, 1447, 4504, 1452, 1447, 4506, 1460, 4508, 1452, 1447, 4510, 1452, 1447, 4512, 1465, 4514, 1452, 1447, 1452, 1447, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1504, 4518, 1479, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4224, 1479, 1489, 1494, 1499, 4520, 1455, 1453, 1465, 1460, 4224, 4522, 4524, 4183, 4184, 4526, 4458, 1452, 1447, 4529, 1452, 1447, 4531, 4533, 4189, 1447, 1452, 4535, 1452, 1447, 4537, 4539, 4194, 1452, 1447, 4541, 1452, 1447, 4543, 1465, 1460, 4545, 1484, 1484, 1499, 1494, 1499, 1494, 1499, 1494, 1504, 1452, 1447, 4547, 1452, 1447, 4549, 1465, 1460, 4206, 1452, 1447, 4551, 1452, 1447, 4553, 1465, 1460, 4445, 1484, 1479, 1499, 1494, 1504, 1499, 1494, 1499, 1494, 1452, 1447, 1455, 1453, 1465, 1460, 4217, 1479, 1489, 1499, 1494, 4556, 1499, 1494, 4558, 1447, 1452, 1453, 1455, 1465, 1460, 4224, 1479, 1489, 1499, 1494, 4561, 1494, 1499, 4563, 4565, 4567, 1600, 4569, 4571, 1595, 1610, 1605, 1615, 1625, 1620, 1625, 1620, 
1625, 1620, 4575, 4577, 4579, 927, 922, 4240, 956, 951, 927, 922, 4246, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 658, 653, 1202, 1197, 4583, 1479, 1484, 1484, 1499, 1494, 1509, 1499, 1494, 1504, 1447, 1452, 1453, 1455, 1465, 1460, 4276, 1378, 1479, 1489, 1499, 1494, 4587, 1499, 1494, 4589, 4591, 4593, 4595, 1600, 1595, 4597, 4599, 4601, 1600, 1595, 4458, 4405, 1590, 1585, 1595, 1600, 1605, 1610, 1559, 1620, 1625, 1635, 1630, 877, 872, 877, 872, 877, 872, 877, 872, 4300, 4302, 877, 872, 858, 877, 872, 877, 872, 4311, 892, 927, 922, 4316, 956, 951, 4611, 927, 922, 937, 932, 956, 951, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1164, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1164, 1161, 1179, 1174, 1179, 1174, 4380, 1202, 1197, 1151, 1146, 1141, 1151, 1146, 1156, 4619, 1169, 4621, 1161, 1151, 1146, 1141, 1151, 1146, 1156, 4623, 1161, 4625, 1169, 1179, 1174, 1184, 4380, 1202, 1197, 1573, 1590, 1585, 4390, 4627, 1484, 1484, 1578, 1590, 1585, 1452, 1447, 1455, 1453, 1465, 1460, 4390, 1378, 4631, 1499, 1494, 1494, 1499, 1452, 1447, 1455, 1453, 1465, 1460, 4413, 1378, 4633, 1499, 1494, 1494, 1499, 1452, 1447, 1452, 1447, 4635, 1484, 1479, 1509, 1504, 4405, 4407, 1452, 1447, 1453, 1455, 1460, 1465, 4413, 1378, 1489, 1479, 1499, 1494, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4424, 1378, 1479, 1489, 1499, 1494, 1499, 1494, 1452, 1447, 4642, 1452, 1447, 4644, 1465, 1460, 4438, 1452, 1447, 4646, 1452, 1447, 4648, 1465, 1460, 4445, 1484, 1479, 1484, 1489, 1499, 1494, 1509, 1504, 4458, 1590, 1585, 1600, 1595, 1605, 1610, 1559, 1625, 1620, 4650, 4458, 1585, 1590, 1595, 1600, 1605, 1610, 1559, 1625, 1620, 4652, 1578, 1573, 1590, 1585, 1600, 1595, 1610, 1605, 1615, 1625, 1620, 1635, 1630, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 
4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4719, 4720, 4721, 4722, 4724, 4725, 4727, 4728, 4729, 4730, 4731, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4745, 4746, 4748, 4749, 4751, 4753, 4754, 4756, 4757, 4759, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4794, 4795, 4796, 4797, 4798, 4801, 4802, 4804, 4805, 4806, 4808, 4809, 4812, 4813, 4814, 4816, 4817, 4820, 4821, 4822, 4824, 4825, 4827, 4828, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4842, 4843, 4845, 4846, 4847, 4848, 4849, 4851, 4852, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4878, 4879, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4893, 4894, 4898, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4958, 4959, 4964, 4965, 4969, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982, 4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 5005, 5006, 5007, 5009, 5010, 5011, 5012, 5013, 5014, 5015, 5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 5040, 5041, 5042, 5043, 5044, 5045, 5046, 5047, 5048, 5049, 5050, 5051, 5052, 5053, 5054, 5055, 5056, 5057, 5059, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5069, 5071, 5072, 5073, 
5074, 5075, 5076, 5077, 5078, 5079, 5080, 5081, 5083, 5084, 5085, 5086, 5087, 5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5110, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5119, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5128, 5129, 5130, 5131, 5132, 5133, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148, 5149, 5150, 5151, 5152, 5153, 5154, 5156, 5157, 5159, 5160, 5161, 5162, 5163, 5165, 5166, 5168, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5180, 5181, 5182, 5183, 5184, 5185, 5186, 5187, 5188, 5190, 5191, 5192, 5193, 5194, 5195, 5196, 5197, 5198, 5199, 5201, 5202, 5203, 5204, 5205, 5206, 5207, 5208, 5209, 5210, 5211, 5212, 5213, 4488, 4485, 4604, 4604, 4800, 4799, 4604, 4604, 4800, 4799, 4604, 4604, 4604, 4604, 4603, 4604, 4603, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5248, 5252, 5254, 5259, 5261, 5264, 5268, 5271, 5273, 5275, 5277, 5279, 5281, 5283, 5285, 5289, 5292, 5295, 5297, 5302, 5304, 5306, 5309, 5311, 5313, 5315, 5318, 5320, 5323, 5325, 5329, 5333, 5337, 5341, 5343, 5345, 5347, 5350, 5352, 5354, 5356, 5362, 5364, 5367, 5369, 5372, 5374, 5376, 5380, 5382, 5384, 5387, 5389, 5391, 5394, 5396, 5398, 5403, 5406, 5408, 5410, 5412, 5414, 5417, 5419, 5421, 5423, 5425, 5427, 5430, 5432, 5434, 5438, 5441, 5443, 5445, 5447, 5450, 5452, 5455, 5457, 5459, 5461, 5464, 5466, 5471, 5474, 5477, 5479, 5481, 5485, 5487, 5489, 5491, 5493, 5497, 5499, 5501, 5504, 5506, 5508, 5510, 5512, 5514, 5518, 5521, 5523, 5527, 5530, 5532, 5534, 5536, 5538, 5540, 5542, 5544, 5549, 5551, 5553, 5558, 5560, 5562, 5565, 5568, 5570, 5573, 5575, 5578, 5583, 5586, 5591, 5595, 5598, 5604, 5606, 5608, 5610, 5614, 5616, 5618, 5620, 5622, 5626, 5628, 5630, 5632, 5636, 5640, 5642, 5644, 5648, 5650, 5652, 5654, 5656, 5658, 5662, 5664, 5666, 5668, 5670, 5672, 5675, 5677, 5679, 5686, 5688, 5691, 5693, 5695, 5698, 5701, 5703, 
5705, 5708, 5710, 5712, 5714, 5716, 5719, 5721, 4608, 4607, 5258, 5548, 5557, 5582, 5581, 5590, 5589, 4608, 4607, 5258, 5548, 5557, 5582, 5581, 5590, 5589, 4608, 4607, 5526, 5548, 5557, 5582, 5581, 5590, 5723, 5724, 5468, 4586, 4585, 5725, 4603, 5726, 4603, 5727, 5728, 5288, 4718, 4760, 5366, 5371, 4586, 4585, 4586, 4585, 4586, 4585, 4603, 5436, 4603, 5437, 4603, 4586, 4585, 5468, 5729, 4603, 5730, 4603, 4752, 4760, 5366, 5371, 4586, 4585, 4586, 4585, 4586, 4585, 5340, 5731, 5732, 4603, 5436, 4603, 5437, 4603, 5366, 5371, 4586, 4585, 5402, 5733, 5436, 5734, 5437, 5735, 4586, 4585, 5468, 5736, 5737, 5738, 5739, 4604, 4603, 4608, 4607, 5526, 5548, 5557, 5582, 5581, 5590, 5589, 5635, 5685, 4637, 5685, 4630, 4629, 4637, 5096, 5109, 5635, 5685, 4637, 4637, 5685, 5683, 4654, 4654, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4610, 4609, 4606, 4605, 5941, 5942, 5251, 4610, 4609, 5943, 5449, 4581, 5454, 4582, 5870, 4613, 4614, 4615, 4481, 5944, 4481, 4615, 5945, 4481, 4615, 5567, 4618, 4617, 5772, 4481, 4615, 5946, 5947, 4481, 4615, 5948, 5949, 5593, 5773, 4610, 4609, 4606, 4605, 5950, 5951, 4610, 4609, 5520, 5952, 5449, 4581, 5454, 4582, 5870, 4614, 4613, 4615, 4481, 5953, 4481, 4615, 5954, 4481, 4615, 5567, 4618, 4617, 5772, 4481, 4615, 5955, 5956, 4481, 4615, 5957, 5958, 5593, 5773, 4610, 4609, 4606, 4605, 5959, 5960, 4610, 4609, 5520, 5961, 5270, 4581, 5454, 4582, 5870, 4614, 4613, 4482, 4616, 5962, 4482, 4616, 5963, 4482, 4616, 5567, 4618, 4617, 5772, 4482, 4616, 5964, 5965, 4482, 4616, 5966, 5593, 5773, 4726, 4723, 5674, 5167, 5164, 5681, 5969, 5970, 5971, 5476, 5300, 5848, 5483, 5850, 4960, 4957, 5973, 5937, 5718, 5940, 5975, 5856, 5503, 5859, 5799, 5358, 5976, 4880, 4877, 4844, 4778, 5393, 4853, 4850, 5400, 5978, 5405, 4639, 4638, 5795, 5349, 5797, 4793, 4750, 4717, 5979, 4758, 4755, 5980, 4810, 4807, 5981, 4818, 4517, 4516, 5982, 5983, 5984, 4638, 5985, 5986, 4639, 5987, 5988, 5339, 5780, 5308, 5782, 4744, 5989, 5990, 5991, 5992, 5440, 4574, 
4573, 4803, 5993, 5856, 5503, 5859, 4726, 4723, 5674, 5167, 5164, 5681, 5994, 5995, 5996, 5476, 5300, 5848, 5483, 5850, 4960, 4957, 5998, 5937, 5718, 5940, 6000, 5856, 5503, 5859, 5780, 5308, 5782, 4744, 4750, 4747, 6001, 4758, 4755, 6002, 4810, 4807, 6003, 4818, 4517, 4516, 6004, 6005, 6006, 4638, 6007, 6008, 4639, 6009, 6010, 5339, 4844, 4778, 5393, 4853, 4850, 5400, 6011, 5405, 4639, 4638, 5795, 5349, 5797, 4793, 5799, 5358, 6012, 4880, 4877, 6014, 6015, 6016, 6017, 5440, 4574, 4573, 4803, 6018, 5856, 5503, 5859, 4810, 4807, 6019, 4818, 4815, 6020, 4826, 4823, 4829, 6021, 6022, 5386, 4639, 4638, 4844, 4841, 5393, 4853, 4850, 5400, 6023, 4639, 4638, 5405, 5821, 5416, 5823, 4880, 4877, 5827, 5429, 5829, 4895, 4892, 6025, 6027, 5440, 4911, 4574, 4573, 5856, 5503, 5859, 5449, 4581, 5454, 4582, 5463, 4616, 4615, 5844, 5155, 4935, 5674, 5167, 5164, 5681, 6029, 6030, 6031, 5476, 5473, 5848, 5483, 5850, 4960, 4957, 6032, 5853, 6034, 5854, 5718, 5940, 6036, 6037, 5856, 5503, 5859, 4610, 4609, 4606, 4605, 6038, 6039, 4610, 4609, 5520, 6040, 5529, 5008, 5870, 4614, 4613, 5546, 4616, 4615, 6041, 5555, 4616, 4615, 6042, 5564, 4616, 4615, 5567, 4618, 4617, 5885, 5580, 5577, 6043, 6044, 5588, 5585, 6045, 6046, 5593, 5891, 5908, 5646, 5910, 4639, 4638, 5914, 5600, 5916, 4641, 4640, 5155, 5158, 5674, 5164, 5118, 5681, 6047, 6048, 5906, 6049, 5937, 5718, 5940, 5908, 5646, 5910, 4639, 4638, 5914, 5600, 5916, 4641, 4640, 5155, 5082, 5674, 5164, 5118, 5681, 6050, 6051, 6052, 5926, 6053, 5937, 5718, 5940, 5895, 5612, 6054, 4641, 4640, 5900, 5624, 6055, 4641, 4640, 5908, 5646, 5910, 4639, 4638, 5155, 5158, 5674, 5164, 5118, 5681, 6056, 6057, 5906, 6058, 5928, 5697, 5189, 6059, 5932, 5707, 5200, 5908, 5646, 5910, 4639, 4638, 5914, 5660, 5916, 4641, 4640, 5158, 5155, 5674, 5167, 5164, 5681, 6060, 6061, 5926, 6062, 5928, 5697, 5189, 6063, 5932, 5707, 5200, 4654, 5937, 5718, 5940, 58, 59, 60, 61, 62, 63, 6080, 6081, 6082, 6083, 6084, 6086, 6087, 6088, 6090, 6091, 6092, 6093, 6094, 6095, 
6096, 6097, 6098, 6100, 6101, 6103, 6104, 6105, 6106, 6107, 6108, 6109, 6110, 6111, 6113, 6114, 6115, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6125, 6126, 6127, 6129, 6130, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6139, 6140, 6142, 6143, 6144, 6145, 6146, 6147, 6148, 6149, 6150, 6152, 6153, 6154, 6156, 6157, 6158, 6159, 6160, 6161, 6162, 6164, 6165, 6166, 6168, 6169, 6170, 6171, 6172, 6173, 6174, 6175, 6176, 6178, 6179, 6181, 6182, 6183, 6184, 6185, 6186, 6187, 6188, 6189, 6191, 6192, 6193, 6194, 6195, 6196, 6197, 6198, 6199, 6200, 6201, 6202, 6205, 6206, 6207, 6208, 6209, 6210, 6211, 5972, 6213, 6214, 6215, 5974, 6217, 6218, 6219, 6220, 6221, 6223, 6224, 6225, 6226, 6227, 6228, 6229, 6230, 6232, 6233, 6234, 6235, 6236, 6237, 6238, 6239, 6240, 6242, 6243, 6245, 6246, 6248, 6249, 6250, 6252, 6254, 6255, 6257, 6258, 6260, 6261, 6262, 6263, 6264, 6269, 6270, 6271, 6272, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6281, 6282, 6283, 6286, 6287, 6288, 6289, 6290, 6291, 6292, 5997, 6294, 6295, 6296, 5999, 6298, 6299, 6300, 6301, 6302, 6303, 6304, 6305, 6306, 6308, 6309, 6311, 6312, 6314, 6315, 6316, 6318, 6320, 6321, 6323, 6324, 6326, 6327, 6328, 6329, 6330, 6331, 6332, 6334, 6335, 6336, 6337, 6338, 6339, 6340, 6341, 6342, 6344, 6345, 6350, 6351, 6352, 6353, 6355, 6356, 6357, 6358, 6359, 6361, 6362, 6364, 6365, 6366, 6367, 6369, 6370, 6371, 6372, 6373, 6374, 6375, 6376, 6377, 6379, 6380, 6381, 6382, 6383, 6384, 6385, 6386, 6387, 6388, 6389, 6390, 6391, 6394, 6395, 6396, 6397, 6398, 6399, 6400, 6401, 6402, 6403, 6404, 6405, 6406, 6407, 6408, 6409, 6410, 6411, 6412, 6413, 6414, 6415, 6418, 6419, 6420, 6421, 6422, 6423, 6424, 6426, 6428, 6429, 6430, 6431, 6433, 6434, 6435, 6436, 6437, 6438, 6439, 6440, 6442, 6443, 6444, 6446, 6447, 6448, 6449, 6450, 6451, 6452, 6453, 6455, 6456, 6457, 6459, 6460, 6461, 6462, 6463, 6464, 6465, 6466, 6467, 6468, 6470, 6471, 6472, 6474, 6475, 6476, 6477, 6478, 6479, 6480, 6481, 6482, 6483, 6484, 6485, 6486, 6487, 6488, 6489, 6490, 6491, 6492, 
6494, 6496, 6497, 6498, 6499, 6500, 6501, 6502, 6503, 6504, 6505, 6506, 6507, 6508, 6509, 6510, 6511, 6512, 6513, 6514, 6515, 6518, 6520, 6521, 6522, 6523, 6524, 6526, 6527, 6528, 6529, 6531, 6532, 6533, 6534, 6535, 6536, 6537, 6538, 6539, 6540, 6541, 6542, 6543, 6544, 6546, 6548, 6549, 6550, 6552, 6553, 6554, 6555, 6556, 6557, 6558, 6559, 6560, 6561, 6562, 6563, 6564, 6565, 6566, 6567, 6568, 6569, 6570, 6571, 6573, 6575, 6576, 6577, 6579, 6580, 6581, 6582, 6583, 6584, 6585, 6268, 6266, 6349, 6347, 6393, 6392, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6592, 6594, 6597, 6605, 6607, 6609, 6611, 6614, 6617, 6620, 6625, 6627, 6630, 6638, 6640, 6642, 6644, 6647, 6650, 6653, 6658, 6660, 6663, 6671, 6673, 6675, 6677, 6680, 6683, 6686, 6688, 6691, 6694, 6697, 6698, 6703, 6715, 6717, 6720, 6723, 6730, 6732, 6734, 6736, 6750, 6756, 6759, 6762, 6763, 6768, 6782, 6784, 6786, 6788, 6797, 6800, 6803, 6812, 6815, 6821, 6823, 6825, 6829, 6832, 6835, 6838, 6844, 6849, 6852, 6862, 6866, 6869, 6872, 6873, 6878, 6888, 6890, 6893, 6899, 6901, 6904, 6907, 6911, 6914, 6917, 6925, 6930, 6932, 6935, 6946, 6951, 6953, 6956, 6959, 6966, 6970, 6975, 6977, 6980, 6994, 6999, 7001, 7004, 6603, 6601, 6624, 6636, 6634, 6657, 6669, 6667, 6690, 6701, 6706, 6708, 6710, 6712, 6714, 6727, 6729, 6744, 6742, 6740, 6746, 6748, 7019, 7020, 6753, 6755, 6766, 6771, 6773, 6775, 6777, 6779, 6781, 6796, 6794, 6792, 6807, 6809, 6811, 7021, 7022, 6818, 6820, 6842, 6847, 7023, 7024, 6855, 6857, 6861, 6859, 6921, 6876, 6881, 6880, 6883, 6885, 6887, 6897, 6921, 6923, 6928, 6939, 6940, 6942, 6944, 6949, 6961, 6963, 6965, 6969, 6973, 6984, 6985, 6987, 6988, 6990, 6992, 6997, 7008, 7009, 7011, 7012, 7014, 7016, 7018, 61, 62, 63, 7040, 7042, 7050, 7052, 7060, 7062, 7079, 7083, 7084, 7093, 7096, 7098, 7102, 7105, 7108, 7109, 7115, 7117, 7119, 7120, 7121, 7043, 7143, 7144, 6613, 6102, 6099, 6616, 6622, 6619, 7145, 7053, 7146, 7147, 6646, 6141, 6138, 6649, 6655, 6652, 7148, 7063, 7149, 7150, 6679, 6180, 
6177, 6682, 7070, 6685, 7151, 6696, 6693, 7074, 7152, 7075, 7153, 7154, 7155, 7156, 7157, 7076, 6722, 6719, 7158, 7159, 6247, 6244, 6241, 7160, 7161, 7162, 7163, 7164, 7165, 7167, 7168, 6761, 6758, 7088, 7169, 7089, 7170, 7171, 7172, 7173, 7174, 7175, 6313, 6310, 6307, 7176, 7177, 7178, 6802, 6799, 7179, 7180, 7181, 7097, 7182, 7184, 7185, 6827, 6363, 6360, 6837, 6834, 7186, 7106, 7187, 7107, 7188, 7190, 7191, 7118, 7192, 7193, 6865, 6919, 6916, 7194, 6871, 6868, 7113, 7195, 7114, 7196, 7197, 7198, 7199, 7200, 7118, 7201, 6913, 6919, 6916, 7202, 7203, 7125, 7204, 7126, 6937, 6934, 7205, 7206, 7207, 7208, 7129, 7209, 7130, 6958, 6955, 6960, 7210, 7211, 7212, 7134, 7213, 7135, 7214, 7136, 6982, 6979, 7215, 7216, 7217, 7218, 7219, 7220, 7139, 7221, 7140, 7006, 7003, 7222, 7223, 7224, 7225, 7226, 7227, 7228, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6089, 6596, 7253, 7256, 7257, 7258, 7259, 7260, 7261, 6128, 6629, 7263, 7266, 7267, 7268, 7269, 7270, 7271, 6167, 6662, 7273, 7276, 7277, 7278, 7279, 7280, 7281, 7283, 7284, 7285, 7287, 7293, 7294, 7295, 7238, 6251, 7298, 7299, 7300, 7301, 7240, 7309, 7310, 7311, 7313, 6317, 7320, 7321, 7322, 7323, 7326, 7327, 7242, 7331, 7243, 7335, 7336, 7337, 7244, 7338, 7339, 7245, 7341, 7343, 7246, 6445, 6892, 7347, 6910, 6458, 6454, 7350, 7351, 7352, 7354, 7355, 7356, 7358, 7359, 6445, 6892, 7364, 6910, 6458, 6454, 7366, 7367, 7368, 7371, 7373, 7374, 7375, 7380, 7382, 7383, 7384, 7385, 7389, 7391, 7393, 7394, 7395, 7402, 7404, 7405, 7406, 7291, 7289, 7305, 7297, 7308, 7317, 7315, 7319, 7329, 7334, 7346, 7363, 7378, 7387, 7400, 7398, 7413, 7411, 7409, 61, 62, 63, 7424, 7425, 7426, 7427, 7431, 7433, 7434, 7435, 7436, 7440, 7442, 7443, 7444, 7445, 7449, 7451, 7456, 7458, 7459, 7461, 7463, 7464, 7465, 7469, 7471, 7473, 7474, 7476, 7478, 7479, 7482, 7483, 7485, 7488, 7489, 7490, 7491, 7492, 7493, 7494, 7496, 7498, 7503, 7504, 7505, 7506, 7507, 7508, 7510, 7514, 7518, 7524, 7528, 7454, 7530, 7531, 7532, 7533, 7455, 7534, 7468, 7535, 7536, 
7537, 7477, 7538, 7539, 7487, 7486, 7540, 7501, 7541, 7361, 7513, 7512, 7542, 7517, 7516, 7543, 7523, 7522, 7521, 7544, 7545, 7527, 7526, 7546, 7547, 7548, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7552, 7554, 7555, 7557, 7559, 7560, 7562, 7564, 7565, 7570, 7575, 7581, 7586, 7588, 7589, 7594, 7597, 7262, 7272, 7282, 7605, 7453, 7606, 7569, 7610, 7573, 7612, 7467, 7613, 7579, 7616, 7580, 7619, 7620, 7584, 7585, 7353, 7622, 7500, 7624, 7369, 7376, 7625, 7626, 7520, 7628, 7629, 7396, 7631, 7632, 7633, 7634, 7407, 7636, 7637, 7638, 56, 57, 58, 59, 60, 61, 62, 63, 7689, 7690, 7694, 7696, 7681, 7697, 7430, 7684, 7698, 7439, 7687, 7699, 7448, 7701, 7703, 7705, 7707, 7709, 7615, 7711, 7714, 7582, 7712, 7715, 7693, 7716, 7718, 7623, 7596, 7720, 7721, 7724, 7727, 7729, 7732, 7735, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7748, 7750, 7751, 7753, 7754, 7756, 7700, 7572, 7608, 7611, 7706, 7577, 7618, 7765, 7621, 7768, 7495, 7717, 7772, 7509, 7774, 7775, 7776, 7778, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7749, 7752, 7755, 7815, 7819, 7764, 7824, 7827, 7828, 7829, 7830, 7831, 7708, 7702, 7771, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7875, 7876, 7766, 7769, 7773, 7874, 7873, 7872, 7884, 7885, 7886, 7779, 7731, 7630, 7627, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7816, 7937, 7941, 7942, 7943, 7822, 7939, 7940, 7947, 7948, 7949, 7950, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 
23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8000, 8001, 8002, 8005, 8006, 8007, 8008, 8010, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8066, 7820, 7817, 8070, 7946, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8129, 8130, 8131, 8132, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7944, 8067, 8194, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8256, 8258, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8320, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8384, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8448, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8321, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 655, 657, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 753, 755, 757, 759, 762, 764, 766, 768, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 837, 
839, 841, 843, 847, 849, 851, 853, 855, 857, 860, 862, 864, 866, 869, 871, 874, 876, 881, 883, 885, 887, 889, 891, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 919, 921, 924, 926, 929, 931, 934, 936, 939, 941, 943, 945, 948, 950, 953, 955, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1041, 1043, 1045, 1047, 1050, 1052, 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1083, 1085, 1087, 1089, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1143, 1145, 1148, 1150, 1153, 1155, 1158, 1160, 1166, 1168, 1171, 1173, 1176, 1178, 1181, 1183, 1186, 1188, 1190, 1192, 1194, 1196, 1199, 1201, 1204, 1206, 1208, 1210, 1212, 1214, 1216, 1218, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1380, 1382, 1384, 1386, 1388, 1390, 1392, 1394, 1397, 1399, 1401, 1403, 1406, 1408, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1431, 1433, 1435, 1437, 1439, 1441, 1444, 1446, 1449, 1451, 1457, 1459, 1462, 1464, 1468, 1470, 1472, 1474, 1476, 1478, 1481, 1483, 1486, 1488, 1491, 1493, 1496, 1498, 1501, 1503, 1506, 1508, 1511, 1513, 1515, 1517, 1519, 1521, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1556, 1558, 1561, 1563, 1565, 1567, 1570, 1572, 1575, 1577, 1582, 1584, 1587, 1589, 1592, 1594, 1597, 1599, 
1602, 1604, 1607, 1609, 1612, 1614, 1617, 1619, 1622, 1624, 1627, 1629, 1632, 1634, 1039, 1048, 1163, 1163, 136, 1163, 1163, 137, 760, 1442, 1442, 1429, 1429, 751, 1454, 1454, 1442, 1442, 572, 572, 760, 1395, 1395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1429, 1429, 1454, 1454, 1442, 1442, 1404, 1404, 1235, 1235, 1256, 1256, 1568, 1568, 751, 1454, 1454, 1442, 1442, 374, 374, 1454, 1454, 1442, 1442, 395, 395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1454, 1454, 1442, 1442, 494, 1395, 1395, 1404, 1404, 523, 1395, 1395, 1404, 1404, 572, 572, 751, 751, 572, 572, 751, 751, 1522, 1522, 1568, 1568, 572, 572, 751, 751, 917, 917, 1442, 1442, 1235, 1256, 1395, 1395, 1404, 1404, 769, 769, 769, 769, 751, 751, 769, 769, 769, 769, 760, 760, 770, 770, 867, 878, 835, 844, 867, 878, 917, 917, 957, 957, 1039, 1048, 1081, 1090, 1124, 1124, 1124, 1124, 1163, 1163, 1163, 1163, 1442, 1442, 1235, 1256, 1235, 1235, 1256, 1256, 1454, 1454, 1579, 1395, 1404, 1395, 1404, 1454, 1454, 1442, 1442, 1442, 1442, 1454, 1454, 1522, 1522, 1568, 1568, 1579, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 
3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 3505, 3507, 3509, 3511, 3513, 3515, 3517, 3519, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3579, 3581, 3583, 3585, 3587, 3589, 3591, 3593, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3751, 3753, 3755, 3757, 3759, 3761, 3763, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 3801, 3803, 3805, 3807, 3809, 3811, 3813, 3815, 3817, 3819, 3821, 3823, 3825, 3827, 3829, 3831, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3885, 3887, 3889, 3891, 3893, 3895, 3897, 3899, 3901, 3903, 3905, 1655, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1681, 1688, 1689, 1690, 1691, 1696, 1697, 1698, 1701, 1702, 1708, 1709, 1710, 1722, 1723, 1726, 1727, 1730, 1731, 1733, 1734, 1737, 1738, 1741, 1742, 1744, 1745, 1748, 1751, 1765, 1766, 1781, 1782, 1788, 1789, 1790, 1791, 1794, 1795, 1797, 1800, 1801, 1804, 1805, 1806, 1807, 1811, 1812, 1815, 1816, 1817, 1818, 1822, 1823, 1826, 1827, 1830, 1831, 1843, 1844, 1847, 1848, 1854, 1855, 1858, 1859, 1879, 1884, 1885, 1888, 1889, 1897, 1902, 1903, 1906, 1907, 1908, 1909, 1910, 1911, 1913, 1914, 1915, 
1916, 1923, 1926, 1929, 1930, 1931, 1932, 1933, 1934, 1940, 1946, 1958, 1959, 1962, 1964, 1983, 1984, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2006, 2008, 2022, 2025, 2031, 2033, 2039, 2042, 2050, 2051, 2058, 2061, 2082, 2085, 2094, 2097, 2107, 2108, 2110, 2111, 2119, 2120, 2122, 2123, 2135, 2136, 2138, 2140, 2152, 2153, 2166, 2167, 2176, 2177, 2184, 2197, 2200, 2213, 2216, 2219, 2220, 2223, 2224, 2230, 2231, 2234, 2235, 2257, 2258, 2269, 2270, 2273, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4097, 4096, 4098, 4100, 4102, 4101, 4104, 4103, 4105, 4379, 4106, 4108, 4107, 4110, 4109, 4111, 4113, 4112, 4127, 4379, 4115, 4114, 4116, 4119, 4118, 4121, 4120, 4123, 4122, 4125, 4124, 4127, 4126, 4129, 4128, 4484, 4487, 4131, 4130, 4133, 4132, 4134, 4135, 4137, 4136, 4491, 4493, 4138, 4140, 4139, 572, 4496, 4203, 4202, 4498, 4143, 4142, 4144, 4145, 572, 4500, 4269, 4147, 4221, 4271, 4274, 4222, 4182, 4279, 4225, 4429, 4428, 4503, 4149, 4148, 4505, 4151, 4150, 4507, 4152, 4509, 4154, 4153, 4511, 4156, 4155, 4513, 4157, 4515, 4159, 4158, 4160, 4159, 4161, 4162, 4164, 4163, 4165, 4166, 4168, 4167, 4169, 4170, 4172, 4171, 4173, 4519, 4174, 4267, 4266, 4220, 4269, 4271, 4175, 4274, 4176, 4182, 4279, 4177, 4430, 4396, 4521, 4179, 4178, 4181, 4180, 4182, 4523, 4525, 572, 572, 4527, 572, 4186, 4185, 4530, 4188, 4187, 4532, 4534, 1466, 4191, 4190, 4536, 4193, 4192, 4538, 4540, 1466, 4203, 4195, 4542, 4400, 4399, 4544, 4205, 4196, 4546, 4197, 4198, 4211, 4199, 4213, 4212, 4267, 4200, 4201, 4203, 4202, 4548, 4400, 4399, 4550, 4205, 4204, 1429, 4442, 4441, 4552, 4440, 4439, 4554, 4444, 4443, 1466, 4207, 4260, 4209, 4208, 4268, 4211, 4210, 4213, 4212, 4214, 4220, 4215, 4221, 4274, 4222, 4216, 4279, 4218, 4429, 4219, 4557, 4280, 4416, 4559, 4220, 4269, 4221, 4271, 4274, 4222, 4223, 4279, 4225, 4429, 4428, 4562, 4430, 4396, 4564, 4566, 4568, 4226, 4570, 4572, 4227, 4229, 4228, 4230, 4232, 4231, 4234, 4233, 4236, 4235, 
4576, 4578, 4580, 4238, 4237, 4239, 4242, 4241, 4244, 4243, 4245, 4248, 4247, 4250, 4249, 4252, 4251, 4254, 4253, 4255, 4257, 4256, 4259, 4258, 4584, 4260, 4261, 4262, 4264, 4263, 4265, 4267, 4266, 4268, 4270, 4269, 4272, 4271, 4274, 4273, 4275, 4277, 4279, 4278, 4429, 4428, 4588, 4280, 4416, 4590, 4592, 4594, 4596, 4282, 4281, 4598, 4600, 4602, 4284, 4283, 769, 769, 4459, 4454, 4285, 4473, 4464, 4286, 4287, 4466, 4288, 4290, 4289, 4292, 4291, 4294, 4293, 4296, 4295, 4298, 4297, 4299, 4301, 4304, 4303, 4305, 4307, 4306, 4309, 4308, 4310, 4312, 4314, 4313, 4315, 4318, 4317, 4612, 4320, 4319, 4322, 4321, 4324, 4323, 4326, 4325, 4328, 4327, 4330, 4329, 4332, 4331, 4333, 4334, 4335, 4337, 4336, 4339, 4338, 4341, 4340, 4342, 4343, 4344, 4346, 4345, 4348, 4347, 4350, 4349, 4351, 4353, 4352, 4354, 4356, 4355, 4358, 4357, 4379, 4382, 4359, 4361, 4360, 4362, 4364, 4363, 4365, 4620, 4366, 4622, 4367, 4369, 4368, 4370, 4372, 4371, 4373, 4624, 4374, 4626, 4375, 4377, 4376, 4378, 4379, 4382, 4381, 4383, 4459, 4384, 4394, 4628, 4385, 4386, 4387, 4389, 4388, 4391, 4418, 4393, 4392, 4422, 4421, 4394, 4395, 4632, 4429, 4428, 4430, 4396, 4391, 4418, 4393, 4392, 4422, 4421, 4394, 4395, 4634, 4429, 4428, 4430, 4396, 4398, 4397, 4400, 4399, 4636, 4401, 4402, 4404, 4403, 4406, 4406, 4409, 4408, 4419, 4410, 4421, 4411, 4412, 4414, 4426, 4415, 4429, 4428, 4431, 4416, 4418, 4417, 4420, 4419, 4422, 4421, 4423, 4425, 4427, 4426, 4429, 4428, 4431, 4430, 4433, 4432, 4643, 4435, 4434, 4645, 4437, 4436, 1429, 4440, 4439, 4647, 4442, 4441, 4649, 4444, 4443, 1466, 4446, 4447, 4448, 4449, 4451, 4450, 4453, 4452, 4457, 4459, 4454, 4461, 4455, 4464, 4463, 4456, 4467, 4466, 4651, 4457, 4460, 4459, 4462, 4461, 4464, 4463, 4465, 4467, 4466, 4653, 4469, 4468, 4471, 4470, 4473, 4472, 4475, 4474, 4476, 4478, 4477, 4480, 4479, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 
1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1679, 1680, 1682, 1683, 1684, 1685, 1686, 1687, 1692, 1693, 1694, 1695, 1699, 1700, 1703, 1704, 1705, 1706, 1707, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1724, 1725, 1728, 1729, 1732, 1735, 1736, 1739, 1740, 1743, 1746, 1747, 1749, 1750, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1783, 1784, 1785, 1786, 1787, 1792, 1793, 1796, 1798, 1799, 1802, 1803, 1808, 1809, 1810, 1813, 1814, 1819, 1820, 1821, 1824, 1825, 1828, 1829, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1845, 1846, 1849, 1850, 1851, 1852, 1853, 1856, 1857, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1880, 1881, 1882, 1883, 1886, 1887, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1898, 1899, 1900, 1901, 1904, 1905, 1912, 1917, 1918, 1919, 1920, 1921, 1922, 1924, 1925, 1927, 1928, 1935, 1936, 1937, 1938, 1939, 1941, 1942, 1943, 1944, 1945, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1960, 1961, 1963, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1985, 1986, 1995, 1996, 2003, 2004, 2005, 2007, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2029, 2030, 2032, 2034, 2035, 2036, 2037, 2038, 2040, 2041, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2052, 2053, 2054, 2055, 2056, 2057, 2059, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2083, 2084, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2095, 2096, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2109, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 
2121, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2137, 2139, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2178, 2179, 2180, 2181, 2182, 2183, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2198, 2199, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2214, 2215, 2217, 2218, 2221, 2222, 2225, 2226, 2227, 2228, 2229, 2232, 2233, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2271, 2272, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 4708, 4707, 4732, 4912, 4555, 4555, 4732, 4912, 4555, 4555, 4896, 4899, 4912, 4962, 4961, 4967, 4966, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5249, 5253, 5255, 5260, 5262, 5265, 5269, 5272, 5274, 5276, 5278, 5280, 5282, 5284, 5286, 5290, 5293, 5296, 5298, 5303, 5305, 5307, 5310, 5312, 5314, 5316, 5319, 5321, 5324, 5326, 5330, 5334, 5338, 5342, 5344, 5346, 5348, 5351, 5353, 5355, 5357, 5363, 5365, 5368, 5370, 5373, 5375, 5377, 5381, 5383, 5385, 5388, 5390, 5392, 5395, 5397, 5399, 5404, 5407, 5409, 5411, 5413, 5415, 5418, 5420, 5422, 5424, 5426, 5428, 5431, 5433, 5435, 5439, 5442, 5444, 5446, 5448, 5451, 5453, 5456, 5458, 5460, 5462, 5465, 5467, 5472, 5475, 5478, 5480, 5482, 5486, 5488, 5490, 5492, 5494, 5498, 5500, 5502, 5505, 5507, 5509, 5511, 5513, 5515, 5519, 5522, 5524, 5528, 5531, 5533, 5535, 5537, 5539, 5541, 5543, 5545, 5550, 5552, 5554, 5559, 5561, 5563, 5566, 5569, 5571, 5574, 5576, 5579, 5584, 5587, 5592, 5596, 5599, 5605, 5607, 5609, 5611, 5615, 5617, 5619, 5621, 5623, 5627, 5629, 5631, 5633, 5637, 5641, 5643, 5645, 5649, 5651, 5653, 5655, 5657, 5659, 5663, 5665, 5667, 5669, 5671, 5673, 5676, 5678, 5680, 5687, 5689, 5692, 5694, 
5696, 5699, 5702, 5704, 5706, 5709, 5711, 5713, 5715, 5717, 5720, 5722, 5517, 5250, 5525, 5263, 5556, 5060, 5058, 5070, 5068, 5517, 5516, 5525, 5263, 5556, 5060, 5058, 5070, 5068, 5517, 5516, 5525, 5547, 5556, 5060, 5058, 5070, 2399, 2400, 5634, 5470, 5299, 2419, 5301, 2424, 5361, 2431, 2432, 5287, 5317, 5322, 4811, 4819, 5328, 5291, 5332, 5331, 5336, 5335, 5294, 4494, 5360, 4494, 5361, 5470, 5299, 5682, 2503, 5301, 2508, 5361, 5317, 5322, 4811, 4819, 5328, 5327, 5332, 5331, 5336, 5335, 5401, 2555, 2556, 5359, 4528, 5360, 4528, 5361, 4811, 4819, 5379, 5378, 5401, 2605, 4897, 2607, 4900, 2613, 5470, 5469, 5682, 2641, 2642, 2644, 2645, 5496, 5495, 5517, 5516, 5525, 5547, 5556, 5060, 5058, 5070, 5068, 5634, 5684, 5597, 5684, 5602, 5601, 5603, 5613, 5625, 5634, 5684, 5638, 5639, 5684, 5682, 5690, 5700, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5863, 5862, 5861, 5760, 2289, 2290, 5864, 5866, 5865, 2294, 5761, 5767, 5838, 5839, 5869, 5871, 5762, 5764, 5763, 2304, 5878, 5876, 2307, 5881, 5879, 5882, 5884, 5765, 5256, 5887, 5886, 2316, 2317, 5889, 5888, 2320, 2321, 5890, 5257, 5863, 5862, 5861, 5860, 2328, 2329, 5866, 5865, 5864, 2333, 5836, 5767, 5838, 5839, 5869, 5872, 5871, 5764, 5763, 2343, 5878, 5876, 2346, 5881, 5879, 5882, 5884, 5765, 5266, 5887, 5886, 2355, 2356, 5889, 5888, 2359, 2360, 5890, 5267, 5863, 5862, 5861, 5860, 2367, 2368, 5866, 5865, 5864, 2372, 5766, 5767, 5768, 5839, 5869, 5872, 5871, 5770, 5769, 2382, 5878, 5877, 2385, 5881, 5880, 5882, 5884, 5883, 5771, 5887, 5886, 2394, 2395, 5889, 5888, 2398, 5890, 5594, 5777, 5919, 5778, 5923, 5922, 5924, 2409, 2410, 2411, 5846, 5845, 5847, 5849, 5484, 5852, 5851, 2420, 4489, 5938, 5939, 2425, 4489, 5857, 5858, 5820, 5800, 5977, 5825, 5824, 5812, 5811, 5774, 5815, 5814, 5816, 2441, 5793, 5819, 5818, 5794, 5796, 4560, 5798, 5785, 5775, 2451, 5787, 5786, 2454, 5802, 5801, 2457, 5804, 5789, 5788, 2461, 2462, 2463, 5776, 2465, 2466, 5791, 2468, 2469, 5792, 5779, 5781, 4560, 5783, 2475, 2476, 
2477, 2478, 5832, 5834, 5833, 5835, 2483, 4494, 5857, 5858, 5777, 5905, 5778, 5923, 5922, 5924, 2493, 2494, 2495, 5846, 5845, 5847, 5849, 5484, 5852, 5851, 2504, 4501, 5938, 5939, 2509, 4501, 5857, 5858, 5779, 5781, 4560, 5783, 5785, 5784, 2519, 5787, 5786, 2522, 5802, 5801, 2525, 5804, 5789, 5788, 2529, 2530, 2531, 5790, 2533, 2534, 5791, 2536, 2537, 5792, 5812, 5811, 5813, 5815, 5814, 5816, 2545, 5793, 5819, 5818, 5794, 5796, 4560, 5798, 5820, 5800, 6013, 5825, 5824, 2559, 2560, 2561, 2562, 5832, 5834, 5833, 5835, 2567, 4528, 5857, 5858, 5802, 5801, 2573, 5804, 5803, 2576, 5806, 5805, 5807, 2580, 2581, 5810, 5809, 5808, 5812, 5811, 5813, 5815, 5814, 5816, 2591, 5819, 5818, 5817, 5820, 5822, 4555, 5825, 5824, 5826, 5828, 4560, 5831, 5830, 2606, 2608, 5832, 5835, 5834, 5833, 4913, 5857, 5858, 5836, 5837, 5838, 5839, 5842, 5841, 5840, 5843, 5919, 5904, 5921, 5923, 5922, 5924, 2631, 2632, 2633, 5846, 5845, 5847, 5849, 5484, 5852, 5851, 6033, 4963, 6035, 4968, 5938, 5939, 2649, 2650, 5855, 5857, 5858, 5863, 5862, 5861, 5860, 2658, 2659, 5866, 5865, 5864, 2663, 5867, 5868, 5869, 5872, 5871, 5875, 5874, 5873, 2672, 5878, 5877, 5876, 2676, 5881, 5880, 5879, 5882, 5884, 5883, 5572, 5887, 5886, 2686, 2687, 5889, 5888, 2690, 2691, 5890, 5594, 5907, 5909, 5647, 5912, 5911, 5913, 5915, 5661, 5918, 5917, 5905, 5920, 5921, 5922, 5923, 5924, 2710, 2711, 5925, 2713, 5892, 5938, 5939, 5907, 5909, 5647, 5912, 5911, 5913, 5915, 5661, 5918, 5917, 5905, 5920, 5921, 5922, 5923, 5924, 2733, 2734, 2735, 5925, 2737, 5893, 5938, 5939, 5894, 5896, 2743, 5898, 5897, 5899, 5901, 2748, 5903, 5902, 5907, 5909, 5647, 5912, 5911, 5905, 5904, 5921, 5922, 5923, 5924, 2762, 2763, 5925, 2765, 5927, 5929, 5930, 2769, 5931, 5933, 5934, 5907, 5909, 5647, 5912, 5911, 5913, 5915, 5661, 5918, 5917, 5920, 5919, 5921, 5923, 5922, 5924, 2789, 2790, 5925, 2792, 5927, 5929, 5930, 2796, 5931, 5933, 5934, 5935, 5936, 5938, 5939, 58, 59, 60, 61, 62, 63, 2285, 2286, 2287, 2288, 6085, 2291, 2292, 2293, 2295, 2296, 
2297, 2298, 2299, 2300, 2301, 2302, 2303, 2305, 2306, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 6112, 2318, 2319, 6116, 2322, 2323, 2324, 2325, 2326, 2327, 6124, 2330, 2331, 2332, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2344, 2345, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 6151, 2357, 2358, 6155, 2361, 2362, 2363, 2364, 2365, 2366, 6163, 2369, 2370, 2371, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2383, 2384, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 6190, 2396, 2397, 5967, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 6203, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 6212, 2421, 2422, 2423, 6216, 2426, 2427, 2428, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2452, 2453, 2455, 2456, 2458, 2459, 2460, 6253, 2464, 6256, 2467, 6259, 2470, 2471, 2472, 2473, 2474, 2479, 2480, 2481, 2482, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 6284, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 6293, 2505, 2506, 2507, 6297, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2520, 2521, 2523, 2524, 2526, 2527, 2528, 6319, 2532, 6322, 2535, 6325, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2557, 2558, 2563, 2564, 2565, 2566, 2568, 2569, 2570, 2571, 2572, 2574, 2575, 2577, 2578, 2579, 6368, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2609, 2610, 2611, 2612, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 6416, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2643, 2646, 2647, 2648, 6432, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 6441, 2660, 2661, 2662, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2673, 2674, 2675, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 6469, 2688, 2689, 6473, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 
2707, 2708, 2709, 6493, 2712, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 6516, 2736, 2738, 2739, 2740, 2741, 2742, 2744, 2745, 2746, 2747, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 6545, 2764, 2766, 2767, 2768, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 6572, 2791, 2793, 2794, 2795, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 6267, 6265, 6348, 6346, 6026, 6024, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6593, 6595, 6598, 6606, 6608, 6610, 6612, 6615, 6618, 6621, 6626, 6628, 6631, 6639, 6641, 6643, 6645, 6648, 6651, 6654, 6659, 6661, 6664, 6672, 6674, 6676, 6678, 6681, 6684, 6687, 5968, 6692, 6695, 6204, 6699, 6704, 6716, 6718, 6721, 6724, 6731, 6733, 6735, 6737, 6751, 6757, 6760, 6285, 6764, 6769, 6783, 6785, 6787, 6789, 6798, 6801, 6804, 6813, 6816, 6822, 6824, 6826, 6830, 6833, 6836, 6839, 6845, 6850, 6853, 6863, 6867, 6870, 6417, 6874, 6879, 6889, 6891, 6894, 6900, 6902, 6905, 6908, 6912, 6915, 6918, 6926, 6931, 6933, 6936, 6947, 6952, 6954, 6957, 6517, 6967, 6971, 6976, 6978, 6981, 6995, 7000, 7002, 7005, 6602, 6600, 6623, 6635, 6633, 6656, 6668, 6666, 6689, 6700, 6705, 6707, 6709, 6711, 6713, 6726, 6728, 6743, 6741, 6739, 6745, 6747, 2865, 2866, 6273, 6754, 6765, 6770, 6772, 6774, 6776, 6778, 6780, 6795, 6793, 6791, 6806, 6808, 6810, 2895, 2896, 6354, 6819, 6841, 6846, 2911, 2912, 6028, 6856, 6860, 6858, 6920, 6875, 6427, 6425, 6882, 6884, 6886, 6896, 6920, 6922, 6927, 6938, 6495, 6941, 6943, 6948, 6519, 6962, 6964, 6968, 6972, 6983, 6547, 6986, 6551, 6989, 6991, 6996, 7007, 6574, 7010, 6578, 7013, 7015, 7017, 61, 62, 63, 7041, 6599, 7051, 6632, 7061, 6665, 6725, 6738, 6752, 6790, 6805, 6817, 6831, 6840, 6854, 6864, 7116, 6895, 6903, 6906, 6909, 6604, 2807, 2808, 7046, 7045, 7044, 7047, 7049, 7048, 2815, 6637, 2819, 2820, 7056, 7055, 7054, 7057, 7059, 7058, 2827, 6670, 
2831, 2832, 7066, 7065, 7064, 7067, 7069, 7068, 2839, 7072, 7071, 7073, 2843, 6702, 2845, 2846, 2847, 2848, 2849, 6222, 7078, 7077, 2854, 2855, 7082, 7081, 7080, 2860, 2861, 2862, 2863, 2864, 7166, 2868, 2869, 7086, 7085, 7087, 2873, 6767, 2875, 2876, 2877, 2878, 2879, 2880, 7092, 7091, 7090, 2885, 2886, 2887, 7095, 7094, 2891, 2892, 2893, 6343, 7183, 2898, 2899, 7101, 7100, 7099, 7104, 7103, 2907, 6843, 2909, 6848, 7189, 2914, 2915, 6898, 2919, 2920, 7122, 7124, 7123, 2927, 7111, 7110, 7112, 2931, 6877, 2933, 2934, 2935, 2936, 2937, 6898, 2941, 7122, 7124, 7123, 2948, 2949, 6924, 2951, 6929, 7128, 7127, 2955, 2956, 2957, 2958, 6945, 2960, 6950, 7132, 7131, 7133, 2965, 2966, 2967, 6525, 2969, 6530, 2971, 6974, 7138, 7137, 2975, 2976, 2977, 2978, 2979, 2980, 6993, 2982, 6998, 7142, 7141, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7233, 7232, 2806, 2809, 2810, 2811, 2812, 2813, 2814, 7235, 7234, 2818, 2821, 2822, 2823, 2824, 2825, 2826, 7237, 7236, 2830, 2833, 2834, 2835, 2836, 2837, 2838, 2840, 2841, 2842, 2844, 2850, 2851, 2852, 6231, 7239, 2857, 2858, 2859, 7302, 6749, 2870, 2871, 2872, 2874, 7241, 2882, 2883, 2884, 7324, 2888, 2889, 6333, 2894, 6814, 2900, 2901, 2902, 6828, 2904, 2905, 6378, 2908, 2910, 6851, 7249, 7248, 2918, 7252, 7251, 7247, 2924, 2925, 2926, 2928, 2929, 2930, 2932, 7360, 7249, 7248, 2940, 7252, 7251, 7250, 2945, 2946, 2947, 2950, 2952, 2953, 2954, 2959, 2961, 2962, 2963, 2964, 2968, 2970, 2972, 2973, 2974, 2981, 2983, 2984, 2985, 7290, 7288, 7304, 7296, 7307, 7316, 7314, 7318, 7328, 7333, 7345, 7362, 7377, 7386, 7399, 7397, 7412, 7410, 7408, 61, 62, 63, 2804, 2805, 7254, 7428, 7432, 2816, 2817, 7264, 7437, 7441, 2828, 2829, 7274, 7446, 7450, 7452, 7457, 2853, 2856, 7462, 7303, 2867, 7466, 2881, 7472, 7325, 7475, 2890, 2897, 7480, 2903, 7484, 2906, 2913, 2916, 2917, 7348, 2921, 2922, 2923, 7497, 7499, 2938, 2939, 7365, 2942, 2943, 2944, 7511, 7515, 7519, 7525, 7529, 7286, 3004, 3005, 3006, 3009, 7292, 
3011, 7312, 3015, 3016, 3019, 7330, 3021, 3022, 7342, 7340, 3028, 7357, 3035, 7502, 7372, 7370, 3043, 7381, 7379, 3047, 7392, 7390, 7388, 3052, 3053, 7403, 7401, 3057, 3058, 3059, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7553, 7255, 7429, 7558, 7265, 7438, 7563, 7275, 7447, 7460, 7470, 7481, 7587, 7349, 7590, 7595, 7598, 7556, 7561, 7566, 3002, 7567, 7607, 7568, 3010, 7306, 3013, 7574, 7614, 7578, 3020, 7332, 3024, 3025, 7583, 7344, 7592, 3033, 7593, 3036, 7600, 7601, 3041, 3042, 7602, 3045, 3046, 7603, 3049, 3050, 3051, 7635, 7604, 3055, 3056, 7639, 56, 57, 58, 59, 60, 61, 62, 63, 7571, 7576, 7591, 7599, 7680, 2994, 7682, 7683, 2997, 7685, 7686, 3000, 7688, 3003, 3007, 3012, 3014, 3018, 7710, 3023, 3026, 7691, 7713, 3029, 7692, 3031, 3034, 7719, 7695, 3038, 3040, 3044, 3048, 7730, 3054, 7640, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2993, 2995, 2996, 2998, 2999, 3001, 7757, 7744, 7758, 7759, 7760, 7745, 7763, 3027, 7767, 3030, 7746, 7770, 3037, 7747, 7722, 7725, 7728, 7733, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7809, 7811, 7813, 3008, 3017, 7821, 3032, 3039, 7723, 7726, 7777, 7734, 7818, 7814, 7825, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7609, 7761, 7877, 7878, 7879, 7812, 7810, 7808, 3063, 3067, 3069, 7883, 7882, 7881, 7880, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7936, 7762, 3060, 3061, 3062, 7938, 7823, 7826, 3071, 3072, 3073, 3074, 12, 13, 14, 15, 16, 
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7704, 7617, 8003, 3065, 3068, 3070, 8009, 8011, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8004, 8065, 8064, 8071, 8068, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3064, 3066, 8069, 3076, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8192, 8193, 3075, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8257, 8195, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7945, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8128, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3077, 1, 2, 3, 4, 
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8512, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
// Per-gate operation selector for the generated circuit: 1 selects multiply,
// 0 selects add (see the `Op[k] ? R[B]*R[C] : R[B]+R[C]` pattern in ac()).
// Machine-generated data — do not edit by hand.
// NOTE(review): presumably copied to the device buffer passed as ac()'s `Op`
// argument by host code outside this chunk — confirm against the caller.
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Launch configuration assumed by ac(): a single block of 64 threads.
// NOTE(review): BLOCKS_PER_GRID, SIZE_OF_IN and SIZE_OF_AC are not used inside
// this chunk — presumably consumed by the host launch code; confirm there.
#define THREADS_PER_BLOCK 64
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 3136
#define SIZE_OF_AC 5504
// Evaluates a machine-generated arithmetic circuit n_iter times.
//
// Layout: the shared scratchpad R holds 135 "levels" of THREADS_PER_BLOCK
// values each. Levels 0..48 are loaded from A; levels 49..134 are gates,
// where gate k computes either R[B[k']] * R[C[k']] or R[B[k']] + R[C[k']]
// depending on Op[k'] (1 = multiply, 0 = add). The __syncthreads() calls
// appear to separate dependency levels of the circuit — do not reorder
// statements across them. After each iteration, thread 0 folds the root
// value R[134*t] into `final`; the accumulated total is written to A[0].
//
// Parameters:
//   A      - in/out: first 49*THREADS_PER_BLOCK floats are circuit inputs;
//            A[0] receives the final accumulated result.
//   B, C   - per-gate operand indices into R (flattened level*t + lane).
//            NOTE(review): indices are not bounds-checked here; assumed to
//            lie in [0, 135*THREADS_PER_BLOCK) — generated with the data
//            tables above, verify if the tables change.
//   Op     - per-gate operation selector (see h_Op above): 1 = mul, 0 = add.
//   n_iter - number of evaluation passes; `final` accumulates across passes.
//
// Preconditions: must be called from a kernel launched with exactly
// THREADS_PER_BLOCK threads in one block (i is used directly as the lane
// index with no bounds guard).
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[135*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
// NOTE(review): every thread writes `final` with no barrier before the
// first use by thread 0 — benign only because all threads store the same
// value (0) and a __syncthreads() follows the input copy below.
final=0;
// Copy the 49 input levels from global A into shared R (one element per
// thread per level).
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
// Barrier: all inputs must be visible before any gate reads them.
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
// Gate levels 49..63 (Op/B/C slots 0..14) — operands come from levels
// already synchronized above.
R[i + 49*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 50*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 51*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 52*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 53*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 54*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 55*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 56*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 57*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 58*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 59*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 60*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 61*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 62*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 63*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
__syncthreads();
// Gate levels 64..72.
R[i + 64*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 65*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 66*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 67*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 68*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 69*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 70*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 71*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 72*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
__syncthreads();
// Gate levels 73..81.
R[i + 73*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 74*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 75*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 76*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 77*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 78*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 79*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 80*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 81*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
__syncthreads();
// Gate levels 82..89.
R[i + 82*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 83*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 84*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 85*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 86*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 87*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 88*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 89*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
__syncthreads();
// Gate levels 90..94.
R[i + 90*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 91*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 92*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 93*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 94*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
__syncthreads();
// Gate levels 95..102.
R[i + 95*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 96*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 97*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 98*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 99*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 100*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 101*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 102*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
__syncthreads();
// Gate levels 103..109.
R[i + 103*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 104*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 105*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 106*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 107*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 108*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 109*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
__syncthreads();
// Gate levels 110..112.
R[i + 110*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 111*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
R[i + 112*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
__syncthreads();
// Gate levels 113..115.
R[i + 113*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 114*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 115*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
__syncthreads();
// Gate levels 116..117.
R[i + 116*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
R[i + 117*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
__syncthreads();
// Gate levels 118..119.
R[i + 118*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 119*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
__syncthreads();
// Final narrow levels 120..134: one gate level per barrier.
R[i + 120*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
__syncthreads();
R[i + 121*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
__syncthreads();
R[i + 122*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
__syncthreads();
R[i + 123*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
__syncthreads();
R[i + 124*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
__syncthreads();
R[i + 125*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
__syncthreads();
R[i + 126*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
__syncthreads();
R[i + 127*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
__syncthreads();
R[i + 128*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
__syncthreads();
R[i + 129*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
__syncthreads();
R[i + 130*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
__syncthreads();
R[i + 131*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
__syncthreads();
R[i + 132*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
__syncthreads();
R[i + 133*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
__syncthreads();
R[i + 134*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
// Thread 0 reads only its own element of the root level (R[0 + 134*t]),
// so no barrier is needed before this accumulation.
if (i==0) { final += R[134*t]; }
// Barrier before the next iteration overwrites gate levels that other
// threads may still need to read.
__syncthreads();
}
// Publish the accumulated result; only thread 0 writes.
if (i==0) { A[0]= final;}
}
| 73bb88490b8fd6d3bb6bf1af8d4082e3fece084b.cu | float h_A[]= {
0.5203431404205534, 0.8397212236917517, 0.8480297885975157, 0.5826219921812311, 0.8835936178913075, 0.5035784748336407, 0.7515095002498209, 0.9251304241177449, 0.7090255192089898, 0.8358676530410938, 0.8610267321433007, 0.5111123121975225, 0.5228948919205396, 0.8433140045336898, 0.8026350145159813, 0.5784592072770343, 0.5786629850526193, 0.7016442448995324, 0.9064564934513507, 0.8288443418591398, 0.6276902458280357, 0.6203700901002256, 0.6468332074041738, 0.7369908913233524, 0.636306788337418, 0.5253791181005871, 0.8769324735620941, 0.5420684137293498, 0.6508777416335889, 0.8821156914037118, 0.8976250606466047, 0.6087595765813151, 0.8708529435738905, 0.9156211084511412, 0.6490906569126473, 0.7461930584917886, 0.7079483669140391, 0.5892851638022565, 0.6432305871550201, 0.5707314888427635, 0.8474908995534844, 0.5726122299939299, 0.6585698746709817, 0.6302103230255871, 0.7657838012516989, 0.8087807480983893, 0.9210323533924294, 0.8955543439765705, 0.5583321562952835, 0.708213489759663, 0.8416640038215653, 0.6330836032968999, 0.7201827739505046, 0.9321913054218232, 0.7020747442540791, 0.7053848528741964, 0.9771554556504422, 0.5609698632907133, 0.5315478943284, 0.7876828456394924, 0.5579599312495511, 0.6366342251557487, 0.8364642688013271, 0.9236731402122698, 0.6253979889733297, 0.6825800606759242, 0.9238888155829234, 0.6074425006711761, 0.6908413724574975, 0.5104678654098072, 0.9802203729125396, 0.8828337394804235, 0.5250817946344204, 0.70397495668948, 0.7729071667971227, 0.735975147985976, 0.5653131245613776, 0.7575126172072388, 0.5751377499739115, 0.8024426496853787, 0.8915080767630985, 0.6212695660026939, 0.8087143486195272, 0.8107431743984268, 0.8025250072478772, 0.5300264243571515, 0.5829735010271118, 0.6818140734353173, 0.7745569279648881, 0.9947518710683161, 0.904275093601373, 0.6972181232397444, 0.8240464942563854, 0.5231453145673379, 0.5091364629855013, 0.885059504946083, 0.6222865977997725, 0.9496123412793068, 0.9742295591013548, 0.6775681294467919, 
0.8748045298794692, 0.6359858312355736, 0.6876187317033112, 0.9236274294753131, 0.690211699853871, 0.6228848727884637, 0.8577516942663482, 0.6732975029748434, 0.8801466872283896, 0.917260529958412, 0.9826311419772767, 0.7193558438522033, 0.8324480784589878, 0.9328799835470047, 0.7339317804794636, 0.644083753667136, 0.7272132590971723, 0.5918541967807439, 0.6383669203464157, 0.7971867704819374, 0.5418019895417568, 0.960757842379981, 0.8538155006150716, 0.5170548275754467, 0.5486801645884092, 0.6716797662333205, 0.5082173052270811, 0.5250332622756126, 0.5860733912750284, 0.9633063812374363, 0.589115057309223, 0.9210013787595408, 0.777349725023708, 0.8321791480369274, 0.8086622981246018, 0.8576255427406076, 0.8025111002257476, 0.963913613664537, 0.8985864318102288, 0.7136332256952942, 0.6260023334705365, 0.7623378153743448, 0.692904374970135, 0.56478856896642, 0.5780435356751124, 0.9221311782392646, 0.850647007139613, 0.9546493052685845, 0.742091024204724, 0.9923063172877842, 0.7162066763553048, 0.9752569385794214, 0.9847684066286679, 0.8000828899754004, 0.6483585309598155, 0.5145761504298918, 0.7728986587297918, 0.571893701449103, 0.5570382411123982, 0.8142794188165889, 0.5640749450201907, 0.5703354728556698, 0.6727772585781636, 0.5373639670721617, 0.6493939056546429, 0.7816280453440945, 0.7852313971353728, 0.6669601081226659, 0.7152824330414025, 0.9087963461149877, 0.5629025948317353, 0.5226951240721222, 0.8884100828912305, 0.5945879418911111, 0.7165184567397249, 0.6815164198963927, 0.9578082873907765, 0.5370408889397291, 0.6872951422633078, 0.7700489996601028, 0.6647774966937952, 0.6105783333401239, 0.80106307832013, 0.7886757785212628, 0.9073398643564696, 0.6949801819937675, 0.8826323485122318, 0.9274975684571378, 0.8464947144708868, 0.7141601849435762, 0.8111790174913063, 0.5763076605057995, 0.5339216582459398, 0.8130581142835516, 0.7959929545403218, 0.8030484675140027, 0.574959029701118, 0.5236677290527036, 0.6572891248604242, 0.8446096183430976, 
0.5656027968607965, 0.7093239835556415, 0.6587263537731841, 0.5713833321692854, 0.5612106629534684, 0.9700281238199759, 0.5514705706060269, 0.6388550920157673, 0.8519688924448466, 0.8786574280045296, 0.6038599787818402, 0.9416941138117794, 0.6358612241110977, 0.8883704966811234, 0.6996190723416105, 0.9956088931169134, 0.8478241997193303, 0.6901642397367104, 0.8128114248134564, 0.6810427891619766, 0.8204095406291232, 0.9621985629562867, 0.856367981697269, 0.7653269594599583, 0.5987679760230213, 0.589995794204615, 0.5911120106298475, 0.6026776533838003, 0.9977219637801666, 0.8214772892477158, 0.689449436241151, 0.9138267458928123, 0.7884456857313399, 0.6362271185118022, 0.9542787691026922, 0.9951864189710133, 0.8463769665741032, 0.7688950457313333, 0.5665342455748519, 0.8295428719744601, 0.6057766083356046, 0.5577148007093304, 0.8745160877131563, 0.9079123759373582, 0.7551401106583036, 0.7487469164800666, 0.5141307413066468, 0.9692700283133449, 0.5812865011517745, 0.8202942948141845, 0.9570295538375908, 0.5176189345807687, 0.7591666083004392, 0.5341912971642238, 0.5853285510634902, 0.7604518143749598, 0.9710591468406042, 0.9543487942046693, 0.8138051275076188, 0.5818889405578049, 0.5664982816595141, 0.585963715221935, 0.8343838991230255, 0.8454397011375565, 0.6160931399511154, 0.8539195715572916, 0.8895639195108132, 0.9429636689318324, 0.611495721841149, 0.6117046330257163, 0.9894967203901341, 0.7521397422548455, 0.9617920890879745, 0.8009040359282134, 0.8365388313621254, 0.7086223197973793, 0.6761395667914698, 0.7104839326502712, 0.5818943252981688, 0.5711469610029383, 0.7867090520924566, 0.8116272798680475, 0.9233863740820099, 0.6839733274237645, 0.5084015908056441, 0.7755782097457264, 0.8218920257799069, 0.7618632525227906, 0.8478914900695964, 0.876673622985092, 0.8177494265091705, 0.7983190918419649, 0.9216389192842236, 0.5644462757298133, 0.8991894436630357, 0.5496826013438256, 0.7100998144584764, 0.7036120049829749, 0.850097458501377, 0.9066459789622109, 
0.9755218742966907, 0.7268819248339657, 0.877474368407867, 0.5347710153725337, 0.5063588460695527, 0.6599220403085961, 0.9224493886278775, 0.8207356018895275, 0.9796445415394676, 0.6853064775009721, 0.6211235849246309, 0.5546449642118234, 0.950916063558439, 0.5724712842120043, 0.8992875345051112, 0.5408635564762417, 0.8096707297810115, 0.7793897589622241, 0.7551693658343366, 0.7324034479293573, 0.9044752427090397, 0.8512337140305412, 0.6887054806128257, 0.9596882208483619, 0.6516344173214931, 0.7919901948829373, 0.9225737257139351, 0.813415864872998, 0.948992769860517, 0.6853207450650097, 0.6701230112371092, 0.958573924128074, 0.9279378405436631, 0.9703550136169539, 0.9211311966032043, 0.9318534087532111, 0.712245872911764, 0.987116829582855, 0.9677560184550125, 0.8749252080414578, 0.7891982787906784, 0.8265633654097493, 0.8623298338573049, 0.8089995809884563, 0.9648457716265586, 0.7080087719897789, 0.8839873939905675, 0.8217783159712708, 0.9949091522142777, 0.7365734095311951, 0.7144160224657027, 0.8295522696043087, 0.8844038175624909, 0.6034257843559996, 0.584688355583855, 0.7701831476290281, 0.8723744152040341, 0.5712998213415161, 0.7708436787458288, 0.9270974731027568, 0.7623013580233775, 0.9681766390827937, 0.7951787637858218, 0.6354765612265592, 0.8628034252257315, 0.7004969580372409, 0.5971448500277081, 0.7619132207055563, 0.8062732827126173, 0.6293800642528911, 0.6679001053583536, 0.7447432286970059, 0.5883280432499318, 0.6648886888462285, 0.9919098275818704, 0.6454721883305665, 0.8551260384330852, 0.96892819365192, 0.9992647790919129, 0.6517765592493302, 0.7385113259323808, 0.6928824332974053, 0.7192432989972123, 0.7644417318845556, 0.8520422094382372, 0.792226326599531, 0.8449640749860251, 0.9158095681658234, 0.9372374558159082, 0.5814827992915361, 0.5743005008535625, 0.5858684451407912, 0.6358496207796487, 0.9100675401447933, 0.5843955612726414, 0.7620604959569013, 0.8253898593314432, 0.9462721086213828, 0.9185749740064597, 0.8721959907170634, 
0.6503909461233254, 0.6656877572362729, 0.5068126262248784, 0.8528155530512893, 0.9430928401512835, 0.6652012098221841, 0.7980382553310637, 0.6273770214950631, 0.6983865965558143, 0.6021520412298705, 0.7788983545594592, 0.7339058967770653, 0.8082232533609396, 0.6238319790201647, 0.9669195265967436, 0.9822457311069832, 0.6062665729723207, 0.605216102712256, 0.8751600168892995, 0.8182337065093676, 0.5575531987449622, 0.7175055291593618, 0.7836170131470848, 0.9373162797355481, 0.790982537136969, 0.6200243135528545, 0.7983454917746677, 0.7633836164226173, 0.8408622775531398, 0.7888714595295054, 0.7519640156999186, 0.6877016343353599, 0.7906722981731363, 0.6417579796264768, 0.7768827041803019, 0.8820497380987047, 0.5895481786846886, 0.7650256857143349, 0.9920061517944667, 0.6081742699246736, 0.999197396932882, 0.5706831128690832, 0.5725763873415564, 0.8770996842554502, 0.5302838000687682, 0.792217275929586, 0.7743367639798118, 0.8836728200536803, 0.868564117843624, 0.5893825309103851, 0.7961192891254443, 0.9562198406824782, 0.9109815756403485, 0.506328441635111, 0.6802003594876924, 0.8512701149541887, 0.5616015838442878, 0.8493476108595746, 0.6613029658953269, 0.5652684746110833, 0.9691783735778201, 0.5221179694587872, 0.9073163381173976, 0.627246243137737, 0.7179346451971368, 0.9945991025261163, 0.7645935460039308, 0.5387800159027462, 0.7658588707077243, 0.5189598402873941, 0.96484976190008, 0.6044921226861688, 0.8254201321049424, 0.9660598615390925, 0.5690463818491074, 0.6031633992192171, 0.9637500601197808, 0.608328549379155, 0.7506157563737685, 0.7440058344249522, 0.5597773243005001, 0.6286460123274297, 0.8666956030511437, 0.6723504472167229, 0.5249480941695845, 0.9486637302754023, 0.7807664692988496, 0.970570206862918, 0.5813759071441842, 0.8961155368864477, 0.5266578596168554, 0.9348410245884027, 0.9593770243571289, 0.9975634388826823, 0.6110666561252437, 0.9769972330870186, 0.7227803923048159, 0.8194819673577332, 0.8882969732676528, 0.7076315991703114, 
0.5530414851344183, 0.744347676524571, 0.594497450346031, 0.5591802736385729, 0.5432897109831825, 0.5588675251201456, 0.8687796197534428, 0.9528396011669785, 0.7842315256776919, 0.9353180793492037, 0.5977285190594618, 0.8288261446626921, 0.795319843998712, 0.8679706848266565, 0.683725407060469, 0.9473139150709045, 0.7285700890082529, 0.8612448099030573, 0.9186458569433607, 0.781836666887874, 0.9110081528811045, 0.948389847009768, 0.8308827260968211, 0.7641714160526736, 0.5687695870054921, 0.8397198568422363, 0.9776849291591985, 0.9075101417317111, 0.9385805519159498, 0.5911416791309676, 0.6515923321532042, 0.6204125407521794, 0.6908862920849703, 0.802377365133198, 0.9114836101096723, 0.669925370291955, 0.7152170617824956, 0.8865751447470764, 0.7089869660264706, 0.5299252129320293, 0.8958854025051104, 0.6027214832699652, 0.9417300091115473, 0.6569422494786454, 0.8686788225945012, 0.5516246645361033, 0.5267589569344293, 0.6748196347720861, 0.6733393954764071, 0.8563800673545152, 0.8364634037231913, 0.9848307893663228, 0.9361242607992672, 0.7001626997126114, 0.8722978356997989, 0.6509092902366528, 0.9466928314712872, 0.5538922012286112, 0.8198884365801742, 0.7502687536246485, 0.8388016588299139, 0.939758462227133, 0.9732580822057626, 0.9941705338170446, 0.6110457522678975, 0.983343860325391, 0.5983059871049331, 0.8796802234461037, 0.9042623410368853, 0.8227489680122313, 0.9855761017031992, 0.8065499566899692, 0.6760744452929882, 0.5968162525271443, 0.5362903914807273, 0.736778604469406, 0.9636213485840475, 0.73191619963761, 0.5596476318819541, 0.8676141025564168, 0.5704183069893747, 0.6782030982823384, 0.799281446606053, 0.655857432454934, 0.5568168652542784, 0.7278040947211806, 0.7346887841472738, 0.6359725283230153, 0.6515078466727823, 0.5000686452476554, 0.7908049012464531, 0.5538837548867168, 0.7290990791843106, 0.9968074875787645, 0.6621444770788969, 0.9242414620330964, 0.8103095239105493, 0.8076833947743068, 0.8891472167746592, 0.604244834572546, 
0.8470412770702933, 0.9843473158504954, 0.5779948405024532, 0.9278792321925344, 0.8519513163665182, 0.7658329817372078, 0.9737243125202961, 0.7355063041440411, 0.5340082023618995, 0.7496018230860987, 0.8991691245729068, 0.7267891371726054, 0.8933747452345512, 0.6198291005354284, 0.7764771371611462, 0.95562138710989, 0.6943045011892621, 0.5860425062432495, 0.8380134193478634, 0.6675149862515524, 0.7943271752843486, 0.8565755505161533, 0.5337251728967052, 0.899775733254998, 0.9286892345821323, 0.7550667305125661, 0.9045708782250996, 0.7551171884322699, 0.7772017952349304, 0.9726039974345146, 0.6852302529609383, 0.6188860087590363, 0.8164218744728308, 0.5516002642868958, 0.5125858499711231, 0.9934170797470344, 0.5400047081978151, 0.8446993577299244, 0.5443629182994096, 0.6008909335499308, 0.7937995343911937, 0.7809853005446483, 0.619705821628179, 0.8673803800999491, 0.5089047303597505, 0.7346947389771202, 0.895759030394984, 0.5883166364737163, 0.8019206609828426, 0.645807661031081, 0.9610067940476148, 0.6409373016906703, 0.5890632020189965, 0.5966432605714889, 0.8663420606635275, 0.6542472383647144, 0.5900506320816454, 0.6948881547507954, 0.8710179824980828, 0.7556334981519002, 0.922689865671378, 0.7953903534160092, 0.9357811372754385, 0.5099131653040377, 0.6142064927601363, 0.849004728308443, 0.7527168273772946, 0.7721132544244116, 0.9377253317229208, 0.760303132322157, 0.6275790340770389, 0.7865815347988849, 0.5958143008521499, 0.5930424002663507, 0.9228704000858796, 0.8643501970699207, 0.8219069398799044, 0.6497902591794578, 0.7737100682187628, 0.6999300417816896, 0.7199568269121205, 0.6329478428188232, 0.5387372514485208, 0.6145019539842349, 0.9471977255280037, 0.5199033069466388, 0.8595412017263903, 0.5758344555922172, 0.8176914509990199, 0.9019793287487988, 0.7523398354364187, 0.9826369566098909, 0.600129996906428, 0.9805473361239292, 0.9681203966263335, 0.823761364408897, 0.8275846982160338, 0.6254797225174762, 0.5621820483438537, 0.6926967817645722, 
0.5942799523328575, 0.6473912625559249, 0.884188210984612, 0.7806743225891769, 0.7052676421041517, 0.5679980982618972, 0.5549557322601153, 0.7828117435760831, 0.9856942766414128, 0.5207944705952222, 0.9712851623839505, 0.8842022877875596, 0.8735204031002208, 0.7918783413003059, 0.662970182852041, 0.839085822204849, 0.6421558749155013, 0.7840711701534384, 0.7633362657995222, 0.9336332957912697, 0.6337901621853318, 0.559335602118759, 0.7160860742589144, 0.607429856150081, 0.8442134994303342, 0.8568819891476316, 0.6076491270548179, 0.7677322784468138, 0.7364180079235931, 0.7340231528229857, 0.9155256481708671, 0.7802545028415374, 0.9708617845349503, 0.693970340953143, 0.6270365582671611, 0.7471410569409986, 0.5212891662487005, 0.9258701002048482, 0.9788316737053031, 0.9814830128701484, 0.5931470483738288, 0.5473486966538252, 0.5154634437282494, 0.6692743914563233, 0.5823832319505364, 0.7726970116813722, 0.6050018087721545, 0.5385756705190033, 0.6220741267978971, 0.595447701432347, 0.567171882777969, 0.9827564877905901, 0.8639183983269502, 0.8449803779056758, 0.5984158081515518, 0.6672755381043416, 0.6129770251222896, 0.9519509324820585, 0.9443548927537069, 0.930026037935639, 0.632000113744978, 0.9763414509344327, 0.7144332723103214, 0.8789558575127457, 0.5366363845582192, 0.7290195480683012, 0.9578821360381251, 0.6424413127919231, 0.8431090518127221, 0.7003620897303027, 0.579065628043969, 0.5039640265178852, 0.7757588325588183, 0.9764508815691322, 0.9907032216167453, 0.5055892914881991, 0.767900071488195, 0.6996478231356794, 0.7420722563955765, 0.891922984006845, 0.9250226396834449, 0.5462276719142225, 0.7035003974009588, 0.5336917630533915, 0.7152082166392502, 0.7251742708445106, 0.7509989581193668, 0.9301433175089546, 0.7438051383514331, 0.6060079714083072, 0.6238150169464615, 0.738771292286822, 0.8442030650473165, 0.6791100011209934, 0.9734617437765596, 0.8422036949379736, 0.7004438096133097, 0.8482822223172912, 0.9212111675610329, 0.9972072339377982, 
0.7582100937690708, 0.8761769909199051, 0.5058702092029187, 0.9332384173215658, 0.5302358045311276, 0.5458152894096422, 0.9768560645703076, 0.9749536332589055, 0.8835146700709409, 0.7256720430818793, 0.8052736690490532, 0.8697131565435992, 0.5114453488127082, 0.7252488608263322, 0.7430935587154945, 0.6875257493248554, 0.602212475128676, 0.6415222954332838, 0.815475766522928, 0.7911746541874881, 0.8777085823554018, 0.555809635806753, 0.6652193238271213, 0.6188384724418123, 0.9778558882118971, 0.7451939526787021, 0.6431924220740588, 0.5947552938090861, 0.6195761649751095, 0.9404660324660787, 0.8944618542043291, 0.8445948267896739, 0.9072348325132218, 0.7451458680084253, 0.6011995378884523, 0.6650616963954223, 0.8180782556503694, 0.7999011737920103, 0.6483912726794674, 0.7411734343608426, 0.8839845360039944, 0.8477541892068341, 0.959515068629005, 0.7278713012213751, 0.8312621970657555, 0.587896671988565, 0.7251999331080521, 0.8772997253147354, 0.7079438760844243, 0.8524693583481937, 0.889994998727714, 0.6780076452302286, 0.9455824324753717, 0.7568994353636314, 0.5889174432805003, 0.9338588557696317, 0.7894634349625331, 0.6438385747510796, 0.9760094080269457, 0.9343700804645332, 0.7294289522269981, 0.756148419408781, 0.8390369692684783, 0.821394106739417, 0.7044517012025596, 0.5720261446341461, 0.6430402528106016, 0.5324854298967597, 0.9924998594171842, 0.9694243150877305, 0.6809347788774234, 0.5579201961590581, 0.6999653149017011, 0.845552480119644, 0.8124155327932256, 0.5132477205266918, 0.9563086688409959, 0.8915275784557943, 0.5906105302720789, 0.696041279114745, 0.5772529733136507, 0.8401603197558671, 0.5298858951687688, 0.6551254216975797, 0.561539912635022, 0.7474132224923782, 0.9871534125926272, 0.7905444225496306, 0.5129888217453529, 0.9121231531462695, 0.5462322459149856, 0.8765891094628455, 0.9923245183813351, 0.565333700093506, 0.8322372200402326, 0.5970147354131371, 0.8385331467611374, 0.724919431763428, 0.6236745205371024, 0.5970401247566768, 
0.8186057783949316, 0.9804893535857764, 0.796460057382139, 0.5416327666319141, 0.7319673422386923, 0.5368046005825864, 0.5813699144487263, 0.5655763076059662, 0.9381705167896895, 0.838818996591592, 0.6950337666512321, 0.5762144692251068, 0.7198391186689008, 0.918656201320061, 0.8970031717431219, 0.8990761943986982, 0.5596584811013245, 0.5854678366731532, 0.5503784438570838, 0.7838309580586269, 0.9174478468090642, 0.9567038430007015, 0.7300472761105503, 0.9805471290407534, 0.817393489086906, 0.9522644870533603, 0.5442715500804288, 0.8658475372258978, 0.7337407975994914, 0.7194094451562902, 0.6393270683825731, 0.8266150915832722, 0.776657754768442, 0.6016735055321512, 0.6699759829932318, 0.5044946224839104, 0.5457717824126062, 0.753046656136402, 0.9444728266588731, 0.8095631104735315, 0.6098818192457014, 0.7901678007478921, 0.803997916333908, 0.5956739831469664, 0.6722588859631944, 0.5502851200461796, 0.9763601222981373, 0.8770240639126494, 0.5114049169088932, 0.7129608373789912, 0.5601351195970135, 0.5827438429918719, 0.8735380556415737, 0.7340878564402242, 0.8139492015896806, 0.6780680805361561, 0.7537507191726938, 0.9351783567916053, 0.8278971121633522, 0.5272513487781207, 0.6724917319567842, 0.5509020943961771, 0.7047944146439766, 0.6540969846404678, 0.524538414954916, 0.5451320741322174, 0.5733207694321104, 0.6590975478103342, 0.69790888332778, 0.8986211718900362, 0.8829463265040003, 0.9129021685650457, 0.8958406651819171, 0.6460508981267963, 0.9964506222912196, 0.7958157178320893, 0.6291682692770957, 0.8921978317555672, 0.5160922633160063, 0.7166444224995214, 0.5237136055351799, 0.8334728843512316, 0.8237705225832834, 0.923501782095046, 0.5986235672130813, 0.6430617379905922, 0.7006217410479396, 0.7562152239639102, 0.6578790714274985, 0.9318747248096103, 0.8576669598377575, 0.6002176602870912, 0.5073553267851922, 0.8825043644842356, 0.8744301101760703, 0.9225245194866969, 0.8420770356372534, 0.5479676417285753, 0.6164366820898666, 0.7570384765291585, 
0.8257340703121155, 0.7599362235141864, 0.5065550755875443, 0.7380035749004414, 0.5837168272814646, 0.6135643512092867, 0.8498237722800006, 0.9213172097178708, 0.9458553266982935, 0.9026281564998072, 0.5022506703544083, 0.9699448623456928, 0.6906709371440081, 0.8473972560954269, 0.6295943998307648, 0.8717680057294135, 0.9747166801369745, 0.5595044534715949, 0.9511126666445299, 0.9047119932639807, 0.9868744132207312, 0.6559957457330656, 0.8113904544495354, 0.7735628470083056, 0.9358904300033136, 0.7571914346024271, 0.9927957185128167, 0.6306507204676662, 0.7514330850924232, 0.6417818178409717, 0.9255151640148536, 0.7562847358809509, 0.8668699321394504, 0.6852489483773003, 0.8793157467785429, 0.6458319929114544, 0.573929955491548, 0.7492309230921541, 0.8939810061112827, 0.5861806368619327, 0.5162598059985956, 0.7082458911107726, 0.6180028273348575, 0.6450299782519979, 0.9171716302889824, 0.8124390634206304, 0.8275334925349394, 0.8080074658854361, 0.9979156687121333, 0.9846907222936097, 0.5777377738164655, 0.9231122011052622, 0.5590950577432684, 0.7026427375016112, 0.8315577656527, 0.7474472802187326, 0.6035979685278109, 0.6556446642242835, 0.512785035149196, 0.8460816833681668, 0.9747423803243317, 0.7911232795239492, 0.5499217528076299, 0.8285126474646913, 0.8341278006246383, 0.9130590406938148, 0.7777862504029252, 0.663509015406857, 0.9842260846094704, 0.7018571281052195, 0.7045679683522672, 0.6917859734937342, 0.6254901246620483, 0.7465028122150963, 0.8040013361338898, 0.9175367755819696, 0.5830897551637492, 0.5509766501317258, 0.9489881822846149, 0.7796932708958798, 0.9370362731305919, 0.8994230993470766, 0.99307507568641, 0.5447097778014195, 0.9769962585061944, 0.8266346883644631, 0.5006712795808547, 0.8957451252078883, 0.9103499975463555, 0.7827508711999019, 0.9538458760446562, 0.64893573038114, 0.8191827795354086, 0.7441820351724975, 0.7703527947733897, 0.9257734869983234, 0.8084752343550106, 0.7319367175455773, 0.690029648477877, 0.5298633136548956, 
0.5462724830988968, 0.6580553241738174, 0.9912722094165247, 0.6221057168621316, 0.789668623884626, 0.7355361871866353, 0.8138839066198622, 0.6507915482542166, 0.8629638215062921, 0.5271060942861767, 0.6244008402794485, 0.676498032052538, 0.72895474706462, 0.9774352430075932, 0.7952096178581665, 0.9944249185146229, 0.7178654814494182, 0.9966663510767555, 0.5214105099237232, 0.795117494837063, 0.976591460461985, 0.9593948411586474, 0.5714591462452955, 0.7789510499431094, 0.9680486028928, 0.7918913269437908, 0.8295129971642379, 0.5046989812488651, 0.8056414814274389, 0.7103202468683657, 0.5058791343340472, 0.5144334209978454, 0.6686289125401228, 0.5720259506565545, 0.6820763026610477, 0.862621130758511, 0.7414720760565694, 0.6632457997141316, 0.98037855777554, 0.7387772594157296, 0.5754523689998166, 0.8714126543220012, 0.6287204937866904, 0.5409004439400739, 0.7647289300562861, 0.5666768548486394, 0.6199871465094358, 0.8809590304186179, 0.6414647530675899, 0.5392445793738048, 0.994028744227816, 0.792657915026832, 0.5291318885989175, 0.8105458957315578, 0.9294693058347192, 0.8876442052290534, 0.6409453507345393, 0.5178722153238687, 0.8091991388981701, 0.5132989585464276, 0.6370437361677792, 0.64074517651477, 0.9160892983765707, 0.7569012920080975, 0.9648549325283411, 0.9591808036271094, 0.5907486683689356, 0.5708671656887871, 0.8798709294357394, 0.9324726231796894, 0.9346174441053393, 0.7590646354884953, 0.9583402474246262, 0.7916971009135567, 0.9053724536101235, 0.653645779538449, 0.978053575352319, 0.5905165484315604, 0.6501455241998173, 0.716945359094298, 0.8086038963741343, 0.971032504597825, 0.5569190666561104, 0.5622913232568565, 0.5889896952809448, 0.9064351508826164, 0.6553937222789008, 0.5803407848927453, 0.9216387422938602, 0.5097355452536785, 0.7382970260693771, 0.796178468300532, 0.7869681164169223, 0.669142315230784, 0.6358696041894161, 0.56160595121595, 0.6191941414758719, 0.7520839165492743, 0.5188603111689393, 0.9073452763987215, 0.8397788818196545, 
0.9152440603244163, 0.7387305034625775, 0.5202766128977998, 0.8990228233116269, 0.5528583375153835, 0.9023655914742497, 0.5513659977991408, 0.8924468217644991, 0.9211174564972089, 0.8167206519662946, 0.9386936753517023, 0.537177040505386, 0.8095915835469716, 0.5494556288325593, 0.7397163713107153, 0.592580091608851, 0.9322811074608024, 0.5864061669696488, 0.5177363028093162, 0.5896303541646862, 0.9535658193091422, 0.6783439163074698, 0.948062309226783, 0.5810296051252538, 0.5429262489107709, 0.8814978162792311, 0.6844196975032989, 0.6516988103811462, 0.5454739125992032, 0.6647767690754505, 0.5306614772176993, 0.5730298070624753, 0.753074121934197, 0.9994990098815091, 0.5001134097940839, 0.967706796622857, 0.9131248875676283, 0.7202335282743132, 0.7598766618114785, 0.9072583434469452, 0.8019033181439346, 0.8998610527948911, 0.7237393666145113, 0.9881148912926256, 0.7524237752089138, 0.6866976812095869, 0.971337230347452, 0.9453357056020155, 0.9489069828905826, 0.8060978547878884, 0.5699920137465729, 0.8943752208296756, 0.5201287694746346, 0.7948663960416289, 0.998928753232134, 0.9640429499363918, 0.5411170322093675, 0.8721944945920755, 0.9063624324890218, 0.6080338706442721, 0.5411734296769266, 0.7690143186218528, 0.8098463516421018, 0.8739269796659299, 0.6436066746222682, 0.7321156345453628, 0.9298384686771402, 0.5986178633012758, 0.9140287587645872, 0.7775066770384842, 0.6484813334101405, 0.9382939490781668, 0.6801459127555628, 0.7288336250022417, 0.5175154238625103, 0.9448561870824893, 0.8970631486623235, 0.7938322720547379, 0.7881568132332419, 0.8752071864550053, 0.7698984938180014, 0.5757975209333235, 0.572610779694349, 0.5877912269574644, 0.7259311101020345, 0.9159395593163915, 0.7549315181573566, 0.5069913013553122, 0.8304194039249647, 0.902969844968913, 0.837905670355026, 0.7333185333082317, 0.9080195400979283, 0.6477242597564518, 0.59182126792435, 0.976323629691564, 0.9679570658287533, 0.6624532714275224, 0.7397691314684631, 0.8653918050567403, 
0.8136708840536383, 0.6443767605478452, 0.5003259966173008, 0.7564286110380961, 0.6631366214305634, 0.6208496703239124, 0.6473661782639125, 0.8331037302511687, 0.5358238316142165, 0.7129487398093723, 0.6328191979697364, 0.6358841837354785, 0.5960847616932881, 0.6989320094243776, 0.9671908966334245, 0.9863464556088242, 0.5101681584929578, 0.9269844998520675, 0.6216344570774411, 0.5371060462986181, 0.9389528776117206, 0.7961345537598273, 0.9440279947430749, 0.79376775545835, 0.8632157331829153, 0.5759165008363596, 0.6033378491305945, 0.5576388296790791, 0.7868137971513787, 0.6315162896814759, 0.6632668234801937, 0.697778935149439, 0.5982477870771645, 0.7983356430499949, 0.8123280397025884, 0.6649746536716156, 0.9480545853279083, 0.6108979007726567, 0.9821021914059636, 0.9921916303542122, 0.6830811305609689, 0.6861169161593145, 0.7933920503130898, 0.7063808339023825, 0.8190125501046603, 0.6791940736043598, 0.977908978561784, 0.5363417427793209, 0.8910605747514241, 0.7366970461112186, 0.6127000729916179, 0.7448180968760343, 0.7584215489833537, 0.772363098340479, 0.6548653025836059, 0.8084957598223872, 0.5084163734360586, 0.6613664475123517, 0.7636424310356631, 0.635530448906945, 0.5584286905976932, 0.9759127255448106, 0.5285626383961926, 0.6954377492382187, 0.8438875795374488, 0.6380730782810475, 0.9120241130716307, 0.9879028214120056, 0.7299182532245484, 0.9636559392126565, 0.5775256070598442, 0.9058561429531095, 0.7835105131542026, 0.7778949121382984, 0.8747370334933514, 0.6864536701997771, 0.9201138610921709, 0.7373234810426442, 0.8056138290890525, 0.6792556652786215, 0.9487775938553176, 0.9654069746092575, 0.751151311962357, 0.7856629741360925, 0.7418765751257317, 0.9447007330580532, 0.9960699493204659, 0.5632599610254823, 0.7961105849322638, 0.7561806634975203, 0.8917677731737067, 0.5444041935492676, 0.9680873925532133, 0.7825106026278843, 0.9963855108145998, 0.8137349108470591, 0.8588974259100213, 0.9345011214170066, 0.8832711013901591, 0.7428337512159123, 
0.8068810781018854, 0.8898621151564587, 0.5853777440177184, 0.7766007006332871, 0.9100566272471972, 0.8634232003759482, 0.8600211328969949, 0.8098689362147675, 0.640986273883789, 0.7000778014732041, 0.9658578755803624, 0.5450041308232897, 0.7246297856115569, 0.9923669714973984, 0.8187335315931912, 0.7068450752244306, 0.6539609356965435, 0.7188767866327692, 0.7330970300170034, 0.5779021006800484, 0.6799566516541508, 0.8646892160389315, 0.5011508056405751, 0.7313375460563163, 0.9878268598988857, 0.737445465578654, 0.830990828143181, 0.995176504713642, 0.6090403134558111, 0.9036906900452604, 0.9777524102655442, 0.6674360570850806, 0.6513888894715258, 0.9402774145032986, 0.7540571873612272, 0.5655249012218047, 0.9000114854176331, 0.9298876781821885, 0.8268963092949935, 0.8369630320240591, 0.8151817559014589, 0.8166608053894842, 0.510718415948004, 0.9161009222717525, 0.699818136187525, 0.6115870198600464, 0.8589313515295047, 0.5857461468832403, 0.600336892901308, 0.5801693404562553, 0.7082993303561882, 0.7825331769045305, 0.5424452682386324, 0.6485707484519981, 0.7642829661737807, 0.8616582659619376, 0.9296054448207317, 0.8233497204858753, 0.7170629556144286, 0.7604702357886233, 0.868092882466613, 0.7531394293491692, 0.7549533767687124, 0.8703838378877566, 0.7170964400819775, 0.8563437770318768, 0.9335954996864932, 0.8005457753157033, 0.8298916891564339, 0.6338327507842966, 0.6124640058254995, 0.7883743010868898, 0.9177814730511179, 0.7717297824719384, 0.742281951079655, 0.9794595976689229, 0.6819427587682136, 0.5178531723077794, 0.8778721043332283, 0.7703050666099576, 0.5965435270332897, 0.512991686509092, 0.808690985035728, 0.6284958975951214, 0.8234123803598199, 0.6957619482099913, 0.93624039892848, 0.6791465074318582, 0.9616574370376214, 0.5534894026632293, 0.5232072683602569, 0.5635017131704805, 0.9959620378208955, 0.7600154418513103, 0.8630110612560064, 0.597703284949842, 0.8051622999134341, 0.7304623437641053, 0.7810179107302051, 0.5117552518665003, 
0.7863461203788792, 0.505780885269259, 0.9648751454613649, 0.6105000309802745, 0.863370136304894, 0.9674521047472769, 0.5748832641456463, 0.910821955116339, 0.6588169520389486, 0.8697709879920212, 0.660178856153621, 0.7421520474121703, 0.6251836364030954, 0.7890660965345964, 0.5764343064965345, 0.6154355661711252, 0.8463589051235896, 0.8127677973625811, 0.5999723306354792, 0.643658797083226, 0.8991500365140219, 0.5158415261559002, 0.7267007722214085, 0.7175562904399988, 0.7896545492186782, 0.7887674189454119, 0.5694702342108424, 0.7446773984592003, 0.7573253009288823, 0.7634188514719037, 0.7664712124318107, 0.6900872627721285, 0.8002547127713775, 0.6224343759328825, 0.8045667123177545, 0.5525819792359106, 0.6509661691810584, 0.6712575020482134, 0.5532775679467444, 0.6747020937862472, 0.8839480923183163, 0.599421456700505, 0.5956648220736389, 0.5587368183256761, 0.6131471953395963, 0.6191181551356093, 0.8182348891901736, 0.7113231565135372, 0.9514073541913952, 0.6559549622625626, 0.7817250975133876, 0.8650068770325149, 0.8706520992937342, 0.6973964142973288, 0.8190359725375034, 0.5888049890244229, 0.9538604439871857, 0.7414758509616142, 0.7089257614286317, 0.9717670097709377, 0.8638766368085564, 0.5316259878848997, 0.9739551051962426, 0.8346712042312796, 0.734893300422926, 0.7610745213372647, 0.7028347014672934, 0.8452971481015641, 0.6112816852241229, 0.5506251394165497, 0.6600821361794398, 0.7155354055153877, 0.8717135894905568, 0.709204403877712, 0.6734705121129304, 0.6111904645577256, 0.6320601617336148, 0.9323276487835788, 0.5108234159480649, 0.8412188755832721, 0.7132403881174709, 0.8529284922438392, 0.8412098084563937, 0.6624280333946915, 0.5580704342624663, 0.7938888215453289, 0.6075076995338687, 0.6096495184715669, 0.7957887155866106, 0.7668087861136399, 0.7374664259621262, 0.9279982783209729, 0.9159721070415282, 0.9082026953690611, 0.9769768245181893, 0.748636605072422, 0.5462080458166753, 0.7218676709915296, 0.6566638985617975, 0.7096989161606116, 
0.9937125122253547, 0.9744896995873676, 0.6963222569464942, 0.5134578239415891, 0.5611934021611436, 0.9022381010957163, 0.9857481919748241, 0.9345279184711544, 0.572818988442697, 0.8876579626989434, 0.7389345235267571, 0.9380219727897852, 0.835237891655137, 0.8561365733454694, 0.894440668833046, 0.6426931275700198, 0.9471877091444312, 0.6451864275721664, 0.8196268218637554, 0.7203566595547943, 0.6744324358730331, 0.9797179106863233, 0.8669890104619313, 0.9284242845667803, 0.8109465400083249, 0.7541978654200987, 0.9880652067260531, 0.5419287542154259, 0.5146517765004776, 0.7559330575015037, 0.5380532029158753, 0.66663632864479, 0.9620513246400995, 0.7822057849878608, 0.8942416817572902, 0.9923740222115582, 0.9578506379082291, 0.7930448760525064, 0.7714950205391576, 0.931787679697216, 0.7872830670393858, 0.8016210628831963, 0.6658091820063575, 0.951022408017939, 0.6555568506056542, 0.6979369831393718, 0.6038654995949195, 0.513427136022502, 0.774748979801714, 0.8200444365986466, 0.9046295431206678, 0.7061613114161873, 0.8989245742778331, 0.5514180337962352, 0.7484579442273982, 0.7348978082538278, 0.948332940868498, 0.764892188797945, 0.5324953201292884, 0.8171481188141136, 0.5578163250881172, 0.6599847581133069, 0.8846191218518282, 0.6328651822440847, 0.8199755701895026, 0.7633297178575498, 0.5648510694898907, 0.791963814568683, 0.5663846097729586, 0.5685912474456254, 0.8554238330955537, 0.8011166304163029, 0.5473285441450836, 0.7866516514781688, 0.9228914443485154, 0.5759404363545362, 0.8705876951669993, 0.9023079046707028, 0.729682708011439, 0.7437691304032445, 0.6367839525280053, 0.5548727903907651, 0.8196091131601864, 0.9603512009512339, 0.5807169062570352, 0.862928199438639, 0.806541409824657, 0.5637517718570656, 0.9797157392645801, 0.7138540629236845, 0.5391028090179104, 0.7497321307017504, 0.7576744657369276, 0.9569691384053224, 0.6819267404755329, 0.9265020553412391, 0.6355467548901937, 0.9143092431037201, 0.9544447639413376, 0.803161993947278, 
0.9472904512282327, 0.8672325478957397, 0.5542686621327595, 0.5732180629073106, 0.5354203006176698, 0.5228215246957268, 0.7995229362161893, 0.8869200404285922, 0.8083191948372388, 0.6838978608092179, 0.790063874344282, 0.623558150387501, 0.5769500659022602, 0.5245999518649531, 0.8543820455856732, 0.6071237898552959, 0.9531872976576119, 0.9159355303921055, 0.5664830767899578, 0.9628878924802804, 0.5251990786198633, 0.6311637760170613, 0.977634447226198, 0.5378683468147285, 0.9608440984584818, 0.626806864616311, 0.814555188649581, 0.7242154949377054, 0.5742998365556391, 0.6624953099373272, 0.8829403111418559, 0.7628751318502733, 0.9735498577266157, 0.7252919185466171, 0.849975118508629, 0.6418030320284317, 0.5541173227974752, 0.8366895959248848, 0.6088627836798127, 0.7469754805208385, 0.7072488831379831, 0.9783490023985337, 0.6955468981855961, 0.5129714749832937, 0.8101484147898315, 0.7455784354493711, 0.9594194086276243, 0.9962259232255255, 0.9651164229636484, 0.6928095343572437, 0.538962326254238, 0.7332876737553184, 0.8663188728314561, 0.6065994370074994, 0.5116571844802178, 0.7304222733695209, 0.7161710074223175, 0.8390901346847497, 0.9313752578330527, 0.5076349391737252, 0.6038296794079221, 0.9799096227202628, 0.6767080551225046, 0.9940941671739913, 0.9972578750361397, 0.6457847060714412, 0.5464609522041712, 0.6732862003451926, 0.6401210000483359, 0.5414962282064197, 0.9592243031953914, 0.6922995375396317, 0.6661803192118264, 0.7658385829932386, 0.6432554539621163, 0.9858775406278485, 0.6877980284219982, 0.5077793465393678, 0.8310664471880758, 0.9168808968953375, 0.8827939244131747, 0.669849810441075, 0.8710691152541598, 0.597125321757807, 0.7071095353986014, 0.8961302137027857, 0.7030317685718277, 0.6052857912038123, 0.6800624171180985, 0.7636685321366439, 0.9194471454957212, 0.6301171094511084, 0.6474237896179962, 0.917171659235374, 0.8980330928977023, 0.5334943377616065, 0.527165707240147, 0.6605350733096556, 0.6245343661710043, 0.6056582465625473, 
0.8722062573609313, 0.7646046167979292, 0.5700127525965581, 0.8760302332637804, 0.5776497931908584, 0.5825121066014792, 0.8441556760603387, 0.7119620731612893, 0.8138825085183061, 0.9084379788114474, 0.7288039962994917, 0.8760343015557351, 0.9040477866819276, 0.9710647026425021, 0.7671134694593194, 0.7376027099014548, 0.8777811222771567, 0.8329881409190278, 0.6375598569085397, 0.8778782241219102, 0.7867926265536085, 0.9661382060756657, 0.769732609194673, 0.6122009435519457, 0.965484059109234, 0.5697053433248236, 0.5925089213130612, 0.9983995615500316, 0.9461762256563041, 0.5539106616733447, 0.576498309668352, 0.562178206681162, 0.6027280367333165, 0.6386337388250825, 0.9325566399695127, 0.549018799308062, 0.5159524348862905, 0.7044120379756602, 0.9694674305103554, 0.5152765860708828, 0.7230101508688378, 0.8767745313490868, 0.7680451757775026, 0.7452701147615081, 0.8741774042860948, 0.640259206970355, 0.9964835235673875, 0.5966216044909063, 0.9976628296702508, 0.9450316428703989, 0.8816241358772796, 0.5619799863865178, 0.6542911021680827, 0.5736669642193328, 0.6929318552150825, 0.6357558630850796, 0.5222364661283267, 0.5452984019142897, 0.5662811514963241, 0.7885760941228825, 0.6132444735065704, 0.7278754122240811, 0.6202802576406735, 0.5784020082378134, 0.6975336752592799, 0.9788099835257953, 0.6390760767778116, 0.5596679993842447, 0.9387202859533681, 0.8284231094100161, 0.695040610788033, 0.7081870462888727, 0.9813246918127878, 0.8866733911793414, 0.7725253548554022, 0.7476581494257717, 0.6543700687955671, 0.677940844146041, 0.7951794878216183, 0.9070873681077269, 0.6615211207648771, 0.849399192161766, 0.9111623958347791, 0.7747943508693702, 0.9355658051727225, 0.9990063786906356, 0.8403540207686433, 0.8351410977048668, 0.5596878256732586, 0.9184599051431153, 0.5295462024756541, 0.7262022462845206, 0.6634737515076814, 0.5025941198041411, 0.5256343548311591, 0.8278410218795647, 0.9458364551830196, 0.8708833548780529, 0.8855776252237395, 0.6776600060042937, 
0.7123767970078283, 0.5087456545430726, 0.6483171224704899, 0.6743131496848528, 0.5052896230245789, 0.737218555120611, 0.6236932256553874, 0.6393318210500594, 0.503418389249122, 0.6982973247345567, 0.5096285552797022, 0.6923698604982349, 0.8195848683255782, 0.7609425159266053, 0.7938850849991217, 0.8530056978066796, 0.5312629360077543, 0.6393838873521702, 0.9656459908783785, 0.9178617572632475, 0.5529206710412817, 0.5076010457136242, 0.5906523691755314, 0.7415616809307972, 0.5856268042039123, 0.6051137991246114, 0.5345255583453699, 0.9720222993010386, 0.8639900469576218, 0.8829632979219306, 0.9229798767274846, 0.6250813224484044, 0.7299816092567051, 0.8104322257394847, 0.8486143307136544, 0.5197937460773023, 0.9919311192096378, 0.8470759221668724, 0.5936833785206501, 0.8371404681372597, 0.6578134127881287, 0.7220364525710066, 0.5963576779091398, 0.7367996521674112, 0.9434881667047679, 0.7344942299287895, 0.5384192703907209, 0.5114988274939078, 0.9617802668775373, 0.8571770651813574, 0.6878438685123178, 0.7435931731276658, 0.668747528184958, 0.9446609110796875, 0.5285727118039819, 0.6702947230316831, 0.8574076647251584, 0.821022847217759, 0.8541277237062677, 0.8882292054365788, 0.6575279464466643, 0.8177809896957382, 0.5734773788432004, 0.5313140631534015, 0.9772616789066856, 0.9069095432771609, 0.9435219899765006, 0.7983822461490123, 0.5863902468043174, 0.9405163947070144, 0.5919870738451853, 0.5724388677129268, 0.8943350606626034, 0.7713101215478038, 0.9697261996544944, 0.610961770948301, 0.9193363695698751, 0.959583629171213, 0.8813425382792119, 0.9061120309737645, 0.8176107770217361, 0.7371684952429556, 0.7960439928013694, 0.569525920808873, 0.8574202936217823, 0.528697901847905, 0.6136142061997423, 0.6873377339410665, 0.86963510190823, 0.6808670734879264, 0.6979565039816433, 0.6044661738795543, 0.8535838461375633, 0.8913959681736382, 0.8459587926814323, 0.640538548770478, 0.6167233493551189, 0.6628203273741466, 0.6133152101678102, 0.5501619725854809, 
0.584585891186839, 0.9655411754490364, 0.6780061717231618, 0.6133906184080238, 0.594994069122587, 0.6081189490148329, 0.8081679874667397, 0.9478494300514286, 0.7104319218246943, 0.6000451994333695, 0.8137680216086305, 0.9163882428969182, 0.533311717284424, 0.6391480478845921, 0.5406251528597419, 0.7011124544184029, 0.5111674964330938, 0.526870308075373, 0.6727867889497823, 0.73187435913709, 0.6274096798264364, 0.910582724572623, 0.8310655537152647, 0.6058357511383758, 0.5918703424244817, 0.6809011268485952, 0.5450451919472226, 0.8525496258475271, 0.9493405691351027, 0.5112613585461061, 0.9650275792489972, 0.5226584109017349, 0.949620101753273, 0.6690386388837253, 0.9275570893121332, 0.7672580361639545, 0.5255647625227428, 0.865573406080496, 0.8805592295716941, 0.9547633534256459, 0.8228863645322694, 0.8082278233307036, 0.7677753113193886, 0.5809337918662758, 0.838905717753517, 0.6493935606460551, 0.673967385107581, 0.9989890324340597, 0.7487454251672918, 0.7911689027052096, 0.8509912782088329, 0.6829055206879997, 0.8482790412707396, 0.8929051511721974, 0.9051358559135799, 0.5574812060020334, 0.734883745878369, 0.8343533544502393, 0.6598573002488348, 0.7919616485442904, 0.7279682654754621, 0.6534298632997673, 0.8880661449117893, 0.8293151125834908, 0.9330120918222602, 0.6903324138643983, 0.6371944990875766, 0.8453380568277065, 0.601740795323724, 0.6632248020491309, 0.9952108360765033, 0.7478543950189396, 0.9667305273764651, 0.6851613807177774, 0.6269312104250713, 0.8320344661759531, 0.910330839781929, 0.8608051659404599, 0.5990185985491058, 0.7563616309560731, 0.8557572600615417, 0.6580889123210236, 0.7854032746248968, 0.5805352938364533, 0.6123926625199749, 0.5005843603745732, 0.8601418199371167, 0.7998868394418279, 0.6465389494444781, 0.876781188278911, 0.5656376802144873, 0.74397557897045, 0.9535138755198167, 0.7748996193998652, 0.842334794174034, 0.737399038037885, 0.5276181800423503, 0.57794659696356, 0.8005941790852902, 0.763151219947436, 0.6618397871272007, 
0.669851320836627, 0.8357466787630119, 0.5716290630006965, 0.6648423307934803, 0.9769182941410308, 0.8459743041152992, 0.9593565351219565, 0.753315750857677, 0.7174673790920385, 0.7723776814581321, 0.7158038327750256, 0.9910939926896215, 0.5035589962269182, 0.6524404971878588, 0.567243765143006, 0.7130404937925108, 0.9559797380455091, 0.8944734449823866, 0.8665059762324254, 0.9320960517458838, 0.55434804382629, 0.6206311358527425, 0.7593235047988101, 0.6141005172328056, 0.9203405422453191, 0.7606708713255045, 0.857587339348113, 0.6250566238529002, 0.8040487288490686, 0.7505337895448285, 0.95932314803638, 0.785848099200076, 0.8848544571849364, 0.9781240101141977, 0.6466637889152779, 0.6193647868800967, 0.6431110248099525, 0.6481058844716772, 0.781069284931083, 0.7086820662788142, 0.6463685714015612, 0.9543485334418889, 0.6454543746746059, 0.5791425277139527, 0.9925985539573949, 0.6709651581515013, 0.8974376344520111, 0.5059726491433705, 0.9003839755643528, 0.5162958478713187, 0.871795455634396, 0.6896390410965261, 0.5824445053459193, 0.8072006679630147, 0.8865878959107507, 0.5335108055651641, 0.5764742707580659, 0.7954256402419004, 0.6342804023849034, 0.9449321663748589, 0.6727236712838394, 0.8716539207359272, 0.8799764273664472, 0.8863467585298413, 0.8037967812343405, 0.8997995305818131, 0.9912403615909395, 0.528881340361478, 0.9085314954829039, 0.5660571221319128, 0.9261539658157423, 0.7264969337403178, 0.911976555003035, 0.7537899327965893, 0.574827767478872, 0.8961539187730245, 0.9210645491080596, 0.5212822480734314, 0.8472938030294361, 0.8766471033518755, 0.5984232713416611, 0.6258039052394054, 0.7603694258605671, 0.7373748279474959, 0.8432476810850273, 0.592861421118258, 0.5746507676757471, 0.8888920112712375, 0.9387404014396082, 0.8931648947206053, 0.8584652869135685, 0.7971942132469696, 0.737569610818419, 0.6973039083680002, 0.5407068171851774, 0.5801972274634357, 0.7190893855851099, 0.6592808948523268, 0.8103092649121717, 0.6642744310201197, 
0.7349050892850548, 0.80466661379532, 0.6086606211500099, 0.7916173856842905, 0.6486411861881305, 0.9425093764010632, 0.8003557176576109, 0.7419992098435242, 0.6137367477823389, 0.7751174644817276, 0.71996734801993, 0.5401224486864129, 0.5475402952711411, 0.5014619236445146, 0.933680098944397, 0.814312035018963, 0.5964913822125227, 0.5519853224012385, 0.7414317323785847, 0.645638314053669, 0.807764542789521, 0.7198208899973466, 0.6547552502937455, 0.7014249791211024, 0.7586010191687566, 0.9803700529616336, 0.9118368460240308, 0.8899090258114873, 0.8333802225774634, 0.9179092075118642, 0.5368114863059599, 0.5743791516580754, 0.8351697713718571, 0.6750976565519731, 0.5431724380062546, 0.9451147560507491, 0.9480303740984193, 0.6679113598777755, 0.7828708528509385, 0.7010786148607424, 0.8154181456349906, 0.6817691722709603, 0.8343285526121014, 0.8169586731129361, 0.7078200484692788, 0.5530980230832969, 0.5404532709774785, 0.9830642849404332, 0.9017876442373725, 0.7388359714249984, 0.6718956799451967, 0.5017501914855163, 0.8818033983714011, 0.54228442662125, 0.828193779489183, 0.8680948590213118, 0.9984724980820026, 0.9666140130052019, 0.8065875576140225, 0.8036625037560062, 0.76090716021359, 0.9138969358358265, 0.731713716564278, 0.6627811739970371, 0.6994207983722146, 0.6809396054127974, 0.7570370026346759, 0.9917598479638204, 0.8251510407383245, 0.8063429910811128, 0.7429506093230392, 0.7041390489183952, 0.6173126323921273, 0.8672053682832521, 0.8986117610966773, 0.8883996150042157, 0.5248602648450891, 0.59913196660872, 0.9586735810812557, 0.6136326177416296, 0.6770886890199436, 0.9225872252874547, 0.8212078824195077, 0.7799697378531039, 0.7374126816295539, 0.5077452728974081, 0.7862291156808021, 0.9880499640176947, 0.8093112032360109, 0.8545281361674899, 0.6070914984688917, 0.8797526634246682, 0.7940477626426605, 0.6238721218983896, 0.5953739406388523, 0.6706389126386847, 0.6240250459346209, 0.8073425663316451, 0.70934333896997, 0.9198808276290269, 
0.9335874009486275, 0.9571981435653902, 0.8578702638103793, 0.5064336577554474, 0.8365956363437586, 0.8570729843104329, 0.6908616589752838, 0.8935136471001419, 0.5577753159602633, 0.6590971167389044, 0.96186358039419, 0.95232523360429, 0.5722042735897315, 0.7332465168372113, 0.6543165781658764, 0.7855822626995026, 0.731833779763986, 0.6424485121404478, 0.9291583562683646, 0.9433625783323077, 0.7761056817875004, 0.9297636813489742, 0.8494230386957763, 0.6656566634192898, 0.5213154013499302, 0.83129494273893, 0.720127345892152, 0.5070348614590474, 0.875928798552973, 0.7711290371527866, 0.9733989823175562, 0.7210170201484603, 0.9923820901992415, 0.9551546289442778, 0.604122420912772, 0.9904178094673084, 0.9628923978738297, 0.7260562706598744, 0.5620353895550882, 0.7922171349164244, 0.7113053174799837, 0.6182151257442057, 0.6871591369734193, 0.8240497827180642, 0.9631464853175777, 0.7332887276951332, 0.8680080705143477, 0.5494784321323192, 0.890450649398673, 0.9720829584910329, 0.5168157385554499, 0.6882147383745605, 0.8118669762636845, 0.8950926696454509, 0.7407586855293162, 0.8043253940427225, 0.7302200358133455, 0.9072053650499967, 0.9698749467912425, 0.5121668498138384, 0.6038904854121041, 0.5832900927366415, 0.9038499613695358, 0.651318523839245, 0.948896684425093, 0.8253979212176197, 0.8102718224705656, 0.8763922579603345, 0.6420037746551381, 0.9868470972105456, 0.7285892342455416, 0.5274045910077813, 0.692947733575934, 0.6465996776171321, 0.5365644048168339, 0.8505185095133506, 0.5163562783006876, 0.6659430726640723, 0.7264001988819355, 0.5404085962227889, 0.8812599143757118, 0.5007561884096584, 0.85396336904042, 0.5641544295041958, 0.6098203686452498, 0.5416357048934515, 0.6858034603772085, 0.568763468288051, 0.7427310323036529, 0.6512454198630359, 0.7637638202137378, 0.8273257636881075, 0.8526862564304313, 0.742839691629497, 0.5687346403800076, 0.7036001973180223, 0.8000499701341492, 0.510424495807041, 0.9672001075279146, 0.9128623403132652, 
0.6384304414878882, 0.5292022610268451, 0.8974573738220816, 0.5164743736059437, 0.9659557380422552, 0.8858255853950756, 0.6230455316143237, 0.7552055540992535, 0.5370254416239015, 0.9558612904828476, 0.7555777970040585, 0.7494173091579286, 0.7446436863689547, 0.6913841571507724, 0.9053675014738637, 0.9428861945680782, 0.6878692720672449, 0.8707907187438365, 0.6910807678245845, 0.6120954121326903, 0.5753399032101388, 0.5238409229568036, 0.9465849813960344, 0.5036996590971701, 0.7402705868338402, 0.8369184757697296, 0.6357456826406034, 0.708663489591698, 0.9659273696864199, 0.7402693202676498, 0.8018265329951759, 0.9584158898203003, 0.6778873784920787, 0.9925140042370935, 0.7880492420380076, 0.7180493593025692, 0.9344649526772344, 0.5679685597239943, 0.7197673728144123, 0.9841198425323539, 0.9749995662108167, 0.7907418816106373, 0.8343608059595846, 0.7538420673180902, 0.837849661516519, 0.8943866416060273, 0.6833686756163105, 0.5545535664363768, 0.6083895752324489, 0.9612495251431834, 0.7452042824431017, 0.6970265092851099, 0.6869248604196775, 0.736887615973997, 0.9801903621880037, 0.5674372072717332, 0.995151161682938, 0.6838576889364709, 0.8881819466711849, 0.8369156865797658, 0.8892256083878314, 0.8145852851818145, 0.8232025656237039, 0.8572966751747113, 0.9619391727442874, 0.5130578299924513, 0.6924103838392954, 0.9806096086633649, 0.710539125741118, 0.7646892402933144, 0.8524785867693976, 0.6203399210251034, 0.8042567192055384, 0.5773715628062346, 0.8687641497642751, 0.7444691171614937, 0.8327025119768735, 0.9680384297247804, 0.584221188423965, 0.6324979197537872, 0.5397678581285941, 0.7718456625577197, 0.8408772029527464, 0.6201950684974449, 0.7396963585795839, 0.9963818773644646, 0.9103900104861473, 0.6079105824034906, 0.9965262588913502, 0.5919618231632785, 0.5250505088491675, 0.9377619067146375, 0.7610966075403073, 0.9498260263756824, 0.9516637792383911, 0.76458077690962, 0.5798306923369025, 0.9705480921219592, 0.7277050391706027, 0.6100034946889643, 
0.5968536347028064, 0.7918331561563468, 0.8495311038799545, 0.7617171576746999, 0.9439196478295522, 0.5713348837748964, 0.9455486421713666, 0.7408993606233821, 0.8343042371600045, 0.9097056014441427, 0.6454969747011028, 0.7990725319685082, 0.6718082673357111, 0.6453653001555453, 0.6034289157433365, 0.7082433977223146, 0.8442576554673502, 0.9225094785269083, 0.5126030105424411, 0.7821151660154726, 0.6623011967796426, 0.8818141660498975, 0.7760254260526372, 0.5220354819069188, 0.9766238667444141, 0.7373446275995156, 0.9196168972414087, 0.7481288242021495, 0.7483860139646886, 0.6143767300832164, 0.5012345785842749, 0.8982698641449123, 0.7853073270704793, 0.7177682372972836, 0.6233219451956471, 0.6110564814267664, 0.5865474100149786, 0.5508157392218831, 0.8480994616128568, 0.8515269992728436, 0.7819380824872577, 0.7006273499829608, 0.5305972504061935, 0.8473596065192744, 0.7219554209367318, 0.9888695058758203, 0.703947670475153, 0.8492444323812557, 0.5491207580454174, 0.5928689168861226, 0.735035524547721, 0.6237957264429901, 0.7500766843946711, 0.6799844526966411, 0.6076720941109628, 0.8013080062571293, 0.5432086125033181, 0.6224784263290659, 0.9813748846648338, 0.547962312170595, 0.7761161039169, 0.7607004472367178, 0.7888943611177186, 0.9891855369670826, 0.5306749173155756, 0.9679418391869681, 0.6660170610113625, 0.5280979596132882, 0.7361285014887602, 0.9897036704999317, 0.9713309808090773, 0.9601050165897966, 0.7903359672590702, 0.8570406957473842, 0.6079397733334317, 0.9636462627152644, 0.9160137774275994, 0.7512384998308115, 0.9272251607194808, 0.7632606491180376, 0.9144122143079596, 0.5719151578285369, 0.9925917343477639, 0.5212108381396887, 0.66314840068968, 0.9534029229502723, 0.8164309097674982, 0.8696774254217876, 0.7551709249645053, 0.6491889503455475, 0.9740136643041606, 0.7007426999390631, 0.5561031966198862, 0.9282424973924519, 0.5672684476461248, 0.89572606279721, 0.5147669511684094, 0.8249070513292425, 0.6519562156521876, 0.8333626016899562, 
0.7284395731874121, 0.8674952964338116, 0.7608153410368906, 0.759450013074444, 0.6342772057291081, 0.8827867877907789, 0.555807227306301, 0.6074036665499111, 0.8308397267145953, 0.9423411428726904, 0.571860970467106, 0.9998969518571865, 0.6109782570576742, 0.5383816804356143, 0.6246842676052222, 0.7122228905653083, 0.506345735927026, 0.6594282404996585, 0.9400272237030404, 0.9465047296223252, 0.8895590693382458, 0.7607148587815604, 0.9154690818162379, 0.7854689820371437, 0.6080681067065412, 0.7701245189923087, 0.916880716039187, 0.6211435375019867, 0.9301658432136903, 0.6569808782474639, 0.559064551117254, 0.5344102451337053, 0.5103843689009846, 0.752777964052218, 0.7598825734748795, 0.6787814876108215, 0.5872057125154584, 0.6303843043565696, 0.6838289201192034, 0.5634272565918939, 0.8034786477726406, 0.680843169271353, 0.7807397761705497, 0.7714856844226079, 0.7796249999219234, 0.9405097352157825, 0.8431684145519265, 0.6248718712802102, 0.9153579442716941, 0.8558521485249484, 0.6416572068871333, 0.7112041728084362, 0.8988706742205738, 0.6431889065241378, 0.9813704755815785, 0.9789166804522087, 0.5045748047245011, 0.9408863473080353, 0.5112666671853732, 0.9896021966015169, 0.6378918591633955, 0.7566531991761563, 0.916333039610631, 0.9649922022717198, 0.9003131122747117, 0.9287888783928592, 0.7775427527945384, 0.8783106693148752, 0.9629294361456726, 0.738786311440947, 0.7045350315219203, 0.7271010086404397, 0.9641021816809925, 0.6416080514068141, 0.8344663009508233, 0.9348629021468533, 0.9302849663284447, 0.6613744261629295, 0.7723082394734793, 0.9818661613485128, 0.8322854809657457, 0.8965273243626244, 0.9290531883414379, 0.6903233725520828, 0.8015178853022147, 0.9807808862769756, 0.8633518226169598, 0.6589297783465558, 0.8787925127563637, 0.6704230482662132, 0.9399191994190643, 0.6962884927220451, 0.8260604209171543, 0.6327721694429773, 0.6011494003224086, 0.665163162224706, 0.8739879005551768, 0.8343629715883405, 0.5977174596873973, 0.9863150302171255, 
0.9569219178442696, 0.9356254876310081, 0.5315888966403847, 0.6200855056432282, 0.590864185946136, 0.53658100274591, 0.715029834252066, 0.7270492923041673, 0.8797108478817689, 0.5709397199391697, 0.9106261788691045, 0.6343319583598195, 0.5858548567134482, 0.5757885169411893, 0.8832267988817176, 0.9945935042457059, 0.7973678388545082, 0.9359080044740966, 0.7095990418386695, 0.8094394831469017, 0.9049497793936964, 0.7510879174816284, 0.5205648842236267, 0.5749567915647196, 0.7189347044586077, 0.8727093923540378, 0.8682924448095202, 0.6001712502495308, 0.8069099200470592, 0.9072975836191894, 0.5564007137818066, 0.8221973648962009, 0.75811495001263, 0.8283369917818326, 0.9026499418080398, 0.9162019589565324, 0.5856906634376196, 0.96290282515169, 0.6008785654610242, 0.7122861281221822, 0.7018951403406387, 0.5282215296969814, 0.5578217851845879, 0.5165635632933487, 0.9164730278184412, 0.5045621178284894, 0.9461511580531841, 0.6068905606389845, 0.9863458161197843, 0.7161346121716731, 0.9420345627213724, 0.7293616604338414, 0.5211030040691735, 0.9037762449361731, 0.5082005056475791, 0.9311672047865949, 0.5118076346198535, 0.6523131597082291, 0.8538326490922968, 0.599350064450785, 0.7305312828350665, 0.925160723141524, 0.9780028372420322, 0.5749382708951943, 0.5744802655553339, 0.9406106411235238, 0.747792752108919, 0.7523086758783895, 0.8674767859693182, 0.6526456804805915, 0.8645087541828086, 0.5107994042842043, 0.8854043709691495, 0.958494624518552, 0.7872287106286882, 0.709201907507504, 0.6689384239108431, 0.9991551909402256, 0.5085025441130884, 0.5794588933433766, 0.5844837986090494, 0.6118031672175229, 0.911925276551961, 0.5736404990616155, 0.8232233559692514, 0.7299189323857512, 0.9969113187013892, 0.5892632963688285, 0.7528298268060762, 0.659175100502537, 0.6237049106925392, 0.8850607539374011, 0.9536694598997102, 0.7002091909729588, 0.8242758088830147, 0.6104542499130519, 0.8639513345333475, 0.8932450140280509, 0.5485682085483337, 0.6399108063911134, 
0.7881044906425451, 0.8725803379005208, 0.8975808221055315, 0.938024276532454, 0.6650501390887882, 0.6992489421917586, 0.9701161379573577, 0.8599364579667778, 0.6513557052888841, 0.9915736174126177, 0.8191557354971084, 0.6206899512451707, 0.9144592371083711, 0.6383982355686058, 0.741998947467668, 0.8361978542311979, 0.84962878557439, 0.8590893348449309, 0.8596948532676587, 0.8701705153540853, 0.6315857115733519, 0.7509560513868097, 0.8941838379864693, 0.6738557513680036, 0.7363345522186565, 0.672052271231676, 0.8835233179254007, 0.6693432135784432, 0.5757130057314513, 0.8261955044083621, 0.5852020417597226, 0.8302818717309136, 0.9088301485556398, 0.9139506453417019, 0.8030039258273112, 0.618524315825521, 0.6865353449085283, 0.8739817584818519, 0.632769668659569, 0.6908149334450482, 0.6927237548965595, 0.8628200125916192, 0.8590412425410114, 0.7018430606968868, 0.9213824742628438, 0.7414004639245292, 0.598748538220057, 0.5528009909138447, 0.8484798768635864, 0.9256293163759332, 0.9197746208621969, 0.9749468949822566, 0.5411807588434523, 0.7525987563092837, 0.7866265793640316, 0.994409479562365, 0.5859833552138833, 0.7994878427045353, 0.6020975590131978, 0.7042492438448726, 0.6217799424632134, 0.5354625671995132, 0.8080557016068661, 0.9492856675465233, 0.5122307198483405, 0.8405930446919273, 0.6437919164558843, 0.5151918310331657, 0.549755747767048, 0.7472361786316583, 0.5562919862074065, 0.6770545343037007, 0.6843565342179296, 0.7890844377971458, 0.6445744836302529, 0.725916741866188, 0.9048313143293408, 0.7409348047064588, 0.8969056961463021, 0.9361298970976466, 0.9624538334749448, 0.7272326020944075, 0.8364402130963664, 0.8495401714333635, 0.7685651768948998, 0.59582819481912, 0.7840758839882993, 0.7019776847463882, 0.5187659065280869, 0.8710485595144026, 0.7180722295954325, 0.6068836901570684, 0.9641235489549258, 0.9651090042828976, 0.6379599785616985, 0.9441078289126331, 0.7372223177570529, 0.5371721906866889, 0.6747897539401984, 0.5236276272868565, 
0.6321644118773123, 0.9624420156066794, 0.9291293214959884, 0.7111834587029189, 0.9050451603278367, 0.7549433639829841, 0.7465902593661318, 0.5117452863953508, 0.8803625394965805, 0.6118376209350819, 0.5574962578021898, 0.9624029637154977, 0.7725756405209483, 0.9382983310613413, 0.6797603981898187, 0.8337443535599166, 0.5011364745935383, 0.9872448730543374, 0.9171690725986543, 0.76183285947949, 0.7182726794529586, 0.6007925176619024, 0.5527609929665667, 0.6481070189990576, 0.5923636300765208, 0.8160503818425161, 0.9365102746553231, 0.7736981245224566, 0.7605648722619249, 0.9333626932365386, 0.9429610555191514, 0.5043671038711677, 0.5476375593488494, 0.5310417732620699, 0.5774709694161161, 0.7385516946790663, 0.5877739457577427, 0.9256266139759437, 0.7962619015551782, 0.8754419641703729, 0.8765639391957611, 0.654655614945783, 0.9231079357863452, 0.8093617469980275, 0.8588536646513388, 0.5591409388183455, 0.741028531502632, 0.7312108590993045, 0.5736171872670586, 0.7187736157572833, 0.6015910393121435, 0.534899529057655, 0.5767132051203742, 0.8905323441159252, 0.6922778194543302, 0.8453038270329627, 0.8669898704269228, 0.5428270939327391, 0.8661444394915716, 0.8830107157278955, 0.9876224128038178, 0.5811299277648962, 0.7781082673772666, 0.5097534793480623, 0.7569348680852335, 0.6546845271066593, 0.86870427625371, 0.9616573844450222, 0.6305346824435085, 0.6405519580314807, 0.9200128288876654, 0.7648767813417923, 0.5531160630750107, 0.9220135124141662, 0.6929278667161934, 0.5584234960279145, 0.5387445417642096, 0.686403833662957, 0.7659965646883253, 0.5841663299763712, 0.8812106990087814, 0.988240266704844, 0.7277822889748686, 0.6816443009469657, 0.9201240441023447, 0.6721073479997417, 0.7471859813903733, 0.9464619717742093, 0.5309995436902474, 0.9614579971518027, 0.9887745003271984, 0.7254534708646083, 0.8333981559377882, 0.9356436616740794, 0.5725651809884322, 0.838048478558742, 0.5764073651419267, 0.921812457449674, 0.940849632853658, 0.9396440691268897, 
0.8577122482380009, 0.717610942755937, 0.512712909920964, 0.6469906333971966, 0.986267505187467, 0.7880970967187115, 0.6983222586747296, 0.7357727349744225, 0.5664123365399178, 0.5972293300899021, 0.6372399359254891, 0.6902656000162729, 0.9856134643247783, 0.5161614789501192, 0.5198535566155821, 0.5526549599157097, 0.9007760272218424, 0.8633098539816491, 0.9043213574466145, 0.8017890864570727, 0.8057948263693244, 0.7953305125438946, 0.8941531905803687, 0.806127569893911, 0.669253525834529, 0.931567674007615, 0.5898262666106988, 0.7664128203562691, 0.6898371281098495, 0.9402131172476212, 0.5462007161232376, 0.9037662490342141, 0.9833009791944044, 0.502334176557468, 0.7651728070828656, 0.5561600844770741, 0.9170540416236816, 0.8904206703882754, 0.8489861234077738, 0.889188792948248, 0.8125805560867925, 0.7092042592242037, 0.5764236086928209, 0.7427072202163683, 0.8733994740765748, 0.8845297565439765, 0.7341032469184819, 0.5826668524448593, 0.6783470874106791, 0.6700269846325348, 0.6089757562849996, 0.9577350224777994, 0.7940954322166751, 0.5775897570998443, 0.6153981228436263, 0.6105658739460591, 0.8033255490555549, 0.739868340261403, 0.6984376231430982, 0.693582907818113, 0.6869706669139439, 0.9303894726374518, 0.9180504565164317, 0.8105272859805797, 0.8889374703572381, 0.8552879374946907, 0.7774253530234071, 0.7458637027518951, 0.7776549998002066, 0.8338397116087878, 0.7686071110511765, 0.8332084800751365, 0.7447386506371092, 0.7249771186928371, 0.7932199758840237, 0.6299717672171834, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 654, 656, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 752, 754, 756, 758, 761, 763, 765, 767, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 836, 
838, 840, 842, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 868, 870, 873, 875, 880, 882, 884, 886, 888, 890, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 918, 920, 923, 925, 928, 930, 933, 935, 938, 940, 942, 944, 947, 949, 952, 954, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1040, 1042, 1044, 1046, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1082, 1084, 1086, 1088, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1142, 1144, 1147, 1149, 1152, 1154, 1157, 1159, 1165, 1167, 1170, 1172, 1175, 1177, 1180, 1182, 1185, 1187, 1189, 1191, 1193, 1195, 1198, 1200, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 1362, 1364, 1366, 1368, 1370, 1372, 1374, 1376, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1396, 1398, 1400, 1402, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1430, 1432, 1434, 1436, 1438, 1440, 1443, 1445, 1448, 1450, 1456, 1458, 1461, 1463, 1467, 1469, 1471, 1473, 1475, 1477, 1480, 1482, 1485, 1487, 1490, 1492, 1495, 1497, 1500, 1502, 1505, 1507, 1510, 1512, 1514, 1516, 1518, 1520, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1560, 1562, 1564, 1566, 1569, 1571, 1574, 1576, 1581, 1583, 1586, 1588, 1591, 1593, 1596, 1598, 
1601, 1603, 1606, 1608, 1611, 1613, 1616, 1618, 1621, 1623, 1626, 1628, 1631, 1633, 1156, 1156, 1164, 1162, 1161, 1164, 1162, 1161, 1590, 1455, 1453, 409, 408, 1590, 1455, 1453, 1455, 1453, 1578, 1573, 1585, 1504, 1509, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 409, 408, 1453, 1455, 1455, 1453, 1504, 1509, 1479, 1489, 1479, 1489, 1635, 1630, 1585, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1378, 1504, 1509, 1504, 1509, 1378, 1504, 1509, 1504, 1509, 1578, 1573, 1585, 1590, 1578, 1573, 1585, 1590, 1630, 1635, 1635, 1630, 1578, 1573, 1585, 1590, 946, 958, 1455, 1453, 1489, 1489, 1504, 1509, 1504, 1509, 1578, 1573, 1578, 1573, 1585, 1590, 1578, 1573, 1578, 1573, 1585, 1590, 1580, 1305, 858, 858, 845, 845, 879, 879, 958, 946, 946, 958, 1141, 1141, 1091, 1091, 1164, 1162, 1164, 1162, 1164, 1162, 1164, 1162, 1455, 1453, 1479, 1479, 1479, 1489, 1479, 1489, 1455, 1453, 1305, 1509, 1509, 1504, 1504, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1635, 1630, 1635, 1630, 1580, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 
3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3508, 3510, 3512, 3514, 3516, 3518, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3578, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, 3826, 3828, 3830, 3832, 3834, 3836, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3854, 3856, 3858, 3860, 3862, 3864, 3866, 3868, 3870, 3872, 3874, 3876, 3878, 3880, 3882, 3884, 3886, 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 
3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 877, 872, 4099, 858, 927, 922, 956, 951, 653, 653, 892, 1146, 1151, 1151, 1146, 1162, 1179, 1174, 658, 658, 927, 922, 4117, 956, 951, 927, 922, 1151, 1146, 1146, 1151, 658, 653, 1202, 1197, 4483, 4486, 1202, 1197, 1465, 1460, 1484, 1479, 1452, 1447, 4490, 4492, 1484, 1499, 1494, 4141, 4495, 1452, 1447, 4497, 1465, 1460, 1484, 1509, 4146, 4499, 1452, 1447, 1453, 1455, 1465, 1460, 4224, 1479, 1489, 1499, 1494, 4502, 1452, 1447, 4504, 1452, 1447, 4506, 1460, 4508, 1452, 1447, 4510, 1452, 1447, 4512, 1465, 4514, 1452, 1447, 1452, 1447, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1504, 4518, 1479, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4224, 1479, 1489, 1494, 1499, 4520, 1455, 1453, 1465, 1460, 4224, 4522, 4524, 4183, 4184, 4526, 4458, 1452, 1447, 4529, 1452, 1447, 4531, 4533, 4189, 1447, 1452, 4535, 1452, 1447, 4537, 4539, 4194, 1452, 1447, 4541, 1452, 1447, 4543, 1465, 1460, 4545, 1484, 1484, 1499, 1494, 1499, 1494, 1499, 1494, 1504, 1452, 1447, 4547, 1452, 1447, 4549, 1465, 1460, 4206, 1452, 1447, 4551, 1452, 1447, 4553, 1465, 1460, 4445, 1484, 1479, 1499, 1494, 1504, 1499, 1494, 1499, 1494, 1452, 1447, 1455, 1453, 1465, 1460, 4217, 1479, 1489, 1499, 1494, 4556, 1499, 1494, 4558, 1447, 1452, 1453, 1455, 1465, 1460, 4224, 1479, 1489, 1499, 1494, 4561, 1494, 1499, 4563, 4565, 4567, 1600, 4569, 4571, 1595, 1610, 1605, 1615, 1625, 1620, 1625, 1620, 
1625, 1620, 4575, 4577, 4579, 927, 922, 4240, 956, 951, 927, 922, 4246, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 658, 653, 1202, 1197, 4583, 1479, 1484, 1484, 1499, 1494, 1509, 1499, 1494, 1504, 1447, 1452, 1453, 1455, 1465, 1460, 4276, 1378, 1479, 1489, 1499, 1494, 4587, 1499, 1494, 4589, 4591, 4593, 4595, 1600, 1595, 4597, 4599, 4601, 1600, 1595, 4458, 4405, 1590, 1585, 1595, 1600, 1605, 1610, 1559, 1620, 1625, 1635, 1630, 877, 872, 877, 872, 877, 872, 877, 872, 4300, 4302, 877, 872, 858, 877, 872, 877, 872, 4311, 892, 927, 922, 4316, 956, 951, 4611, 927, 922, 937, 932, 956, 951, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1164, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1164, 1161, 1179, 1174, 1179, 1174, 4380, 1202, 1197, 1151, 1146, 1141, 1151, 1146, 1156, 4619, 1169, 4621, 1161, 1151, 1146, 1141, 1151, 1146, 1156, 4623, 1161, 4625, 1169, 1179, 1174, 1184, 4380, 1202, 1197, 1573, 1590, 1585, 4390, 4627, 1484, 1484, 1578, 1590, 1585, 1452, 1447, 1455, 1453, 1465, 1460, 4390, 1378, 4631, 1499, 1494, 1494, 1499, 1452, 1447, 1455, 1453, 1465, 1460, 4413, 1378, 4633, 1499, 1494, 1494, 1499, 1452, 1447, 1452, 1447, 4635, 1484, 1479, 1509, 1504, 4405, 4407, 1452, 1447, 1453, 1455, 1460, 1465, 4413, 1378, 1489, 1479, 1499, 1494, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4424, 1378, 1479, 1489, 1499, 1494, 1499, 1494, 1452, 1447, 4642, 1452, 1447, 4644, 1465, 1460, 4438, 1452, 1447, 4646, 1452, 1447, 4648, 1465, 1460, 4445, 1484, 1479, 1484, 1489, 1499, 1494, 1509, 1504, 4458, 1590, 1585, 1600, 1595, 1605, 1610, 1559, 1625, 1620, 4650, 4458, 1585, 1590, 1595, 1600, 1605, 1610, 1559, 1625, 1620, 4652, 1578, 1573, 1590, 1585, 1600, 1595, 1610, 1605, 1615, 1625, 1620, 1635, 1630, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 
4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4719, 4720, 4721, 4722, 4724, 4725, 4727, 4728, 4729, 4730, 4731, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4745, 4746, 4748, 4749, 4751, 4753, 4754, 4756, 4757, 4759, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4794, 4795, 4796, 4797, 4798, 4801, 4802, 4804, 4805, 4806, 4808, 4809, 4812, 4813, 4814, 4816, 4817, 4820, 4821, 4822, 4824, 4825, 4827, 4828, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4842, 4843, 4845, 4846, 4847, 4848, 4849, 4851, 4852, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4878, 4879, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4893, 4894, 4898, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4958, 4959, 4964, 4965, 4969, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982, 4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 5005, 5006, 5007, 5009, 5010, 5011, 5012, 5013, 5014, 5015, 5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 5040, 5041, 5042, 5043, 5044, 5045, 5046, 5047, 5048, 5049, 5050, 5051, 5052, 5053, 5054, 5055, 5056, 5057, 5059, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5069, 5071, 5072, 5073, 
5074, 5075, 5076, 5077, 5078, 5079, 5080, 5081, 5083, 5084, 5085, 5086, 5087, 5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5110, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5119, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5128, 5129, 5130, 5131, 5132, 5133, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148, 5149, 5150, 5151, 5152, 5153, 5154, 5156, 5157, 5159, 5160, 5161, 5162, 5163, 5165, 5166, 5168, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5180, 5181, 5182, 5183, 5184, 5185, 5186, 5187, 5188, 5190, 5191, 5192, 5193, 5194, 5195, 5196, 5197, 5198, 5199, 5201, 5202, 5203, 5204, 5205, 5206, 5207, 5208, 5209, 5210, 5211, 5212, 5213, 4488, 4485, 4604, 4604, 4800, 4799, 4604, 4604, 4800, 4799, 4604, 4604, 4604, 4604, 4603, 4604, 4603, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5248, 5252, 5254, 5259, 5261, 5264, 5268, 5271, 5273, 5275, 5277, 5279, 5281, 5283, 5285, 5289, 5292, 5295, 5297, 5302, 5304, 5306, 5309, 5311, 5313, 5315, 5318, 5320, 5323, 5325, 5329, 5333, 5337, 5341, 5343, 5345, 5347, 5350, 5352, 5354, 5356, 5362, 5364, 5367, 5369, 5372, 5374, 5376, 5380, 5382, 5384, 5387, 5389, 5391, 5394, 5396, 5398, 5403, 5406, 5408, 5410, 5412, 5414, 5417, 5419, 5421, 5423, 5425, 5427, 5430, 5432, 5434, 5438, 5441, 5443, 5445, 5447, 5450, 5452, 5455, 5457, 5459, 5461, 5464, 5466, 5471, 5474, 5477, 5479, 5481, 5485, 5487, 5489, 5491, 5493, 5497, 5499, 5501, 5504, 5506, 5508, 5510, 5512, 5514, 5518, 5521, 5523, 5527, 5530, 5532, 5534, 5536, 5538, 5540, 5542, 5544, 5549, 5551, 5553, 5558, 5560, 5562, 5565, 5568, 5570, 5573, 5575, 5578, 5583, 5586, 5591, 5595, 5598, 5604, 5606, 5608, 5610, 5614, 5616, 5618, 5620, 5622, 5626, 5628, 5630, 5632, 5636, 5640, 5642, 5644, 5648, 5650, 5652, 5654, 5656, 5658, 5662, 5664, 5666, 5668, 5670, 5672, 5675, 5677, 5679, 5686, 5688, 5691, 5693, 5695, 5698, 5701, 5703, 
5705, 5708, 5710, 5712, 5714, 5716, 5719, 5721, 4608, 4607, 5258, 5548, 5557, 5582, 5581, 5590, 5589, 4608, 4607, 5258, 5548, 5557, 5582, 5581, 5590, 5589, 4608, 4607, 5526, 5548, 5557, 5582, 5581, 5590, 5723, 5724, 5468, 4586, 4585, 5725, 4603, 5726, 4603, 5727, 5728, 5288, 4718, 4760, 5366, 5371, 4586, 4585, 4586, 4585, 4586, 4585, 4603, 5436, 4603, 5437, 4603, 4586, 4585, 5468, 5729, 4603, 5730, 4603, 4752, 4760, 5366, 5371, 4586, 4585, 4586, 4585, 4586, 4585, 5340, 5731, 5732, 4603, 5436, 4603, 5437, 4603, 5366, 5371, 4586, 4585, 5402, 5733, 5436, 5734, 5437, 5735, 4586, 4585, 5468, 5736, 5737, 5738, 5739, 4604, 4603, 4608, 4607, 5526, 5548, 5557, 5582, 5581, 5590, 5589, 5635, 5685, 4637, 5685, 4630, 4629, 4637, 5096, 5109, 5635, 5685, 4637, 4637, 5685, 5683, 4654, 4654, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4610, 4609, 4606, 4605, 5941, 5942, 5251, 4610, 4609, 5943, 5449, 4581, 5454, 4582, 5870, 4613, 4614, 4615, 4481, 5944, 4481, 4615, 5945, 4481, 4615, 5567, 4618, 4617, 5772, 4481, 4615, 5946, 5947, 4481, 4615, 5948, 5949, 5593, 5773, 4610, 4609, 4606, 4605, 5950, 5951, 4610, 4609, 5520, 5952, 5449, 4581, 5454, 4582, 5870, 4614, 4613, 4615, 4481, 5953, 4481, 4615, 5954, 4481, 4615, 5567, 4618, 4617, 5772, 4481, 4615, 5955, 5956, 4481, 4615, 5957, 5958, 5593, 5773, 4610, 4609, 4606, 4605, 5959, 5960, 4610, 4609, 5520, 5961, 5270, 4581, 5454, 4582, 5870, 4614, 4613, 4482, 4616, 5962, 4482, 4616, 5963, 4482, 4616, 5567, 4618, 4617, 5772, 4482, 4616, 5964, 5965, 4482, 4616, 5966, 5593, 5773, 4726, 4723, 5674, 5167, 5164, 5681, 5969, 5970, 5971, 5476, 5300, 5848, 5483, 5850, 4960, 4957, 5973, 5937, 5718, 5940, 5975, 5856, 5503, 5859, 5799, 5358, 5976, 4880, 4877, 4844, 4778, 5393, 4853, 4850, 5400, 5978, 5405, 4639, 4638, 5795, 5349, 5797, 4793, 4750, 4717, 5979, 4758, 4755, 5980, 4810, 4807, 5981, 4818, 4517, 4516, 5982, 5983, 5984, 4638, 5985, 5986, 4639, 5987, 5988, 5339, 5780, 5308, 5782, 4744, 5989, 5990, 5991, 5992, 5440, 4574, 
4573, 4803, 5993, 5856, 5503, 5859, 4726, 4723, 5674, 5167, 5164, 5681, 5994, 5995, 5996, 5476, 5300, 5848, 5483, 5850, 4960, 4957, 5998, 5937, 5718, 5940, 6000, 5856, 5503, 5859, 5780, 5308, 5782, 4744, 4750, 4747, 6001, 4758, 4755, 6002, 4810, 4807, 6003, 4818, 4517, 4516, 6004, 6005, 6006, 4638, 6007, 6008, 4639, 6009, 6010, 5339, 4844, 4778, 5393, 4853, 4850, 5400, 6011, 5405, 4639, 4638, 5795, 5349, 5797, 4793, 5799, 5358, 6012, 4880, 4877, 6014, 6015, 6016, 6017, 5440, 4574, 4573, 4803, 6018, 5856, 5503, 5859, 4810, 4807, 6019, 4818, 4815, 6020, 4826, 4823, 4829, 6021, 6022, 5386, 4639, 4638, 4844, 4841, 5393, 4853, 4850, 5400, 6023, 4639, 4638, 5405, 5821, 5416, 5823, 4880, 4877, 5827, 5429, 5829, 4895, 4892, 6025, 6027, 5440, 4911, 4574, 4573, 5856, 5503, 5859, 5449, 4581, 5454, 4582, 5463, 4616, 4615, 5844, 5155, 4935, 5674, 5167, 5164, 5681, 6029, 6030, 6031, 5476, 5473, 5848, 5483, 5850, 4960, 4957, 6032, 5853, 6034, 5854, 5718, 5940, 6036, 6037, 5856, 5503, 5859, 4610, 4609, 4606, 4605, 6038, 6039, 4610, 4609, 5520, 6040, 5529, 5008, 5870, 4614, 4613, 5546, 4616, 4615, 6041, 5555, 4616, 4615, 6042, 5564, 4616, 4615, 5567, 4618, 4617, 5885, 5580, 5577, 6043, 6044, 5588, 5585, 6045, 6046, 5593, 5891, 5908, 5646, 5910, 4639, 4638, 5914, 5600, 5916, 4641, 4640, 5155, 5158, 5674, 5164, 5118, 5681, 6047, 6048, 5906, 6049, 5937, 5718, 5940, 5908, 5646, 5910, 4639, 4638, 5914, 5600, 5916, 4641, 4640, 5155, 5082, 5674, 5164, 5118, 5681, 6050, 6051, 6052, 5926, 6053, 5937, 5718, 5940, 5895, 5612, 6054, 4641, 4640, 5900, 5624, 6055, 4641, 4640, 5908, 5646, 5910, 4639, 4638, 5155, 5158, 5674, 5164, 5118, 5681, 6056, 6057, 5906, 6058, 5928, 5697, 5189, 6059, 5932, 5707, 5200, 5908, 5646, 5910, 4639, 4638, 5914, 5660, 5916, 4641, 4640, 5158, 5155, 5674, 5167, 5164, 5681, 6060, 6061, 5926, 6062, 5928, 5697, 5189, 6063, 5932, 5707, 5200, 4654, 5937, 5718, 5940, 58, 59, 60, 61, 62, 63, 6080, 6081, 6082, 6083, 6084, 6086, 6087, 6088, 6090, 6091, 6092, 6093, 6094, 6095, 
6096, 6097, 6098, 6100, 6101, 6103, 6104, 6105, 6106, 6107, 6108, 6109, 6110, 6111, 6113, 6114, 6115, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6125, 6126, 6127, 6129, 6130, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6139, 6140, 6142, 6143, 6144, 6145, 6146, 6147, 6148, 6149, 6150, 6152, 6153, 6154, 6156, 6157, 6158, 6159, 6160, 6161, 6162, 6164, 6165, 6166, 6168, 6169, 6170, 6171, 6172, 6173, 6174, 6175, 6176, 6178, 6179, 6181, 6182, 6183, 6184, 6185, 6186, 6187, 6188, 6189, 6191, 6192, 6193, 6194, 6195, 6196, 6197, 6198, 6199, 6200, 6201, 6202, 6205, 6206, 6207, 6208, 6209, 6210, 6211, 5972, 6213, 6214, 6215, 5974, 6217, 6218, 6219, 6220, 6221, 6223, 6224, 6225, 6226, 6227, 6228, 6229, 6230, 6232, 6233, 6234, 6235, 6236, 6237, 6238, 6239, 6240, 6242, 6243, 6245, 6246, 6248, 6249, 6250, 6252, 6254, 6255, 6257, 6258, 6260, 6261, 6262, 6263, 6264, 6269, 6270, 6271, 6272, 6274, 6275, 6276, 6277, 6278, 6279, 6280, 6281, 6282, 6283, 6286, 6287, 6288, 6289, 6290, 6291, 6292, 5997, 6294, 6295, 6296, 5999, 6298, 6299, 6300, 6301, 6302, 6303, 6304, 6305, 6306, 6308, 6309, 6311, 6312, 6314, 6315, 6316, 6318, 6320, 6321, 6323, 6324, 6326, 6327, 6328, 6329, 6330, 6331, 6332, 6334, 6335, 6336, 6337, 6338, 6339, 6340, 6341, 6342, 6344, 6345, 6350, 6351, 6352, 6353, 6355, 6356, 6357, 6358, 6359, 6361, 6362, 6364, 6365, 6366, 6367, 6369, 6370, 6371, 6372, 6373, 6374, 6375, 6376, 6377, 6379, 6380, 6381, 6382, 6383, 6384, 6385, 6386, 6387, 6388, 6389, 6390, 6391, 6394, 6395, 6396, 6397, 6398, 6399, 6400, 6401, 6402, 6403, 6404, 6405, 6406, 6407, 6408, 6409, 6410, 6411, 6412, 6413, 6414, 6415, 6418, 6419, 6420, 6421, 6422, 6423, 6424, 6426, 6428, 6429, 6430, 6431, 6433, 6434, 6435, 6436, 6437, 6438, 6439, 6440, 6442, 6443, 6444, 6446, 6447, 6448, 6449, 6450, 6451, 6452, 6453, 6455, 6456, 6457, 6459, 6460, 6461, 6462, 6463, 6464, 6465, 6466, 6467, 6468, 6470, 6471, 6472, 6474, 6475, 6476, 6477, 6478, 6479, 6480, 6481, 6482, 6483, 6484, 6485, 6486, 6487, 6488, 6489, 6490, 6491, 6492, 
6494, 6496, 6497, 6498, 6499, 6500, 6501, 6502, 6503, 6504, 6505, 6506, 6507, 6508, 6509, 6510, 6511, 6512, 6513, 6514, 6515, 6518, 6520, 6521, 6522, 6523, 6524, 6526, 6527, 6528, 6529, 6531, 6532, 6533, 6534, 6535, 6536, 6537, 6538, 6539, 6540, 6541, 6542, 6543, 6544, 6546, 6548, 6549, 6550, 6552, 6553, 6554, 6555, 6556, 6557, 6558, 6559, 6560, 6561, 6562, 6563, 6564, 6565, 6566, 6567, 6568, 6569, 6570, 6571, 6573, 6575, 6576, 6577, 6579, 6580, 6581, 6582, 6583, 6584, 6585, 6268, 6266, 6349, 6347, 6393, 6392, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6592, 6594, 6597, 6605, 6607, 6609, 6611, 6614, 6617, 6620, 6625, 6627, 6630, 6638, 6640, 6642, 6644, 6647, 6650, 6653, 6658, 6660, 6663, 6671, 6673, 6675, 6677, 6680, 6683, 6686, 6688, 6691, 6694, 6697, 6698, 6703, 6715, 6717, 6720, 6723, 6730, 6732, 6734, 6736, 6750, 6756, 6759, 6762, 6763, 6768, 6782, 6784, 6786, 6788, 6797, 6800, 6803, 6812, 6815, 6821, 6823, 6825, 6829, 6832, 6835, 6838, 6844, 6849, 6852, 6862, 6866, 6869, 6872, 6873, 6878, 6888, 6890, 6893, 6899, 6901, 6904, 6907, 6911, 6914, 6917, 6925, 6930, 6932, 6935, 6946, 6951, 6953, 6956, 6959, 6966, 6970, 6975, 6977, 6980, 6994, 6999, 7001, 7004, 6603, 6601, 6624, 6636, 6634, 6657, 6669, 6667, 6690, 6701, 6706, 6708, 6710, 6712, 6714, 6727, 6729, 6744, 6742, 6740, 6746, 6748, 7019, 7020, 6753, 6755, 6766, 6771, 6773, 6775, 6777, 6779, 6781, 6796, 6794, 6792, 6807, 6809, 6811, 7021, 7022, 6818, 6820, 6842, 6847, 7023, 7024, 6855, 6857, 6861, 6859, 6921, 6876, 6881, 6880, 6883, 6885, 6887, 6897, 6921, 6923, 6928, 6939, 6940, 6942, 6944, 6949, 6961, 6963, 6965, 6969, 6973, 6984, 6985, 6987, 6988, 6990, 6992, 6997, 7008, 7009, 7011, 7012, 7014, 7016, 7018, 61, 62, 63, 7040, 7042, 7050, 7052, 7060, 7062, 7079, 7083, 7084, 7093, 7096, 7098, 7102, 7105, 7108, 7109, 7115, 7117, 7119, 7120, 7121, 7043, 7143, 7144, 6613, 6102, 6099, 6616, 6622, 6619, 7145, 7053, 7146, 7147, 6646, 6141, 6138, 6649, 6655, 6652, 7148, 7063, 7149, 7150, 6679, 6180, 
6177, 6682, 7070, 6685, 7151, 6696, 6693, 7074, 7152, 7075, 7153, 7154, 7155, 7156, 7157, 7076, 6722, 6719, 7158, 7159, 6247, 6244, 6241, 7160, 7161, 7162, 7163, 7164, 7165, 7167, 7168, 6761, 6758, 7088, 7169, 7089, 7170, 7171, 7172, 7173, 7174, 7175, 6313, 6310, 6307, 7176, 7177, 7178, 6802, 6799, 7179, 7180, 7181, 7097, 7182, 7184, 7185, 6827, 6363, 6360, 6837, 6834, 7186, 7106, 7187, 7107, 7188, 7190, 7191, 7118, 7192, 7193, 6865, 6919, 6916, 7194, 6871, 6868, 7113, 7195, 7114, 7196, 7197, 7198, 7199, 7200, 7118, 7201, 6913, 6919, 6916, 7202, 7203, 7125, 7204, 7126, 6937, 6934, 7205, 7206, 7207, 7208, 7129, 7209, 7130, 6958, 6955, 6960, 7210, 7211, 7212, 7134, 7213, 7135, 7214, 7136, 6982, 6979, 7215, 7216, 7217, 7218, 7219, 7220, 7139, 7221, 7140, 7006, 7003, 7222, 7223, 7224, 7225, 7226, 7227, 7228, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6089, 6596, 7253, 7256, 7257, 7258, 7259, 7260, 7261, 6128, 6629, 7263, 7266, 7267, 7268, 7269, 7270, 7271, 6167, 6662, 7273, 7276, 7277, 7278, 7279, 7280, 7281, 7283, 7284, 7285, 7287, 7293, 7294, 7295, 7238, 6251, 7298, 7299, 7300, 7301, 7240, 7309, 7310, 7311, 7313, 6317, 7320, 7321, 7322, 7323, 7326, 7327, 7242, 7331, 7243, 7335, 7336, 7337, 7244, 7338, 7339, 7245, 7341, 7343, 7246, 6445, 6892, 7347, 6910, 6458, 6454, 7350, 7351, 7352, 7354, 7355, 7356, 7358, 7359, 6445, 6892, 7364, 6910, 6458, 6454, 7366, 7367, 7368, 7371, 7373, 7374, 7375, 7380, 7382, 7383, 7384, 7385, 7389, 7391, 7393, 7394, 7395, 7402, 7404, 7405, 7406, 7291, 7289, 7305, 7297, 7308, 7317, 7315, 7319, 7329, 7334, 7346, 7363, 7378, 7387, 7400, 7398, 7413, 7411, 7409, 61, 62, 63, 7424, 7425, 7426, 7427, 7431, 7433, 7434, 7435, 7436, 7440, 7442, 7443, 7444, 7445, 7449, 7451, 7456, 7458, 7459, 7461, 7463, 7464, 7465, 7469, 7471, 7473, 7474, 7476, 7478, 7479, 7482, 7483, 7485, 7488, 7489, 7490, 7491, 7492, 7493, 7494, 7496, 7498, 7503, 7504, 7505, 7506, 7507, 7508, 7510, 7514, 7518, 7524, 7528, 7454, 7530, 7531, 7532, 7533, 7455, 7534, 7468, 7535, 7536, 
7537, 7477, 7538, 7539, 7487, 7486, 7540, 7501, 7541, 7361, 7513, 7512, 7542, 7517, 7516, 7543, 7523, 7522, 7521, 7544, 7545, 7527, 7526, 7546, 7547, 7548, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7552, 7554, 7555, 7557, 7559, 7560, 7562, 7564, 7565, 7570, 7575, 7581, 7586, 7588, 7589, 7594, 7597, 7262, 7272, 7282, 7605, 7453, 7606, 7569, 7610, 7573, 7612, 7467, 7613, 7579, 7616, 7580, 7619, 7620, 7584, 7585, 7353, 7622, 7500, 7624, 7369, 7376, 7625, 7626, 7520, 7628, 7629, 7396, 7631, 7632, 7633, 7634, 7407, 7636, 7637, 7638, 56, 57, 58, 59, 60, 61, 62, 63, 7689, 7690, 7694, 7696, 7681, 7697, 7430, 7684, 7698, 7439, 7687, 7699, 7448, 7701, 7703, 7705, 7707, 7709, 7615, 7711, 7714, 7582, 7712, 7715, 7693, 7716, 7718, 7623, 7596, 7720, 7721, 7724, 7727, 7729, 7732, 7735, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7748, 7750, 7751, 7753, 7754, 7756, 7700, 7572, 7608, 7611, 7706, 7577, 7618, 7765, 7621, 7768, 7495, 7717, 7772, 7509, 7774, 7775, 7776, 7778, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7749, 7752, 7755, 7815, 7819, 7764, 7824, 7827, 7828, 7829, 7830, 7831, 7708, 7702, 7771, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7875, 7876, 7766, 7769, 7773, 7874, 7873, 7872, 7884, 7885, 7886, 7779, 7731, 7630, 7627, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7816, 7937, 7941, 7942, 7943, 7822, 7939, 7940, 7947, 7948, 7949, 7950, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 
23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8000, 8001, 8002, 8005, 8006, 8007, 8008, 8010, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8066, 7820, 7817, 8070, 7946, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8129, 8130, 8131, 8132, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7944, 8067, 8194, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8256, 8258, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8320, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8384, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8448, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8321, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 655, 657, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 753, 755, 757, 759, 762, 764, 766, 768, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 837, 
839, 841, 843, 847, 849, 851, 853, 855, 857, 860, 862, 864, 866, 869, 871, 874, 876, 881, 883, 885, 887, 889, 891, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 919, 921, 924, 926, 929, 931, 934, 936, 939, 941, 943, 945, 948, 950, 953, 955, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1041, 1043, 1045, 1047, 1050, 1052, 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1083, 1085, 1087, 1089, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1143, 1145, 1148, 1150, 1153, 1155, 1158, 1160, 1166, 1168, 1171, 1173, 1176, 1178, 1181, 1183, 1186, 1188, 1190, 1192, 1194, 1196, 1199, 1201, 1204, 1206, 1208, 1210, 1212, 1214, 1216, 1218, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1380, 1382, 1384, 1386, 1388, 1390, 1392, 1394, 1397, 1399, 1401, 1403, 1406, 1408, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1431, 1433, 1435, 1437, 1439, 1441, 1444, 1446, 1449, 1451, 1457, 1459, 1462, 1464, 1468, 1470, 1472, 1474, 1476, 1478, 1481, 1483, 1486, 1488, 1491, 1493, 1496, 1498, 1501, 1503, 1506, 1508, 1511, 1513, 1515, 1517, 1519, 1521, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1556, 1558, 1561, 1563, 1565, 1567, 1570, 1572, 1575, 1577, 1582, 1584, 1587, 1589, 1592, 1594, 1597, 1599, 
1602, 1604, 1607, 1609, 1612, 1614, 1617, 1619, 1622, 1624, 1627, 1629, 1632, 1634, 1039, 1048, 1163, 1163, 136, 1163, 1163, 137, 760, 1442, 1442, 1429, 1429, 751, 1454, 1454, 1442, 1442, 572, 572, 760, 1395, 1395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1429, 1429, 1454, 1454, 1442, 1442, 1404, 1404, 1235, 1235, 1256, 1256, 1568, 1568, 751, 1454, 1454, 1442, 1442, 374, 374, 1454, 1454, 1442, 1442, 395, 395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1454, 1454, 1442, 1442, 494, 1395, 1395, 1404, 1404, 523, 1395, 1395, 1404, 1404, 572, 572, 751, 751, 572, 572, 751, 751, 1522, 1522, 1568, 1568, 572, 572, 751, 751, 917, 917, 1442, 1442, 1235, 1256, 1395, 1395, 1404, 1404, 769, 769, 769, 769, 751, 751, 769, 769, 769, 769, 760, 760, 770, 770, 867, 878, 835, 844, 867, 878, 917, 917, 957, 957, 1039, 1048, 1081, 1090, 1124, 1124, 1124, 1124, 1163, 1163, 1163, 1163, 1442, 1442, 1235, 1256, 1235, 1235, 1256, 1256, 1454, 1454, 1579, 1395, 1404, 1395, 1404, 1454, 1454, 1442, 1442, 1442, 1442, 1454, 1454, 1522, 1522, 1568, 1568, 1579, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 
3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 3505, 3507, 3509, 3511, 3513, 3515, 3517, 3519, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3579, 3581, 3583, 3585, 3587, 3589, 3591, 3593, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3751, 3753, 3755, 3757, 3759, 3761, 3763, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 3801, 3803, 3805, 3807, 3809, 3811, 3813, 3815, 3817, 3819, 3821, 3823, 3825, 3827, 3829, 3831, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3885, 3887, 3889, 3891, 3893, 3895, 3897, 3899, 3901, 3903, 3905, 1655, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1681, 1688, 1689, 1690, 1691, 1696, 1697, 1698, 1701, 1702, 1708, 1709, 1710, 1722, 1723, 1726, 1727, 1730, 1731, 1733, 1734, 1737, 1738, 1741, 1742, 1744, 1745, 1748, 1751, 1765, 1766, 1781, 1782, 1788, 1789, 1790, 1791, 1794, 1795, 1797, 1800, 1801, 1804, 1805, 1806, 1807, 1811, 1812, 1815, 1816, 1817, 1818, 1822, 1823, 1826, 1827, 1830, 1831, 1843, 1844, 1847, 1848, 1854, 1855, 1858, 1859, 1879, 1884, 1885, 1888, 1889, 1897, 1902, 1903, 1906, 1907, 1908, 1909, 1910, 1911, 1913, 1914, 1915, 
1916, 1923, 1926, 1929, 1930, 1931, 1932, 1933, 1934, 1940, 1946, 1958, 1959, 1962, 1964, 1983, 1984, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2006, 2008, 2022, 2025, 2031, 2033, 2039, 2042, 2050, 2051, 2058, 2061, 2082, 2085, 2094, 2097, 2107, 2108, 2110, 2111, 2119, 2120, 2122, 2123, 2135, 2136, 2138, 2140, 2152, 2153, 2166, 2167, 2176, 2177, 2184, 2197, 2200, 2213, 2216, 2219, 2220, 2223, 2224, 2230, 2231, 2234, 2235, 2257, 2258, 2269, 2270, 2273, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 4097, 4096, 4098, 4100, 4102, 4101, 4104, 4103, 4105, 4379, 4106, 4108, 4107, 4110, 4109, 4111, 4113, 4112, 4127, 4379, 4115, 4114, 4116, 4119, 4118, 4121, 4120, 4123, 4122, 4125, 4124, 4127, 4126, 4129, 4128, 4484, 4487, 4131, 4130, 4133, 4132, 4134, 4135, 4137, 4136, 4491, 4493, 4138, 4140, 4139, 572, 4496, 4203, 4202, 4498, 4143, 4142, 4144, 4145, 572, 4500, 4269, 4147, 4221, 4271, 4274, 4222, 4182, 4279, 4225, 4429, 4428, 4503, 4149, 4148, 4505, 4151, 4150, 4507, 4152, 4509, 4154, 4153, 4511, 4156, 4155, 4513, 4157, 4515, 4159, 4158, 4160, 4159, 4161, 4162, 4164, 4163, 4165, 4166, 4168, 4167, 4169, 4170, 4172, 4171, 4173, 4519, 4174, 4267, 4266, 4220, 4269, 4271, 4175, 4274, 4176, 4182, 4279, 4177, 4430, 4396, 4521, 4179, 4178, 4181, 4180, 4182, 4523, 4525, 572, 572, 4527, 572, 4186, 4185, 4530, 4188, 4187, 4532, 4534, 1466, 4191, 4190, 4536, 4193, 4192, 4538, 4540, 1466, 4203, 4195, 4542, 4400, 4399, 4544, 4205, 4196, 4546, 4197, 4198, 4211, 4199, 4213, 4212, 4267, 4200, 4201, 4203, 4202, 4548, 4400, 4399, 4550, 4205, 4204, 1429, 4442, 4441, 4552, 4440, 4439, 4554, 4444, 4443, 1466, 4207, 4260, 4209, 4208, 4268, 4211, 4210, 4213, 4212, 4214, 4220, 4215, 4221, 4274, 4222, 4216, 4279, 4218, 4429, 4219, 4557, 4280, 4416, 4559, 4220, 4269, 4221, 4271, 4274, 4222, 4223, 4279, 4225, 4429, 4428, 4562, 4430, 4396, 4564, 4566, 4568, 4226, 4570, 4572, 4227, 4229, 4228, 4230, 4232, 4231, 4234, 4233, 4236, 4235, 
4576, 4578, 4580, 4238, 4237, 4239, 4242, 4241, 4244, 4243, 4245, 4248, 4247, 4250, 4249, 4252, 4251, 4254, 4253, 4255, 4257, 4256, 4259, 4258, 4584, 4260, 4261, 4262, 4264, 4263, 4265, 4267, 4266, 4268, 4270, 4269, 4272, 4271, 4274, 4273, 4275, 4277, 4279, 4278, 4429, 4428, 4588, 4280, 4416, 4590, 4592, 4594, 4596, 4282, 4281, 4598, 4600, 4602, 4284, 4283, 769, 769, 4459, 4454, 4285, 4473, 4464, 4286, 4287, 4466, 4288, 4290, 4289, 4292, 4291, 4294, 4293, 4296, 4295, 4298, 4297, 4299, 4301, 4304, 4303, 4305, 4307, 4306, 4309, 4308, 4310, 4312, 4314, 4313, 4315, 4318, 4317, 4612, 4320, 4319, 4322, 4321, 4324, 4323, 4326, 4325, 4328, 4327, 4330, 4329, 4332, 4331, 4333, 4334, 4335, 4337, 4336, 4339, 4338, 4341, 4340, 4342, 4343, 4344, 4346, 4345, 4348, 4347, 4350, 4349, 4351, 4353, 4352, 4354, 4356, 4355, 4358, 4357, 4379, 4382, 4359, 4361, 4360, 4362, 4364, 4363, 4365, 4620, 4366, 4622, 4367, 4369, 4368, 4370, 4372, 4371, 4373, 4624, 4374, 4626, 4375, 4377, 4376, 4378, 4379, 4382, 4381, 4383, 4459, 4384, 4394, 4628, 4385, 4386, 4387, 4389, 4388, 4391, 4418, 4393, 4392, 4422, 4421, 4394, 4395, 4632, 4429, 4428, 4430, 4396, 4391, 4418, 4393, 4392, 4422, 4421, 4394, 4395, 4634, 4429, 4428, 4430, 4396, 4398, 4397, 4400, 4399, 4636, 4401, 4402, 4404, 4403, 4406, 4406, 4409, 4408, 4419, 4410, 4421, 4411, 4412, 4414, 4426, 4415, 4429, 4428, 4431, 4416, 4418, 4417, 4420, 4419, 4422, 4421, 4423, 4425, 4427, 4426, 4429, 4428, 4431, 4430, 4433, 4432, 4643, 4435, 4434, 4645, 4437, 4436, 1429, 4440, 4439, 4647, 4442, 4441, 4649, 4444, 4443, 1466, 4446, 4447, 4448, 4449, 4451, 4450, 4453, 4452, 4457, 4459, 4454, 4461, 4455, 4464, 4463, 4456, 4467, 4466, 4651, 4457, 4460, 4459, 4462, 4461, 4464, 4463, 4465, 4467, 4466, 4653, 4469, 4468, 4471, 4470, 4473, 4472, 4475, 4474, 4476, 4478, 4477, 4480, 4479, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 
1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1679, 1680, 1682, 1683, 1684, 1685, 1686, 1687, 1692, 1693, 1694, 1695, 1699, 1700, 1703, 1704, 1705, 1706, 1707, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1724, 1725, 1728, 1729, 1732, 1735, 1736, 1739, 1740, 1743, 1746, 1747, 1749, 1750, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1783, 1784, 1785, 1786, 1787, 1792, 1793, 1796, 1798, 1799, 1802, 1803, 1808, 1809, 1810, 1813, 1814, 1819, 1820, 1821, 1824, 1825, 1828, 1829, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1845, 1846, 1849, 1850, 1851, 1852, 1853, 1856, 1857, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1880, 1881, 1882, 1883, 1886, 1887, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1898, 1899, 1900, 1901, 1904, 1905, 1912, 1917, 1918, 1919, 1920, 1921, 1922, 1924, 1925, 1927, 1928, 1935, 1936, 1937, 1938, 1939, 1941, 1942, 1943, 1944, 1945, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1960, 1961, 1963, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1985, 1986, 1995, 1996, 2003, 2004, 2005, 2007, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2029, 2030, 2032, 2034, 2035, 2036, 2037, 2038, 2040, 2041, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2052, 2053, 2054, 2055, 2056, 2057, 2059, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2083, 2084, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2095, 2096, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2109, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 
2121, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2137, 2139, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2178, 2179, 2180, 2181, 2182, 2183, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2198, 2199, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2214, 2215, 2217, 2218, 2221, 2222, 2225, 2226, 2227, 2228, 2229, 2232, 2233, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2271, 2272, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 4708, 4707, 4732, 4912, 4555, 4555, 4732, 4912, 4555, 4555, 4896, 4899, 4912, 4962, 4961, 4967, 4966, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5249, 5253, 5255, 5260, 5262, 5265, 5269, 5272, 5274, 5276, 5278, 5280, 5282, 5284, 5286, 5290, 5293, 5296, 5298, 5303, 5305, 5307, 5310, 5312, 5314, 5316, 5319, 5321, 5324, 5326, 5330, 5334, 5338, 5342, 5344, 5346, 5348, 5351, 5353, 5355, 5357, 5363, 5365, 5368, 5370, 5373, 5375, 5377, 5381, 5383, 5385, 5388, 5390, 5392, 5395, 5397, 5399, 5404, 5407, 5409, 5411, 5413, 5415, 5418, 5420, 5422, 5424, 5426, 5428, 5431, 5433, 5435, 5439, 5442, 5444, 5446, 5448, 5451, 5453, 5456, 5458, 5460, 5462, 5465, 5467, 5472, 5475, 5478, 5480, 5482, 5486, 5488, 5490, 5492, 5494, 5498, 5500, 5502, 5505, 5507, 5509, 5511, 5513, 5515, 5519, 5522, 5524, 5528, 5531, 5533, 5535, 5537, 5539, 5541, 5543, 5545, 5550, 5552, 5554, 5559, 5561, 5563, 5566, 5569, 5571, 5574, 5576, 5579, 5584, 5587, 5592, 5596, 5599, 5605, 5607, 5609, 5611, 5615, 5617, 5619, 5621, 5623, 5627, 5629, 5631, 5633, 5637, 5641, 5643, 5645, 5649, 5651, 5653, 5655, 5657, 5659, 5663, 5665, 5667, 5669, 5671, 5673, 5676, 5678, 5680, 5687, 5689, 5692, 5694, 
5696, 5699, 5702, 5704, 5706, 5709, 5711, 5713, 5715, 5717, 5720, 5722, 5517, 5250, 5525, 5263, 5556, 5060, 5058, 5070, 5068, 5517, 5516, 5525, 5263, 5556, 5060, 5058, 5070, 5068, 5517, 5516, 5525, 5547, 5556, 5060, 5058, 5070, 2399, 2400, 5634, 5470, 5299, 2419, 5301, 2424, 5361, 2431, 2432, 5287, 5317, 5322, 4811, 4819, 5328, 5291, 5332, 5331, 5336, 5335, 5294, 4494, 5360, 4494, 5361, 5470, 5299, 5682, 2503, 5301, 2508, 5361, 5317, 5322, 4811, 4819, 5328, 5327, 5332, 5331, 5336, 5335, 5401, 2555, 2556, 5359, 4528, 5360, 4528, 5361, 4811, 4819, 5379, 5378, 5401, 2605, 4897, 2607, 4900, 2613, 5470, 5469, 5682, 2641, 2642, 2644, 2645, 5496, 5495, 5517, 5516, 5525, 5547, 5556, 5060, 5058, 5070, 5068, 5634, 5684, 5597, 5684, 5602, 5601, 5603, 5613, 5625, 5634, 5684, 5638, 5639, 5684, 5682, 5690, 5700, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 5863, 5862, 5861, 5760, 2289, 2290, 5864, 5866, 5865, 2294, 5761, 5767, 5838, 5839, 5869, 5871, 5762, 5764, 5763, 2304, 5878, 5876, 2307, 5881, 5879, 5882, 5884, 5765, 5256, 5887, 5886, 2316, 2317, 5889, 5888, 2320, 2321, 5890, 5257, 5863, 5862, 5861, 5860, 2328, 2329, 5866, 5865, 5864, 2333, 5836, 5767, 5838, 5839, 5869, 5872, 5871, 5764, 5763, 2343, 5878, 5876, 2346, 5881, 5879, 5882, 5884, 5765, 5266, 5887, 5886, 2355, 2356, 5889, 5888, 2359, 2360, 5890, 5267, 5863, 5862, 5861, 5860, 2367, 2368, 5866, 5865, 5864, 2372, 5766, 5767, 5768, 5839, 5869, 5872, 5871, 5770, 5769, 2382, 5878, 5877, 2385, 5881, 5880, 5882, 5884, 5883, 5771, 5887, 5886, 2394, 2395, 5889, 5888, 2398, 5890, 5594, 5777, 5919, 5778, 5923, 5922, 5924, 2409, 2410, 2411, 5846, 5845, 5847, 5849, 5484, 5852, 5851, 2420, 4489, 5938, 5939, 2425, 4489, 5857, 5858, 5820, 5800, 5977, 5825, 5824, 5812, 5811, 5774, 5815, 5814, 5816, 2441, 5793, 5819, 5818, 5794, 5796, 4560, 5798, 5785, 5775, 2451, 5787, 5786, 2454, 5802, 5801, 2457, 5804, 5789, 5788, 2461, 2462, 2463, 5776, 2465, 2466, 5791, 2468, 2469, 5792, 5779, 5781, 4560, 5783, 2475, 2476, 
2477, 2478, 5832, 5834, 5833, 5835, 2483, 4494, 5857, 5858, 5777, 5905, 5778, 5923, 5922, 5924, 2493, 2494, 2495, 5846, 5845, 5847, 5849, 5484, 5852, 5851, 2504, 4501, 5938, 5939, 2509, 4501, 5857, 5858, 5779, 5781, 4560, 5783, 5785, 5784, 2519, 5787, 5786, 2522, 5802, 5801, 2525, 5804, 5789, 5788, 2529, 2530, 2531, 5790, 2533, 2534, 5791, 2536, 2537, 5792, 5812, 5811, 5813, 5815, 5814, 5816, 2545, 5793, 5819, 5818, 5794, 5796, 4560, 5798, 5820, 5800, 6013, 5825, 5824, 2559, 2560, 2561, 2562, 5832, 5834, 5833, 5835, 2567, 4528, 5857, 5858, 5802, 5801, 2573, 5804, 5803, 2576, 5806, 5805, 5807, 2580, 2581, 5810, 5809, 5808, 5812, 5811, 5813, 5815, 5814, 5816, 2591, 5819, 5818, 5817, 5820, 5822, 4555, 5825, 5824, 5826, 5828, 4560, 5831, 5830, 2606, 2608, 5832, 5835, 5834, 5833, 4913, 5857, 5858, 5836, 5837, 5838, 5839, 5842, 5841, 5840, 5843, 5919, 5904, 5921, 5923, 5922, 5924, 2631, 2632, 2633, 5846, 5845, 5847, 5849, 5484, 5852, 5851, 6033, 4963, 6035, 4968, 5938, 5939, 2649, 2650, 5855, 5857, 5858, 5863, 5862, 5861, 5860, 2658, 2659, 5866, 5865, 5864, 2663, 5867, 5868, 5869, 5872, 5871, 5875, 5874, 5873, 2672, 5878, 5877, 5876, 2676, 5881, 5880, 5879, 5882, 5884, 5883, 5572, 5887, 5886, 2686, 2687, 5889, 5888, 2690, 2691, 5890, 5594, 5907, 5909, 5647, 5912, 5911, 5913, 5915, 5661, 5918, 5917, 5905, 5920, 5921, 5922, 5923, 5924, 2710, 2711, 5925, 2713, 5892, 5938, 5939, 5907, 5909, 5647, 5912, 5911, 5913, 5915, 5661, 5918, 5917, 5905, 5920, 5921, 5922, 5923, 5924, 2733, 2734, 2735, 5925, 2737, 5893, 5938, 5939, 5894, 5896, 2743, 5898, 5897, 5899, 5901, 2748, 5903, 5902, 5907, 5909, 5647, 5912, 5911, 5905, 5904, 5921, 5922, 5923, 5924, 2762, 2763, 5925, 2765, 5927, 5929, 5930, 2769, 5931, 5933, 5934, 5907, 5909, 5647, 5912, 5911, 5913, 5915, 5661, 5918, 5917, 5920, 5919, 5921, 5923, 5922, 5924, 2789, 2790, 5925, 2792, 5927, 5929, 5930, 2796, 5931, 5933, 5934, 5935, 5936, 5938, 5939, 58, 59, 60, 61, 62, 63, 2285, 2286, 2287, 2288, 6085, 2291, 2292, 2293, 2295, 2296, 
2297, 2298, 2299, 2300, 2301, 2302, 2303, 2305, 2306, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 6112, 2318, 2319, 6116, 2322, 2323, 2324, 2325, 2326, 2327, 6124, 2330, 2331, 2332, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2344, 2345, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 6151, 2357, 2358, 6155, 2361, 2362, 2363, 2364, 2365, 2366, 6163, 2369, 2370, 2371, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2383, 2384, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 6190, 2396, 2397, 5967, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 6203, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 6212, 2421, 2422, 2423, 6216, 2426, 2427, 2428, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2452, 2453, 2455, 2456, 2458, 2459, 2460, 6253, 2464, 6256, 2467, 6259, 2470, 2471, 2472, 2473, 2474, 2479, 2480, 2481, 2482, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 6284, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 6293, 2505, 2506, 2507, 6297, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2520, 2521, 2523, 2524, 2526, 2527, 2528, 6319, 2532, 6322, 2535, 6325, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2557, 2558, 2563, 2564, 2565, 2566, 2568, 2569, 2570, 2571, 2572, 2574, 2575, 2577, 2578, 2579, 6368, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2609, 2610, 2611, 2612, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 6416, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2643, 2646, 2647, 2648, 6432, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 6441, 2660, 2661, 2662, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2673, 2674, 2675, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 6469, 2688, 2689, 6473, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 
2707, 2708, 2709, 6493, 2712, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 6516, 2736, 2738, 2739, 2740, 2741, 2742, 2744, 2745, 2746, 2747, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 6545, 2764, 2766, 2767, 2768, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 6572, 2791, 2793, 2794, 2795, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 6267, 6265, 6348, 6346, 6026, 6024, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6593, 6595, 6598, 6606, 6608, 6610, 6612, 6615, 6618, 6621, 6626, 6628, 6631, 6639, 6641, 6643, 6645, 6648, 6651, 6654, 6659, 6661, 6664, 6672, 6674, 6676, 6678, 6681, 6684, 6687, 5968, 6692, 6695, 6204, 6699, 6704, 6716, 6718, 6721, 6724, 6731, 6733, 6735, 6737, 6751, 6757, 6760, 6285, 6764, 6769, 6783, 6785, 6787, 6789, 6798, 6801, 6804, 6813, 6816, 6822, 6824, 6826, 6830, 6833, 6836, 6839, 6845, 6850, 6853, 6863, 6867, 6870, 6417, 6874, 6879, 6889, 6891, 6894, 6900, 6902, 6905, 6908, 6912, 6915, 6918, 6926, 6931, 6933, 6936, 6947, 6952, 6954, 6957, 6517, 6967, 6971, 6976, 6978, 6981, 6995, 7000, 7002, 7005, 6602, 6600, 6623, 6635, 6633, 6656, 6668, 6666, 6689, 6700, 6705, 6707, 6709, 6711, 6713, 6726, 6728, 6743, 6741, 6739, 6745, 6747, 2865, 2866, 6273, 6754, 6765, 6770, 6772, 6774, 6776, 6778, 6780, 6795, 6793, 6791, 6806, 6808, 6810, 2895, 2896, 6354, 6819, 6841, 6846, 2911, 2912, 6028, 6856, 6860, 6858, 6920, 6875, 6427, 6425, 6882, 6884, 6886, 6896, 6920, 6922, 6927, 6938, 6495, 6941, 6943, 6948, 6519, 6962, 6964, 6968, 6972, 6983, 6547, 6986, 6551, 6989, 6991, 6996, 7007, 6574, 7010, 6578, 7013, 7015, 7017, 61, 62, 63, 7041, 6599, 7051, 6632, 7061, 6665, 6725, 6738, 6752, 6790, 6805, 6817, 6831, 6840, 6854, 6864, 7116, 6895, 6903, 6906, 6909, 6604, 2807, 2808, 7046, 7045, 7044, 7047, 7049, 7048, 2815, 6637, 2819, 2820, 7056, 7055, 7054, 7057, 7059, 7058, 2827, 6670, 
2831, 2832, 7066, 7065, 7064, 7067, 7069, 7068, 2839, 7072, 7071, 7073, 2843, 6702, 2845, 2846, 2847, 2848, 2849, 6222, 7078, 7077, 2854, 2855, 7082, 7081, 7080, 2860, 2861, 2862, 2863, 2864, 7166, 2868, 2869, 7086, 7085, 7087, 2873, 6767, 2875, 2876, 2877, 2878, 2879, 2880, 7092, 7091, 7090, 2885, 2886, 2887, 7095, 7094, 2891, 2892, 2893, 6343, 7183, 2898, 2899, 7101, 7100, 7099, 7104, 7103, 2907, 6843, 2909, 6848, 7189, 2914, 2915, 6898, 2919, 2920, 7122, 7124, 7123, 2927, 7111, 7110, 7112, 2931, 6877, 2933, 2934, 2935, 2936, 2937, 6898, 2941, 7122, 7124, 7123, 2948, 2949, 6924, 2951, 6929, 7128, 7127, 2955, 2956, 2957, 2958, 6945, 2960, 6950, 7132, 7131, 7133, 2965, 2966, 2967, 6525, 2969, 6530, 2971, 6974, 7138, 7137, 2975, 2976, 2977, 2978, 2979, 2980, 6993, 2982, 6998, 7142, 7141, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7233, 7232, 2806, 2809, 2810, 2811, 2812, 2813, 2814, 7235, 7234, 2818, 2821, 2822, 2823, 2824, 2825, 2826, 7237, 7236, 2830, 2833, 2834, 2835, 2836, 2837, 2838, 2840, 2841, 2842, 2844, 2850, 2851, 2852, 6231, 7239, 2857, 2858, 2859, 7302, 6749, 2870, 2871, 2872, 2874, 7241, 2882, 2883, 2884, 7324, 2888, 2889, 6333, 2894, 6814, 2900, 2901, 2902, 6828, 2904, 2905, 6378, 2908, 2910, 6851, 7249, 7248, 2918, 7252, 7251, 7247, 2924, 2925, 2926, 2928, 2929, 2930, 2932, 7360, 7249, 7248, 2940, 7252, 7251, 7250, 2945, 2946, 2947, 2950, 2952, 2953, 2954, 2959, 2961, 2962, 2963, 2964, 2968, 2970, 2972, 2973, 2974, 2981, 2983, 2984, 2985, 7290, 7288, 7304, 7296, 7307, 7316, 7314, 7318, 7328, 7333, 7345, 7362, 7377, 7386, 7399, 7397, 7412, 7410, 7408, 61, 62, 63, 2804, 2805, 7254, 7428, 7432, 2816, 2817, 7264, 7437, 7441, 2828, 2829, 7274, 7446, 7450, 7452, 7457, 2853, 2856, 7462, 7303, 2867, 7466, 2881, 7472, 7325, 7475, 2890, 2897, 7480, 2903, 7484, 2906, 2913, 2916, 2917, 7348, 2921, 2922, 2923, 7497, 7499, 2938, 2939, 7365, 2942, 2943, 2944, 7511, 7515, 7519, 7525, 7529, 7286, 3004, 3005, 3006, 3009, 7292, 
3011, 7312, 3015, 3016, 3019, 7330, 3021, 3022, 7342, 7340, 3028, 7357, 3035, 7502, 7372, 7370, 3043, 7381, 7379, 3047, 7392, 7390, 7388, 3052, 3053, 7403, 7401, 3057, 3058, 3059, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7553, 7255, 7429, 7558, 7265, 7438, 7563, 7275, 7447, 7460, 7470, 7481, 7587, 7349, 7590, 7595, 7598, 7556, 7561, 7566, 3002, 7567, 7607, 7568, 3010, 7306, 3013, 7574, 7614, 7578, 3020, 7332, 3024, 3025, 7583, 7344, 7592, 3033, 7593, 3036, 7600, 7601, 3041, 3042, 7602, 3045, 3046, 7603, 3049, 3050, 3051, 7635, 7604, 3055, 3056, 7639, 56, 57, 58, 59, 60, 61, 62, 63, 7571, 7576, 7591, 7599, 7680, 2994, 7682, 7683, 2997, 7685, 7686, 3000, 7688, 3003, 3007, 3012, 3014, 3018, 7710, 3023, 3026, 7691, 7713, 3029, 7692, 3031, 3034, 7719, 7695, 3038, 3040, 3044, 3048, 7730, 3054, 7640, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 2993, 2995, 2996, 2998, 2999, 3001, 7757, 7744, 7758, 7759, 7760, 7745, 7763, 3027, 7767, 3030, 7746, 7770, 3037, 7747, 7722, 7725, 7728, 7733, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7809, 7811, 7813, 3008, 3017, 7821, 3032, 3039, 7723, 7726, 7777, 7734, 7818, 7814, 7825, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7609, 7761, 7877, 7878, 7879, 7812, 7810, 7808, 3063, 3067, 3069, 7883, 7882, 7881, 7880, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7936, 7762, 3060, 3061, 3062, 7938, 7823, 7826, 3071, 3072, 3073, 3074, 12, 13, 14, 15, 16, 
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7704, 7617, 8003, 3065, 3068, 3070, 8009, 8011, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8004, 8065, 8064, 8071, 8068, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3064, 3066, 8069, 3076, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8192, 8193, 3075, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8257, 8195, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 7945, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8128, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3077, 1, 2, 3, 4, 
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8512, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#define THREADS_PER_BLOCK 64
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 3136
#define SIZE_OF_AC 5504
/*
 * Cooperative evaluation of a fixed feed-forward arithmetic circuit.
 *
 * Layout (t = THREADS_PER_BLOCK = 64; i = this thread's global index):
 *   - Shared array R holds 135 "slots" of t floats: slots 0..48 are circuit
 *     inputs staged from A, slots 49..134 are gate outputs.
 *   - Gate g (g = 0..85) reads operands R[B[i + g*t]] and R[C[i + g*t]] and
 *     multiplies them when Op[i + g*t] is true, adds them otherwise, storing
 *     the result into slot 49+g.
 *   - Gates are evaluated in topologically ordered groups ("levels")
 *     separated by __syncthreads() barriers, so a gate never reads a slot
 *     produced in its own level.
 *   - The whole circuit is re-run n_iter times; thread 0 accumulates the
 *     last slot's lane-0 value into `final` each iteration and writes the
 *     total to A[0] on exit.
 *
 * NOTE(review): R is indexed by the *global* thread id i, so this is only
 * correct for a single-block launch (BLOCKS_PER_GRID == 1) — confirm.
 * NOTE(review): `final = 0` is stored by every thread with no barrier
 * before first use; benign only because all threads write the same value,
 * and it resets once per call, not per iteration (accumulation across
 * iterations appears intentional).
 */
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[135*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
final=0;
/* Stage the 49*t circuit inputs from global memory into shared slots 0..48. */
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
/* All inputs visible to the whole block before gate evaluation starts. */
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
/* Level 1: gates 0..14 (depend only on the staged inputs). */
R[i + 49*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 50*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 51*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 52*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 53*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 54*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 55*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 56*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 57*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 58*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 59*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 60*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 61*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 62*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 63*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
__syncthreads();
/* Level 2: gates 15..23. */
R[i + 64*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 65*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 66*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 67*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 68*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 69*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 70*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 71*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 72*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
__syncthreads();
/* Level 3: gates 24..32. */
R[i + 73*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 74*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 75*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 76*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 77*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 78*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 79*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 80*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 81*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
__syncthreads();
/* Level 4: gates 33..40. */
R[i + 82*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 83*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 84*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 85*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 86*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 87*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 88*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 89*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
__syncthreads();
/* Level 5: gates 41..45. */
R[i + 90*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 91*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 92*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 93*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 94*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
__syncthreads();
/* Level 6: gates 46..53. */
R[i + 95*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 96*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 97*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 98*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 99*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 100*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 101*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 102*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
__syncthreads();
/* Level 7: gates 54..60. */
R[i + 103*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 104*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 105*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 106*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 107*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 108*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 109*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
__syncthreads();
/* Level 8: gates 61..63. */
R[i + 110*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 111*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
R[i + 112*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
__syncthreads();
/* Level 9: gates 64..66. */
R[i + 113*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 114*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 115*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
__syncthreads();
/* Level 10: gates 67..68. */
R[i + 116*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
R[i + 117*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
__syncthreads();
/* Level 11: gates 69..70. */
R[i + 118*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 119*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
__syncthreads();
/* Deep tail of the circuit: gates 71..85, one gate per level with a
 * barrier after each (each gate can feed the next). */
R[i + 120*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
__syncthreads();
R[i + 121*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
__syncthreads();
R[i + 122*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
__syncthreads();
R[i + 123*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
__syncthreads();
R[i + 124*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
__syncthreads();
R[i + 125*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
__syncthreads();
R[i + 126*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
__syncthreads();
R[i + 127*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
__syncthreads();
R[i + 128*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
__syncthreads();
R[i + 129*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
__syncthreads();
R[i + 130*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
__syncthreads();
R[i + 131*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
__syncthreads();
R[i + 132*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
__syncthreads();
R[i + 133*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
__syncthreads();
R[i + 134*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
/* Thread 0 accumulates the root gate's lane-0 value for this iteration;
 * R[134*t] is the slot thread 0 itself just wrote. */
if (i==0) { final += R[134*t]; }
__syncthreads();
}
/* Publish the accumulated circuit output over all iterations. */
if (i==0) { A[0]= final;}
}
|
ca988b8a8dd69d28bb0b7723aeb00e66a1679a00.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kSubtract.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate (blockDim.x, blockDim.y) launch shapes swept by the benchmark in main().
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem dimensions swept by the benchmark in main().
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated benchmark harness for the kSubtract kernel (HIP build).
 * Sweeps argv[1] problem shapes from matrices_ against all 20 block shapes
 * from blocks_, does 1 context-warming launch + 10 warm-up launches, then
 * times 1000 launches and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 * argv[1] is read without an argc check (crashes if missing).
 */
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): allocations are XSIZE*YSIZE *bytes*, not elements *
// sizeof(float); the kernel is invoked with numEls == 1, so only the
// first element is ever touched — confirm this sizing is intentional.
// Buffers are never freed, so memory grows across sweep iterations.
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *dest = NULL;
hipMalloc(&dest, XSIZE*YSIZE);
unsigned int numEls = 1;
// Round the problem dimensions up to multiples of the block shape so the
// grid divides evenly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context creation; first launch warms up the kernel.
hipFree(0);hipLaunchKernelGGL((
kSubtract), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,dest,numEls);
hipDeviceSynchronize();
// 10 untimed warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kSubtract), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,dest,numEls);
}
// Timed section: 1000 launches.
// NOTE(review): no device synchronization before `end`, so this measures
// launch/enqueue overhead rather than kernel execution time — confirm.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kSubtract), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,dest,numEls);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ca988b8a8dd69d28bb0b7723aeb00e66a1679a00.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kSubtract.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Candidate (blockDim.x, blockDim.y) launch shapes swept by the benchmark in main().
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Candidate (XSIZE, YSIZE) problem dimensions swept by the benchmark in main().
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/*
 * Auto-generated benchmark harness for the kSubtract kernel (CUDA build).
 * Sweeps argv[1] problem shapes from matrices_ against all 20 block shapes
 * from blocks_, does 1 context-warming launch + 10 warm-up launches, then
 * times 1000 launches and prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 * argv[1] is read without an argc check (crashes if missing).
 */
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): allocations are XSIZE*YSIZE *bytes*, not elements *
// sizeof(float); the kernel is invoked with numEls == 1, so only the
// first element is ever touched — confirm this sizing is intentional.
// Buffers are never freed, so memory grows across sweep iterations.
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *dest = NULL;
cudaMalloc(&dest, XSIZE*YSIZE);
unsigned int numEls = 1;
// Round the problem dimensions up to multiples of the block shape so the
// grid divides evenly.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces context creation; first launch warms up the kernel.
cudaFree(0);
kSubtract<<<gridBlock,threadBlock>>>(a,b,dest,numEls);
cudaDeviceSynchronize();
// 10 untimed warm-up launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kSubtract<<<gridBlock,threadBlock>>>(a,b,dest,numEls);
}
// Timed section: 1000 launches.
// NOTE(review): no device synchronization before `end`, so this measures
// launch/enqueue overhead rather than kernel execution time — confirm.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kSubtract<<<gridBlock,threadBlock>>>(a,b,dest,numEls);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a1e4861c5cc7f19002fe3e0665873a9340e4a96e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zlobpcg_shift.cu normal z -> c, Tue Feb 9 16:05:41 2016
*/
#include "magmasparse_internal.h"
// Block-LOBPCG shift kernel (HIP build): drops the first `shift` residual
// vectors by moving entry x[idx] (idx >= shift) to x[idx - shift].  One
// thread per vector entry (idx = threadIdx.x); the 2D grid is linearized
// into `row`, which only gates whether a block participates.
//
// Fix: the original executed __syncthreads() inside divergent branches
// (`if (row < num_rows)` / `if (idx > shift-1)`), which is undefined
// behavior; the barrier is now reached uniformly by every thread.
//
// NOTE(review): x is indexed by threadIdx.x only, so every active block
// touches the same num_vecs entries of x — confirm against the caller.
// num_vecs is accepted for interface symmetry but unused here.
__global__ void
magma_clobpcg_shift_kernel(
    magma_int_t num_rows,
    magma_int_t num_vecs,
    magma_int_t shift,
    magmaFloatComplex * x )
{
    int idx = threadIdx.x;                          // entry within the vector
    int row = blockIdx.y * gridDim.x + blockIdx.x;  // linearized block index
    bool active = ( row < num_rows );

    // Read before the barrier so the shifted store below cannot overwrite
    // an entry another thread still has to read.
    magmaFloatComplex tmp;
    if ( active ) {
        tmp = x[idx];
    }
    __syncthreads();   // uniform: executed by all threads of the block

    if ( active && idx > shift-1 ) {
        x[idx - shift] = tmp;
    }
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloatComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
// Host wrapper: launches magma_clobpcg_shift_kernel with blockDim.x =
// num_vecs (one thread per vector) and a near-square 2-D grid covering
// num_rows blocks.  Returns MAGMA_SUCCESS unconditionally.
extern "C" magma_int_t
magma_clobpcg_shift(
    magma_int_t num_rows,
    magma_int_t num_vecs,
    magma_int_t shift,
    magmaFloatComplex_ptr x,
    magma_queue_t queue )
{
    magma_int_t num_threads = num_vecs;
    // every thread handles one row containing the
    // NOTE(review): on violation the code only prints and still launches
    // the kernel with the oversized configuration — consider erroring out.
    if ( num_threads > 1024 )
        printf("error: too many threads requested.\n");
    // dynamic shared memory request: one complex value per thread
    // NOTE(review): the kernel declares no extern __shared__ array —
    // confirm this allocation is actually needed.
    int Ms = num_threads * sizeof( magmaFloatComplex );
    if ( Ms > 1024*8 )
        printf("error: too much shared memory requested.\n");
    dim3 block( num_threads, 1, 1 );
    int dimgrid1 = int( sqrt( float( num_rows )));
    int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
    dim3 grid( dimgrid1, dimgrid2, 1);
    hipLaunchKernelGGL(( magma_clobpcg_shift_kernel), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
        num_rows, num_vecs, shift, x );
    return MAGMA_SUCCESS;
}
| a1e4861c5cc7f19002fe3e0665873a9340e4a96e.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zlobpcg_shift.cu normal z -> c, Tue Feb 9 16:05:41 2016
*/
#include "magmasparse_internal.h"
// Shift kernel for Block-LOBPCG residual compaction (see the layout comment
// below this kernel): one thread block handles one logical row, one thread
// per vector entry.  Threads whose column index exceeds shift-1 move their
// value 'shift' slots to the left, discarding the first 'shift' residuals.
// NOTE(review): x is indexed by idx only, with no per-row offset derived
// from 'row' — verify against the caller that this addresses the intended
// row, not just the first one.
// NOTE(review): the inner __syncthreads() sits inside a divergent branch
// (only threads with idx > shift-1 reach it) — confirm this is safe on the
// targeted architectures.
__global__ void
magma_clobpcg_shift_kernel(
    magma_int_t num_rows,      // number of rows of the multi-vector
    magma_int_t num_vecs,      // number of vectors (blockDim.x at launch)
    magma_int_t shift,         // number of leading residuals to drop
    magmaFloatComplex * x )    // in/out: interleaved multi-vector storage
{
    int idx = threadIdx.x; // thread in row
    int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
    if ( row<num_rows) {
        magmaFloatComplex tmp = x[idx];
        __syncthreads();
        if ( idx > shift-1 ) {
            idx-=shift;
            x[idx] = tmp;
            __syncthreads();
        }
    }
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in,out]
x magmaFloatComplex_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
// Host wrapper: launches magma_clobpcg_shift_kernel with blockDim.x =
// num_vecs (one thread per vector) and a near-square 2-D grid covering
// num_rows blocks.  Returns MAGMA_SUCCESS unconditionally.
extern "C" magma_int_t
magma_clobpcg_shift(
    magma_int_t num_rows,
    magma_int_t num_vecs,
    magma_int_t shift,
    magmaFloatComplex_ptr x,
    magma_queue_t queue )
{
    magma_int_t num_threads = num_vecs;
    // every thread handles one row containing the
    // NOTE(review): on violation the code only prints and still launches
    // the kernel with the oversized configuration — consider erroring out.
    if ( num_threads > 1024 )
        printf("error: too many threads requested.\n");
    // dynamic shared memory request: one complex value per thread
    // NOTE(review): the kernel declares no extern __shared__ array —
    // confirm this allocation is actually needed.
    int Ms = num_threads * sizeof( magmaFloatComplex );
    if ( Ms > 1024*8 )
        printf("error: too much shared memory requested.\n");
    dim3 block( num_threads, 1, 1 );
    int dimgrid1 = int( sqrt( float( num_rows )));
    int dimgrid2 = magma_ceildiv( num_rows, dimgrid1 );
    dim3 grid( dimgrid1, dimgrid2, 1);
    magma_clobpcg_shift_kernel<<< grid, block, Ms, queue->cuda_stream() >>>
        ( num_rows, num_vecs, shift, x );
    return MAGMA_SUCCESS;
}
|
1d199166001da3a1620b0b9b70064be4790698a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/dns_inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
// Per-block partial reduction over masked parameters (two elements loaded
// per thread, classic shared-memory tree reduction).  For its 2*NUM_THREADS
// element slice, each block writes:
//   mu_[blockIdx.x]   = sum of |mask*wb|        (first absolute moment)
//   std_[blockIdx.x]  = sum of mask*wb*wb       (raw second moment)
//   count[blockIdx.x] = number of entries with mask*wb != 0
// The host-side CCMomentCalc sums these per-block partials.
// Out-of-range lanes contribute zeros so the tail block reduces correctly.
template <typename Dtype>
__global__ void CCMomentCollect(const int n, const Dtype *wb, const Dtype *mask,
                                Dtype *mu_, Dtype *std_, unsigned int *count) {
  const int NUM_THREADS = 512;
  // param[0..2N): |mask*wb| partials; param[2N..4N): mask*wb*wb partials.
  __shared__ Dtype param[4 * NUM_THREADS];
  __shared__ unsigned int tcount[2 * NUM_THREADS];
  unsigned int t = threadIdx.x;
  unsigned int s = 2 * blockIdx.x * NUM_THREADS;  // start of this block's slice
  if (s + t < n) {
    param[t] = fabs(mask[s + t] * wb[s + t]);
    param[t + 2 * NUM_THREADS] = mask[s + t] * wb[s + t] * wb[s + t];
    if (mask[s + t] * wb[s + t] != 0)
      tcount[t] = 1;
    else
      tcount[t] = 0;
  } else {
    param[t] = 0;
    param[t + 2 * NUM_THREADS] = 0;
    tcount[t] = 0;
  }
  if (s + t + NUM_THREADS < n) {
    param[t + NUM_THREADS] =
        fabs(mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS]);
    param[t + 3 * NUM_THREADS] = mask[s + t + NUM_THREADS] *
                                 wb[s + t + NUM_THREADS] *
                                 wb[s + t + NUM_THREADS];
    if (mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS] != 0)
      tcount[t + NUM_THREADS] = 1;
    else
      tcount[t + NUM_THREADS] = 0;
  } else {
    param[t + NUM_THREADS] = 0;
    param[t + 3 * NUM_THREADS] = 0;
    tcount[t + NUM_THREADS] = 0;
  }
  __syncthreads();
  // Tree reduction: first pass folds 2N values down to N, then halves.
  for (unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
    if (t < stride) {
      param[t] += param[t + stride];
      param[t + 2 * NUM_THREADS] += param[t + 2 * NUM_THREADS + stride];
      tcount[t] += tcount[t + stride];
    }
    __syncthreads();
  }
  if (t == 0) {
    mu_[blockIdx.x] = param[0];
    std_[blockIdx.x] = param[2 * NUM_THREADS];
    count[blockIdx.x] = tcount[0];
  }
}
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
// Per-block count of nonzero mask entries (two elements per thread, tree
// reduction in shared memory).  count[blockIdx.x] receives the number of
// nonzeros in this block's 2*NUM_THREADS element slice; the host-side
// CCNZeroCalc sums the per-block partials.
template <typename Dtype>
__global__ void CCNzeroCollect(const int n, const Dtype *mask,
                               unsigned int *count) {
  const int NUM_THREADS = 512;
  __shared__ unsigned int tcount[2 * NUM_THREADS];
  unsigned int t = threadIdx.x;
  unsigned int s = 2 * blockIdx.x * NUM_THREADS;  // start of this block's slice
  tcount[t] = 0;
  if (s + t < n && mask[s + t] != 0) {
    tcount[t] = 1;
  }
  tcount[t + NUM_THREADS] = 0;
  if (s + t + NUM_THREADS < n && mask[s + t + NUM_THREADS] != 0) {
    tcount[t + NUM_THREADS] = 1;
  }
  __syncthreads();
  // Tree reduction: first pass folds 2N flags down to N, then halves.
  for (unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
    if (t < stride) {
      tcount[t] += tcount[t + stride];
    }
    __syncthreads();
  }
  if (t == 0) {
    count[blockIdx.x] = tcount[0];
  }
}
// Hysteresis update of the pruning mask: a currently-kept weight (mask==1)
// is dropped when its magnitude falls to or below low_*max(mu_+r_*std_, 0);
// a currently-pruned weight (mask==0) is revived when its magnitude rises
// above high_*max(mu_+r_*std_, 0).  Magnitudes inside the band leave the
// mask unchanged.
template <typename Dtype>
__global__ void CCMaskCalc(const int n, const Dtype *wb, Dtype *mask, Dtype mu_,
                           Dtype std_, Dtype r_, Dtype low_, Dtype high_) {
  CUDA_KERNEL_LOOP(i, n) {
    const Dtype thresh = max(mu_ + r_ * std_, Dtype(0));
    const Dtype magnitude = fabs(wb[i]);
    if (mask[i] == 1 && magnitude <= low_ * thresh) {
      mask[i] = 0;
    } else if (mask[i] == 0 && magnitude > high_ * thresh) {
      mask[i] = 1;
    }
  }
}
// Elementwise application of a pruning mask: wb_t[i] = wb[i] * mask[i].
// wb_t may alias wb (callers mask gradients in place).
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype *wb, const Dtype *mask,
                            Dtype *wb_t) {
  CUDA_KERNEL_LOOP(i, n) {
    wb_t[i] = wb[i] * mask[i];
  }
}
// Host driver for CCMomentCollect: launches one block per 2*NUM_THREADS
// elements, copies the per-block partials back, and ACCUMULATES them into
// *mu_, *std_ and *ncount (callers must pre-initialize those outputs).
// NOTE(review): hipMalloc/hipMemcpy return codes are unchecked, and device
// buffers are allocated/freed on every call — acceptable only because this
// runs once at iteration 0 (see Forward_gpu); flag if reused in a hot path.
template <typename Dtype>
void CCMomentCalc(const int n, const Dtype *wb, const Dtype *mask, Dtype *mu_,
                  Dtype *std_, unsigned int *ncount) {
  const unsigned int NUM_THREADS = 512;
  Dtype *pmu_g;
  Dtype *pstd_g;
  unsigned int *pncount_g;
  Dtype *pmu_c;
  Dtype *pstd_c;
  unsigned int *pncount_c;
  // One partial result per block; each block consumes 2*NUM_THREADS elements.
  int num_p = (n + (NUM_THREADS << 1) - 1) / (NUM_THREADS << 1);
  hipMalloc(&pmu_g, sizeof(Dtype) * num_p);
  hipMalloc(&pstd_g, sizeof(Dtype) * num_p);
  hipMalloc(&pncount_g, sizeof(unsigned int) * num_p);
  pmu_c = (Dtype *)malloc(num_p * sizeof(Dtype));
  pstd_c = (Dtype *)malloc(num_p * sizeof(Dtype));
  pncount_c = (unsigned int *)malloc(num_p * sizeof(unsigned int));
  hipLaunchKernelGGL(( CCMomentCollect<Dtype>)
      , dim3(num_p), dim3(NUM_THREADS), 0, 0, n, wb, mask, pmu_g, pstd_g, pncount_g);
  CUDA_POST_KERNEL_CHECK;
  hipMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
  hipMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, hipMemcpyDeviceToHost);
  hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p,
            hipMemcpyDeviceToHost);
  // Fold the per-block partials into the caller's running totals.
  for (int i = 0; i < num_p; i++) {
    *mu_ += pmu_c[i];
    *std_ += pstd_c[i];
    *ncount += pncount_c[i];
  }
  hipFree(pmu_g);
  hipFree(pstd_g);
  hipFree(pncount_g);
  free(pmu_c);
  free(pstd_c);
  free(pncount_c);
}
// Host driver for CCNzeroCollect: counts nonzero mask entries by summing
// the per-block partials, ACCUMULATING into *ncount (caller pre-initializes).
// NOTE(review): hipMalloc/hipMemcpy return codes are unchecked and the
// device buffer is allocated/freed per call.
template <typename Dtype>
void CCNZeroCalc(const int n, const Dtype *mask, unsigned int *ncount) {
  const unsigned int NUM_THREADS = 512;
  unsigned int *pncount_g;
  unsigned int *pncount_c;
  // One partial per block; each block consumes 2*NUM_THREADS elements.
  int num_p = (n + (NUM_THREADS << 1) - 1) / (NUM_THREADS << 1);
  hipMalloc(&pncount_g, sizeof(unsigned int) * num_p);
  pncount_c = (unsigned int *)malloc(num_p * sizeof(unsigned int));
  hipLaunchKernelGGL(( CCNzeroCollect<Dtype>), dim3(num_p), dim3(NUM_THREADS), 0, 0, n, mask, pncount_g);
  CUDA_POST_KERNEL_CHECK;
  hipMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p,
            hipMemcpyDeviceToHost);
  for (int i = 0; i < num_p; i++) {
    *ncount += pncount_c[i];
  }
  hipFree(pncount_g);
  free(pncount_c);
}
// Forward pass of the DNS (dynamic network surgery) inner-product layer.
// Blob layout: blobs_[0]=weight; with bias: blobs_[1]=bias, blobs_[2]=weight
// mask, blobs_[3]=bias mask; without bias: blobs_[1]=weight mask.
// In TRAIN phase it (a) on the very first call computes mu_/std_ of the
// masked parameters, (b) with probability (1+gamma*iter)^-power refreshes
// the masks via CCMaskCalc; then it always applies the masks into the
// weight_tmp_/bias_tmp_ buffers and runs the standard inner-product GEMM/
// GEMV on the masked values.
template <typename Dtype>
void DNSInnerProductLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
  const Dtype *weight = this->blobs_[0]->gpu_data();
  Dtype *weightMask = NULL;
  Dtype *weightTmp = this->weight_tmp_.mutable_gpu_data();
  const Dtype *bias = NULL;
  Dtype *biasMask = NULL;
  Dtype *biasTmp = NULL;
  // Mask blob indices shift depending on whether a bias blob exists.
  if (this->bias_term_) {
    weightMask = this->blobs_[2]->mutable_gpu_data();
    bias = this->blobs_[1]->gpu_data();
    biasMask = this->blobs_[3]->mutable_gpu_data();
    biasTmp = this->bias_tmp_.mutable_gpu_data();
  } else {
    weightMask = this->blobs_[1]->mutable_gpu_data();
  }
  if (this->phase_ == TRAIN) {
    // Calculate the mean and standard deviation of learnable parameters
    // (one-time initialization on the very first training iteration).
    if (this->std_ == 0 && this->iter_ == 0) {
      unsigned int nz_w = 0, nz_b = 0, ncount = 0;
      CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu_, &std_,
                   &nz_w);
      if (this->bias_term_) {
        CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu_, &std_,
                     &nz_b);
      }
      // Convert accumulated sums to mean and standard deviation.
      ncount = nz_w + nz_b;
      this->mu_ /= ncount;
      this->std_ -= ncount * mu_ * mu_;
      this->std_ /= ncount;
      this->std_ = sqrt(std_);
      // output the percentage of kept parameters.
      // NOTE(review): this log reads blobs_[1]->count() as the bias size
      // even when bias_term_ is false (blobs_[1] is then the weight mask)
      // — the reported "bias" figures are wrong in that configuration.
      LOG_IF(INFO, Caffe::root_solver()) << "[" << this->name() << "] "
        << "mu_:" << mu_ << " " << "std_:" << std_ << ", "
        << "kept: weight: " << nz_w << "/" << this->blobs_[0]->count()
        << "(" << Dtype(nz_w) / this->blobs_[0]->count()*100 << "%), "
        << "bias: " << nz_b << "/" << this->blobs_[1]->count()
        << "(" << Dtype(nz_b) / this->blobs_[1]->count()*100 << "%), "
        <<"total: " << ncount << "/" << this->blobs_[0]->count() + this->blobs_[1]->count()
        << "(" << Dtype(ncount) / (this->blobs_[0]->count() + this->blobs_[1]->count())*100 << "%)"
        << "\n";
    }
    // Calculate the weight mask and bias mask with probability
    // LOG(INFO) << rand()<<" "<<rand()<<" "<<rand()<<" "<<rand()<<"
    // "<<rand()<< "\n";
    Dtype r_ = static_cast<Dtype>(rand()) / static_cast<Dtype>(RAND_MAX);
    // LOG(INFO) << "r_ = " << r_ << "\n";
    // Mask refresh fires with a probability that decays with iter_ and
    // stops entirely after iter_stop_.
    if (pow(1 + (this->gamma_) * (this->iter_), -(this->power_)) > r_ &&
        (this->iter_) < (this->iter_stop_)) {
      hipLaunchKernelGGL(( CCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
                         dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          this->blobs_[0]->count(), weight, weightMask, this->mu_, this->std_,
          this->c_rate_, this->alpha_low_, this->alpha_high_);
      CUDA_POST_KERNEL_CHECK;
      if (this->bias_term_) {
        hipLaunchKernelGGL(( CCMaskCalc<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
                           dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
            this->blobs_[1]->count(), bias, biasMask, this->mu_, this->std_,
            this->c_rate_, this->alpha_low_, this->alpha_high_);
        CUDA_POST_KERNEL_CHECK;
      }
    }
  }
  // Calculate the current (masked) weight and bias
  hipLaunchKernelGGL(( CCMaskApply<Dtype>)
      , dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      this->blobs_[0]->count(), weight, weightMask, weightTmp);
  CUDA_POST_KERNEL_CHECK;
  if (this->bias_term_) {
    hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[1]->count())),
                       dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, this->blobs_[1]->count(),
                       bias, biasMask, biasTmp);
    CUDA_POST_KERNEL_CHECK;
  }
  // Forward calculation with (masked) weight and bias
  const Dtype *bottom_data = bottom[0]->gpu_data();
  Dtype *top_data = top[0]->mutable_gpu_data();
  if (M_ == 1) {
    // Single-sample batch: matrix-vector product is cheaper than GEMM.
    caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weightTmp,
                          bottom_data, (Dtype)0., top_data);
    if (bias_term_)
      caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], biasTmp,
                            top_data);
  } else {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans,
                          M_, N_, K_, (Dtype)1., bottom_data, weightTmp,
                          (Dtype)0., top_data);
    if (bias_term_)
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
                            bias_multiplier_.gpu_data(), biasTmp, (Dtype)1.,
                            top_data);
  }
}
// Backward pass of the DNS (dynamic network surgery) inner-product layer.
// Raw parameter gradients are first multiplied elementwise by the binary
// masks (in place, via CCMaskApply) so pruned connections receive no
// update; the bottom gradient uses the masked weights in weight_tmp_.
//
// Fix: the weight-gradient masking launch was previously sized with
// this->blobs_[2]->count() unconditionally.  When bias_term_ is false the
// weight mask lives in blobs_[1] and blobs_[2] is not the mask blob, so the
// element count came from the wrong blob.  The weight blob (blobs_[0])
// always has the same element count as its mask, so size the launch by it.
template <typename Dtype>
void DNSInnerProductLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
    const vector<Blob<Dtype> *> &bottom) {
  const Dtype *top_diff = top[0]->gpu_diff();
  if (this->param_propagate_down_[0]) {
    // The weight-mask blob index depends on whether a bias blob exists.
    const Dtype* weightMask = NULL;
    if (this->bias_term_) {
      weightMask = this->blobs_[2]->gpu_data();
    } else {
      weightMask = this->blobs_[1]->gpu_data();
    }
    Dtype *weight_diff = this->blobs_[0]->mutable_gpu_diff();
    const Dtype *bottom_data = bottom[0]->gpu_data();
    // Gradient with respect to weight: mask the accumulated diff in place.
    // NOTE(review): the mask is applied before the GEMM below accumulates
    // the fresh gradient, so the newly added term is not masked here —
    // confirm this ordering is the intended DNS behavior.
    hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[0]->count())),
                       dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        this->blobs_[0]->count(), weight_diff, weightMask, weight_diff);
    CUDA_POST_KERNEL_CHECK;
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1.,
                            bottom_data, top_diff, (Dtype)1.,
                            this->blobs_[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
                            top_diff, bottom_data, (Dtype)1.,
                            this->blobs_[0]->mutable_gpu_diff());
    }
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype *biasMask = this->blobs_[3]->gpu_data();
    Dtype *bias_diff = this->blobs_[1]->mutable_gpu_diff();
    // Gradient with respect to bias: mask the accumulated diff in place.
    hipLaunchKernelGGL(( CCMaskApply<Dtype>), dim3(CAFFE_GET_BLOCKS(this->blobs_[3]->count())),
                       dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
    CUDA_POST_KERNEL_CHECK;
    caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
                          bias_multiplier_.gpu_data(), (Dtype)1., bias_diff);
  }
  if (propagate_down[0]) {
    // Gradient with respect to bottom data uses the masked weights.
    const Dtype *weightTmp = this->weight_tmp_.gpu_data();
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1.,
                            top_diff, weightTmp, (Dtype)0.,
                            bottom[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
                            top_diff, weightTmp, (Dtype)0.,
                            bottom[0]->mutable_gpu_diff());
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(DNSInnerProductLayer);
} // namespace caffe
| 1d199166001da3a1620b0b9b70064be4790698a1.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/dns_inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
// Per-block partial reduction over masked parameters (two elements loaded
// per thread, classic shared-memory tree reduction).  For its 2*NUM_THREADS
// element slice, each block writes:
//   mu_[blockIdx.x]   = sum of |mask*wb|        (first absolute moment)
//   std_[blockIdx.x]  = sum of mask*wb*wb       (raw second moment)
//   count[blockIdx.x] = number of entries with mask*wb != 0
// The host-side CCMomentCalc sums these per-block partials.
// Out-of-range lanes contribute zeros so the tail block reduces correctly.
template <typename Dtype>
__global__ void CCMomentCollect(const int n, const Dtype *wb, const Dtype *mask,
                                Dtype *mu_, Dtype *std_, unsigned int *count) {
  const int NUM_THREADS = 512;
  // param[0..2N): |mask*wb| partials; param[2N..4N): mask*wb*wb partials.
  __shared__ Dtype param[4 * NUM_THREADS];
  __shared__ unsigned int tcount[2 * NUM_THREADS];
  unsigned int t = threadIdx.x;
  unsigned int s = 2 * blockIdx.x * NUM_THREADS;  // start of this block's slice
  if (s + t < n) {
    param[t] = fabs(mask[s + t] * wb[s + t]);
    param[t + 2 * NUM_THREADS] = mask[s + t] * wb[s + t] * wb[s + t];
    if (mask[s + t] * wb[s + t] != 0)
      tcount[t] = 1;
    else
      tcount[t] = 0;
  } else {
    param[t] = 0;
    param[t + 2 * NUM_THREADS] = 0;
    tcount[t] = 0;
  }
  if (s + t + NUM_THREADS < n) {
    param[t + NUM_THREADS] =
        fabs(mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS]);
    param[t + 3 * NUM_THREADS] = mask[s + t + NUM_THREADS] *
                                 wb[s + t + NUM_THREADS] *
                                 wb[s + t + NUM_THREADS];
    if (mask[s + t + NUM_THREADS] * wb[s + t + NUM_THREADS] != 0)
      tcount[t + NUM_THREADS] = 1;
    else
      tcount[t + NUM_THREADS] = 0;
  } else {
    param[t + NUM_THREADS] = 0;
    param[t + 3 * NUM_THREADS] = 0;
    tcount[t + NUM_THREADS] = 0;
  }
  __syncthreads();
  // Tree reduction: first pass folds 2N values down to N, then halves.
  for (unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
    if (t < stride) {
      param[t] += param[t + stride];
      param[t + 2 * NUM_THREADS] += param[t + 2 * NUM_THREADS + stride];
      tcount[t] += tcount[t + stride];
    }
    __syncthreads();
  }
  if (t == 0) {
    mu_[blockIdx.x] = param[0];
    std_[blockIdx.x] = param[2 * NUM_THREADS];
    count[blockIdx.x] = tcount[0];
  }
}
// The constant NUM_THREADS should be equal to the value in CCMomentCalc
// Per-block count of nonzero mask entries (two elements per thread, tree
// reduction in shared memory).  count[blockIdx.x] receives the number of
// nonzeros in this block's 2*NUM_THREADS element slice; the host-side
// CCNZeroCalc sums the per-block partials.
template <typename Dtype>
__global__ void CCNzeroCollect(const int n, const Dtype *mask,
                               unsigned int *count) {
  const int NUM_THREADS = 512;
  __shared__ unsigned int tcount[2 * NUM_THREADS];
  unsigned int t = threadIdx.x;
  unsigned int s = 2 * blockIdx.x * NUM_THREADS;  // start of this block's slice
  tcount[t] = 0;
  if (s + t < n && mask[s + t] != 0) {
    tcount[t] = 1;
  }
  tcount[t + NUM_THREADS] = 0;
  if (s + t + NUM_THREADS < n && mask[s + t + NUM_THREADS] != 0) {
    tcount[t + NUM_THREADS] = 1;
  }
  __syncthreads();
  // Tree reduction: first pass folds 2N flags down to N, then halves.
  for (unsigned int stride = NUM_THREADS; stride >= 1; stride >>= 1) {
    if (t < stride) {
      tcount[t] += tcount[t + stride];
    }
    __syncthreads();
  }
  if (t == 0) {
    count[blockIdx.x] = tcount[0];
  }
}
// Hysteresis update of the pruning mask: a currently-kept weight (mask==1)
// is dropped when its magnitude falls to or below low_*max(mu_+r_*std_, 0);
// a currently-pruned weight (mask==0) is revived when its magnitude rises
// above high_*max(mu_+r_*std_, 0).  Magnitudes inside the band leave the
// mask unchanged.
template <typename Dtype>
__global__ void CCMaskCalc(const int n, const Dtype *wb, Dtype *mask, Dtype mu_,
                           Dtype std_, Dtype r_, Dtype low_, Dtype high_) {
  CUDA_KERNEL_LOOP(i, n) {
    const Dtype thresh = max(mu_ + r_ * std_, Dtype(0));
    const Dtype magnitude = fabs(wb[i]);
    if (mask[i] == 1 && magnitude <= low_ * thresh) {
      mask[i] = 0;
    } else if (mask[i] == 0 && magnitude > high_ * thresh) {
      mask[i] = 1;
    }
  }
}
// Elementwise application of a pruning mask: wb_t[i] = wb[i] * mask[i].
// wb_t may alias wb (callers mask gradients in place).
template <typename Dtype>
__global__ void CCMaskApply(const int n, const Dtype *wb, const Dtype *mask,
                            Dtype *wb_t) {
  CUDA_KERNEL_LOOP(i, n) {
    wb_t[i] = wb[i] * mask[i];
  }
}
// Host driver for CCMomentCollect: launches one block per 2*NUM_THREADS
// elements, copies the per-block partials back, and ACCUMULATES them into
// *mu_, *std_ and *ncount (callers must pre-initialize those outputs).
// NOTE(review): cudaMalloc/cudaMemcpy return codes are unchecked, and
// device buffers are allocated/freed on every call — acceptable only
// because this runs once at iteration 0 (see Forward_gpu); flag if reused
// in a hot path.
template <typename Dtype>
void CCMomentCalc(const int n, const Dtype *wb, const Dtype *mask, Dtype *mu_,
                  Dtype *std_, unsigned int *ncount) {
  const unsigned int NUM_THREADS = 512;
  Dtype *pmu_g;
  Dtype *pstd_g;
  unsigned int *pncount_g;
  Dtype *pmu_c;
  Dtype *pstd_c;
  unsigned int *pncount_c;
  // One partial result per block; each block consumes 2*NUM_THREADS elements.
  int num_p = (n + (NUM_THREADS << 1) - 1) / (NUM_THREADS << 1);
  cudaMalloc(&pmu_g, sizeof(Dtype) * num_p);
  cudaMalloc(&pstd_g, sizeof(Dtype) * num_p);
  cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p);
  pmu_c = (Dtype *)malloc(num_p * sizeof(Dtype));
  pstd_c = (Dtype *)malloc(num_p * sizeof(Dtype));
  pncount_c = (unsigned int *)malloc(num_p * sizeof(unsigned int));
  CCMomentCollect<Dtype>
      <<<num_p, NUM_THREADS>>>(n, wb, mask, pmu_g, pstd_g, pncount_g);
  CUDA_POST_KERNEL_CHECK;
  cudaMemcpy(pmu_c, pmu_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
  cudaMemcpy(pstd_c, pstd_g, sizeof(Dtype) * num_p, cudaMemcpyDeviceToHost);
  cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p,
             cudaMemcpyDeviceToHost);
  // Fold the per-block partials into the caller's running totals.
  for (int i = 0; i < num_p; i++) {
    *mu_ += pmu_c[i];
    *std_ += pstd_c[i];
    *ncount += pncount_c[i];
  }
  cudaFree(pmu_g);
  cudaFree(pstd_g);
  cudaFree(pncount_g);
  free(pmu_c);
  free(pstd_c);
  free(pncount_c);
}
// Host driver for CCNzeroCollect: counts nonzero mask entries by summing
// the per-block partials, ACCUMULATING into *ncount (caller pre-initializes).
// NOTE(review): cudaMalloc/cudaMemcpy return codes are unchecked and the
// device buffer is allocated/freed per call.
template <typename Dtype>
void CCNZeroCalc(const int n, const Dtype *mask, unsigned int *ncount) {
  const unsigned int NUM_THREADS = 512;
  unsigned int *pncount_g;
  unsigned int *pncount_c;
  // One partial per block; each block consumes 2*NUM_THREADS elements.
  int num_p = (n + (NUM_THREADS << 1) - 1) / (NUM_THREADS << 1);
  cudaMalloc(&pncount_g, sizeof(unsigned int) * num_p);
  pncount_c = (unsigned int *)malloc(num_p * sizeof(unsigned int));
  CCNzeroCollect<Dtype><<<num_p, NUM_THREADS>>>(n, mask, pncount_g);
  CUDA_POST_KERNEL_CHECK;
  cudaMemcpy(pncount_c, pncount_g, sizeof(unsigned int) * num_p,
             cudaMemcpyDeviceToHost);
  for (int i = 0; i < num_p; i++) {
    *ncount += pncount_c[i];
  }
  cudaFree(pncount_g);
  free(pncount_c);
}
// Forward pass of the DNS (dynamic network surgery) inner-product layer.
// Blob layout: blobs_[0]=weight; with bias: blobs_[1]=bias, blobs_[2]=weight
// mask, blobs_[3]=bias mask; without bias: blobs_[1]=weight mask.
// In TRAIN phase it (a) on the very first call computes mu_/std_ of the
// masked parameters, (b) with probability (1+gamma*iter)^-power refreshes
// the masks via CCMaskCalc; then it always applies the masks into the
// weight_tmp_/bias_tmp_ buffers and runs the standard inner-product GEMM/
// GEMV on the masked values.
template <typename Dtype>
void DNSInnerProductLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) {
  const Dtype *weight = this->blobs_[0]->gpu_data();
  Dtype *weightMask = NULL;
  Dtype *weightTmp = this->weight_tmp_.mutable_gpu_data();
  const Dtype *bias = NULL;
  Dtype *biasMask = NULL;
  Dtype *biasTmp = NULL;
  // Mask blob indices shift depending on whether a bias blob exists.
  if (this->bias_term_) {
    weightMask = this->blobs_[2]->mutable_gpu_data();
    bias = this->blobs_[1]->gpu_data();
    biasMask = this->blobs_[3]->mutable_gpu_data();
    biasTmp = this->bias_tmp_.mutable_gpu_data();
  } else {
    weightMask = this->blobs_[1]->mutable_gpu_data();
  }
  if (this->phase_ == TRAIN) {
    // Calculate the mean and standard deviation of learnable parameters
    // (one-time initialization on the very first training iteration).
    if (this->std_ == 0 && this->iter_ == 0) {
      unsigned int nz_w = 0, nz_b = 0, ncount = 0;
      CCMomentCalc(this->blobs_[0]->count(), weight, weightMask, &mu_, &std_,
                   &nz_w);
      if (this->bias_term_) {
        CCMomentCalc(this->blobs_[1]->count(), bias, biasMask, &mu_, &std_,
                     &nz_b);
      }
      // Convert accumulated sums to mean and standard deviation.
      ncount = nz_w + nz_b;
      this->mu_ /= ncount;
      this->std_ -= ncount * mu_ * mu_;
      this->std_ /= ncount;
      this->std_ = sqrt(std_);
      // output the percentage of kept parameters.
      // NOTE(review): this log reads blobs_[1]->count() as the bias size
      // even when bias_term_ is false (blobs_[1] is then the weight mask)
      // — the reported "bias" figures are wrong in that configuration.
      LOG_IF(INFO, Caffe::root_solver()) << "[" << this->name() << "] "
        << "mu_:" << mu_ << " " << "std_:" << std_ << ", "
        << "kept: weight: " << nz_w << "/" << this->blobs_[0]->count()
        << "(" << Dtype(nz_w) / this->blobs_[0]->count()*100 << "%), "
        << "bias: " << nz_b << "/" << this->blobs_[1]->count()
        << "(" << Dtype(nz_b) / this->blobs_[1]->count()*100 << "%), "
        <<"total: " << ncount << "/" << this->blobs_[0]->count() + this->blobs_[1]->count()
        << "(" << Dtype(ncount) / (this->blobs_[0]->count() + this->blobs_[1]->count())*100 << "%)"
        << "\n";
    }
    // Calculate the weight mask and bias mask with probability
    // LOG(INFO) << rand()<<" "<<rand()<<" "<<rand()<<" "<<rand()<<"
    // "<<rand()<< "\n";
    Dtype r_ = static_cast<Dtype>(rand()) / static_cast<Dtype>(RAND_MAX);
    // LOG(INFO) << "r_ = " << r_ << "\n";
    // Mask refresh fires with a probability that decays with iter_ and
    // stops entirely after iter_stop_.
    if (pow(1 + (this->gamma_) * (this->iter_), -(this->power_)) > r_ &&
        (this->iter_) < (this->iter_stop_)) {
      CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
                          CAFFE_CUDA_NUM_THREADS>>>(
          this->blobs_[0]->count(), weight, weightMask, this->mu_, this->std_,
          this->c_rate_, this->alpha_low_, this->alpha_high_);
      CUDA_POST_KERNEL_CHECK;
      if (this->bias_term_) {
        CCMaskCalc<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
                            CAFFE_CUDA_NUM_THREADS>>>(
            this->blobs_[1]->count(), bias, biasMask, this->mu_, this->std_,
            this->c_rate_, this->alpha_low_, this->alpha_high_);
        CUDA_POST_KERNEL_CHECK;
      }
    }
  }
  // Calculate the current (masked) weight and bias
  CCMaskApply<Dtype>
      <<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()), CAFFE_CUDA_NUM_THREADS>>>(
          this->blobs_[0]->count(), weight, weightMask, weightTmp);
  CUDA_POST_KERNEL_CHECK;
  if (this->bias_term_) {
    CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[1]->count()),
                         CAFFE_CUDA_NUM_THREADS>>>(this->blobs_[1]->count(),
                                                   bias, biasMask, biasTmp);
    CUDA_POST_KERNEL_CHECK;
  }
  // Forward calculation with (masked) weight and bias
  const Dtype *bottom_data = bottom[0]->gpu_data();
  Dtype *top_data = top[0]->mutable_gpu_data();
  if (M_ == 1) {
    // Single-sample batch: matrix-vector product is cheaper than GEMM.
    caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1., weightTmp,
                          bottom_data, (Dtype)0., top_data);
    if (bias_term_)
      caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0], biasTmp,
                            top_data);
  } else {
    caffe_gpu_gemm<Dtype>(CblasNoTrans, transpose_ ? CblasNoTrans : CblasTrans,
                          M_, N_, K_, (Dtype)1., bottom_data, weightTmp,
                          (Dtype)0., top_data);
    if (bias_term_)
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
                            bias_multiplier_.gpu_data(), biasTmp, (Dtype)1.,
                            top_data);
  }
}
// Backward pass of the DNS (dynamic network surgery) inner-product layer.
// Raw parameter gradients are first multiplied elementwise by the binary
// masks (in place, via CCMaskApply) so pruned connections receive no
// update; the bottom gradient uses the masked weights in weight_tmp_.
//
// Fix: the weight-gradient masking launch was previously sized with
// this->blobs_[2]->count() unconditionally.  When bias_term_ is false the
// weight mask lives in blobs_[1] and blobs_[2] is not the mask blob, so the
// element count came from the wrong blob.  The weight blob (blobs_[0])
// always has the same element count as its mask, so size the launch by it.
template <typename Dtype>
void DNSInnerProductLayer<Dtype>::Backward_gpu(
    const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down,
    const vector<Blob<Dtype> *> &bottom) {
  const Dtype *top_diff = top[0]->gpu_diff();
  if (this->param_propagate_down_[0]) {
    // The weight-mask blob index depends on whether a bias blob exists.
    const Dtype* weightMask = NULL;
    if (this->bias_term_) {
      weightMask = this->blobs_[2]->gpu_data();
    } else {
      weightMask = this->blobs_[1]->gpu_data();
    }
    Dtype *weight_diff = this->blobs_[0]->mutable_gpu_diff();
    const Dtype *bottom_data = bottom[0]->gpu_data();
    // Gradient with respect to weight: mask the accumulated diff in place.
    // NOTE(review): the mask is applied before the GEMM below accumulates
    // the fresh gradient, so the newly added term is not masked here —
    // confirm this ordering is the intended DNS behavior.
    CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[0]->count()),
                         CAFFE_CUDA_NUM_THREADS>>>(
        this->blobs_[0]->count(), weight_diff, weightMask, weight_diff);
    CUDA_POST_KERNEL_CHECK;
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1.,
                            bottom_data, top_diff, (Dtype)1.,
                            this->blobs_[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
                            top_diff, bottom_data, (Dtype)1.,
                            this->blobs_[0]->mutable_gpu_diff());
    }
  }
  if (bias_term_ && this->param_propagate_down_[1]) {
    const Dtype *biasMask = this->blobs_[3]->gpu_data();
    Dtype *bias_diff = this->blobs_[1]->mutable_gpu_diff();
    // Gradient with respect to bias: mask the accumulated diff in place.
    CCMaskApply<Dtype><<<CAFFE_GET_BLOCKS(this->blobs_[3]->count()),
                         CAFFE_CUDA_NUM_THREADS>>>(
        this->blobs_[3]->count(), bias_diff, biasMask, bias_diff);
    CUDA_POST_KERNEL_CHECK;
    caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
                          bias_multiplier_.gpu_data(), (Dtype)1., bias_diff);
  }
  if (propagate_down[0]) {
    // Gradient with respect to bottom data uses the masked weights.
    const Dtype *weightTmp = this->weight_tmp_.gpu_data();
    if (transpose_) {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1.,
                            top_diff, weightTmp, (Dtype)0.,
                            bottom[0]->mutable_gpu_diff());
    } else {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
                            top_diff, weightTmp, (Dtype)0.,
                            bottom[0]->mutable_gpu_diff());
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(DNSInnerProductLayer);
} // namespace caffe
|
1cfe5a9bc99411edeb5c67fedb92e90296c57e27.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "matutils.h"
#include "gradients.h"
#include "cumath.h"
using cumath::ij_2_index;
using cumath::index_2_ij;
using cumath::ijk_2_index;
#define NMAX 30
__constant__ double gradient_coeffients_dev[NMAX+10];
/** Mathematica code to generate these coeffients
Reference: http://www.trentfguidry.net/post/2009/07/12/Calculate-derivatives-function-numerically.aspx
For[n = 1, n <= 25, n++,
A = Table[2 j^i/i!, {j, 1, n}, {i, 1, 2 n - 1, 2}];
B = Inverse[A];
Print[B[[1]]];
]
For[n = 1, n <= 30, n++,
A = Table[2 j^i/i!, {j, 1, n}, {i, 1, 2 n - 1, 2}];
B = Inverse[A];
Print[];
Print[EngineeringForm[N[B[[1]], 200], 40,
NumberFormat -> (SequenceForm[#1, "E", #3] &)]];
]
**/
void copy_gradient_coefficients_to_device(const int n_points)
{
const double a[] = {
// 1 0
500.0000000000000000000000000000000000000E-3,
// 2 1
666.6666666666666666666666666666666666667E-3,
-83.33333333333333333333333333333333333333E-3,
// 3 3
750.0000000000000000000000000000000000000E-3,
-150.0000000000000000000000000000000000000E-3,
16.66666666666666666666666666666666666667E-3,
// 4 6
800.0000000000000000000000000000000000000E-3,
-200.0000000000000000000000000000000000000E-3,
38.09523809523809523809523809523809523810E-3,
-3.571428571428571428571428571428571428571E-3,
// 5 10
833.3333333333333333333333333333333333333E-3,
-238.0952380952380952380952380952380952381E-3,
59.52380952380952380952380952380952380952E-3,
-9.920634920634920634920634920634920634921E-3,
793.6507936507936507936507936507936507937E-6,
// 6 15
857.1428571428571428571428571428571428571E-3,
-267.8571428571428571428571428571428571429E-3,
79.36507936507936507936507936507936507937E-3,
-17.85714285714285714285714285714285714286E-3,
2.597402597402597402597402597402597402597E-3,
-180.3751803751803751803751803751803751804E-6,
// 7 21
875.0000000000000000000000000000000000000E-3,
-291.6666666666666666666666666666666666667E-3,
97.22222222222222222222222222222222222222E-3,
-26.51515151515151515151515151515151515152E-3,
5.303030303030303030303030303030303030303E-3,
-679.8756798756798756798756798756798756799E-6,
41.62504162504162504162504162504162504163E-6,
// 8 28
888.8888888888888888888888888888888888889E-3,
-311.1111111111111111111111111111111111111E-3,
113.1313131313131313131313131313131313131E-3,
-35.35353535353535353535353535353535353535E-3,
8.702408702408702408702408702408702408702E-3,
-1.554001554001554001554001554001554001554E-3,
177.6001776001776001776001776001776001776E-6,
-9.712509712509712509712509712509712509713E-6,
// 9 36
900.0000000000000000000000000000000000000E-3,
-327.2727272727272727272727272727272727273E-3,
127.2727272727272727272727272727272727273E-3,
-44.05594405594405594405594405594405594406E-3,
12.58741258741258741258741258741258741259E-3,
-2.797202797202797202797202797202797202797E-3,
449.5504495504495504495504495504495504496E-6,
-46.27725215960510078157136980666392431098E-6,
2.285296402943461766991178755884638237579E-6,
// 10 45
909.0909090909090909090909090909090909091E-3,
-340.9090909090909090909090909090909090909E-3,
139.8601398601398601398601398601398601399E-3,
-52.44755244755244755244755244755244755245E-3,
16.78321678321678321678321678321678321678E-3,
-4.370629370629370629370629370629370629371E-3,
881.4714697067638244108832344126461773521E-6,
-128.5479226655697243932538050185109008638E-6,
12.02787580496558824732199345202441177673E-6,
-541.2544112234514711294897053410985299530E-9,
// 11 55
916.6666666666666666666666666666666666667E-3,
-352.5641025641025641025641025641025641026E-3,
151.0989010989010989010989010989010989011E-3,
-60.43956043956043956043956043956043956044E-3,
21.15384615384615384615384615384615384615E-3,
-6.221719457013574660633484162895927601810E-3,
1.481361775479422538246067657832363714717E-3,
-272.8824323251567833611177264428038421846E-6,
36.38432431002090444814903019237384562462E-6,
-3.118656369430363238412774016489186767825E-6,
128.8700979103455883641642155574044118936E-9,
// 12 66
923.0769230769230769230769230769230769231E-3,
-362.6373626373626373626373626373626373626E-3,
161.1721611721611721611721611721611721612E-3,
-67.99450549450549450549450549450549450549E-3,
25.59793148028442146089204912734324499030E-3,
-8.295625942684766214177978883861236802413E-3,
2.245432585989861531657197291872214472834E-3,
-491.1883781852822100500119075970469159324E-6,
83.16416985147635302434064043971164714199E-6,
-10.20651175449937059844180587214642942197E-6,
806.8388738734680314973759582724450135946E-9,
-30.81676254377829286969144285068366371368E-9,
// 13 78
928.5714285714285714285714285714285714286E-3,
-371.4285714285714285714285714285714285714E-3,
170.2380952380952380952380952380952380952E-3,
-75.10504201680672268907563025210084033613E-3,
30.04201680672268907563025210084033613445E-3,
-10.54105852867462774583517617573345127525E-3,
3.162317558602388323750552852720035382574E-3,
-790.5793896505970809376382131800088456435E-6,
159.7130080102216325126541844808098678068E-6,
-24.99855777551295117589369844047458800454E-6,
2.840745201762835360897011186417566818698E-6,
-208.3213147959412597991141536706215667045E-9,
7.396023010506790288725946284164079291283E-9,
// 14 91
933.3333333333333333333333333333333333333E-3,
-379.1666666666666666666666666666666666667E-3,
178.4313725490196078431372549019607843137E-3,
-81.78104575163398692810457516339869281046E-3,
34.43412452700378396972824217406260749914E-3,
-12.91279669762641898864809081527347781218E-3,
4.216423411469851098334070470293380510099E-3,
-1.173890608875128998968008255933952528380E-3,
272.2065180000299128041758274629455138272E-6,
-51.03872212500560865078296764930228384260E-6,
7.423814127273543076477522567171241286196E-6,
-785.2111096154709023197379638354197514246E-9,
53.68964852071595913297353598874664967006E-9,
-1.780524058084968032471061142483945014568E-9,
// 15 105
937.5000000000000000000000000000000000000E-3,
-386.0294117647058823529411764705882352941E-3,
185.8660130718954248366013071895424836601E-3,
-88.04179566563467492260061919504643962848E-3,
38.73839009287925696594427244582043343653E-3,
-15.37237702098383212934296525627794977640E-3,
5.390314020344980097302078726227333038479E-3,
-1.640530354018037420918023960156144837798E-3,
425.3226843750467387565247304108523653550E-6,
-91.86969982501009557140934176874411091668E-6,
16.06113633304372300199464016936085855187E-6,
-2.181141971154085839777049899542832642846E-6,
215.7173378064480500878400999547856459958E-9,
-13.81441079548682094158581920892715959579E-9,
429.7816691929233181826699309444005207579E-12,
// 16 120
941.1764705882352941176470588235294117647E-3,
-392.1568627450980392156862745098039215686E-3,
192.6384588923288613691090471276229790162E-3,
-93.91124871001031991744066047471620227038E-3,
42.93085655314757481940144478844169246646E-3,
-17.88785689714482284141726866185070519436E-3,
6.666282073470120313571652917459890134544E-3,
-2.187373805357383227890698613541526450397E-3,
622.1863268572112292666876056295897458907E-6,
-150.7605330461704132453896890564005922735E-6,
30.45667334266068954452316950634355399465E-6,
-4.985467362637910490918971198955046040791E-6,
634.7544652695482853159432826255761537347E-9,
-58.94148606074376935076616195808921427536E-9,
3.549164752044786111443983945863436558516E-9,
-103.9794360950620931087104671639678679253E-12,
// 17 136
944.4444444444444444444444444444444444444E-3,
-397.6608187134502923976608187134502923977E-3,
198.8304093567251461988304093567251461988E-3,
-99.41520467836257309941520467836257309942E-3,
46.99627857522594364699627857522594364700E-3,
-20.43316459792432332478099068488084506391E-3,
8.027314663470269877592532054774617703679E-3,
-2.809560132214594457157386219171116196288E-3,
864.4800406814136791253496058988049834732E-6,
-230.5280108483769811000932282396813289262E-6,
52.39272973826749570456664278174575657413E-6,
-9.936552191740387116383328803434540039922E-6,
1.528700337190828787135896738989929236911E-6,
-183.1622523823112832513056000633094938234E-9,
16.02669708345223728448924000553958070955E-9,
-910.6077888325134820732522730420216312245E-12,
25.20713602304535590514193143368918010310E-12,
// 18 153
947.3684210526315789473684210526315789474E-3,
-402.6315789473684210526315789473684210526E-3,
204.5112781954887218045112781954887218045E-3,
-104.5796308954203691045796308954203691046E-3,
50.92573330559600582483877678385687539006E-3,
-22.98731017266486374037861452049095069690E-3,
9.457636185324972510327201402716276858153E-3,
-3.501144164759725400457665903890160183066E-3,
1.152640054241884905500466141198406644631E-3,
-333.4423014056881333769205622752533507682E-6,
83.62189376945156949891424759254002527103E-6,
-17.88579394513269680948999184618217207186E-6,
3.195476833869861464722777699566045630704E-6,
-463.6294513427254357298673001602521562406E-9,
52.45100863675277656741933092722044595853E-9,
-4.338778287966681885172554948023750125246E-9,
233.3460591847627232361710224147226958116E-12,
-6.121733034168157862677326205324515167896E-12,
// 19 171
950.0000000000000000000000000000000000000E-3,
-407.1428571428571428571428571428571428571E-3,
209.7402597402597402597402597402597402597E-3,
-109.4297007340485601355166572557876905703E-3,
54.71485036702428006775832862789384528515E-3,
-25.53359683794466403162055335968379446640E-3,
10.94297007340485601355166572557876905703E-3,
-4.255599472990777338603425559947299077734E-3,
1.486082355647573038877386703473659995399E-3,
-461.1979724423502534447062183194117227101E-6,
125.7812652115500691212835140871122880118E-6,
-29.75470789950646796417459473028462727162E-6,
6.008162172015729108150639320538242045230E-6,
-1.014365042028629589687770274896066838805E-6,
139.2265743960864142708704298876954484635E-9,
-14.91713297100925867187897463082451233537E-9,
1.169971213412490876225801931829373516500E-9,
-59.72826014418121590341931784113918853001E-12,
1.489070197500362723353944212105963148948E-12,
// 20 190
952.3809523809523809523809523809523809524E-3,
-411.2554112554112554112554112554112554113E-3,
214.5680406549971767363071710897797854320E-3,
-113.9892715979672501411631846414455110107E-3,
58.36250705815923207227555053642010163749E-3,
-28.05889762411501541936324545020197194110E-3,
12.47062116627334018638366464453420975160E-3,
-5.066189848798544450718363761842022711588E-3,
1.863426151282223246241237245734996859435E-3,
-614.9306299231336712596082910925489636134E-6,
180.3315630273119270556036044259674380098E-6,
-46.49173109297885619402280426606973011190E-6,
10.40374402080645733013097717842119834672E-6,
-1.988951062801234489583863284109934978049E-6,
318.2321700481975183334181254575895964879E-9,
-41.43648047502571853299715175229031204269E-9,
4.216112480765732887300187141727472131530E-9,
-314.3592639167432415969437781112588870000E-12,
15.27251484615756639337378679083039127126E-12,
-362.7222275962422018426274362822217926924E-15,
// 21 210
954.5454545454545454545454545454545454545E-3,
-415.0197628458498023715415019762845849802E-3,
219.0382081686429512516469038208168642951E-3,
-118.2806324110671936758893280632411067194E-3,
61.86986926117360899969595621769534813013E-3,
-30.55302185736968345663997837910881389142E-3,
14.02944881205750770968162272510098597055E-3,
-5.926232687851878256675857875258175108250E-3,
2.282697035320723476645515626025371152807E-3,
-795.2621929504455983152118955185164016232E-6,
248.5194352970142494735037173495363755073E-6,
-69.03317647139284707597325481931565986313E-6,
16.86783497491046942127853285177848702538E-6,
-3.580111913042222081250953911397882960489E-6,
649.7240138484032665973953394759120928294E-9,
-98.77561021343968580027969687978393303150E-9,
12.23227371064268554802225348356457375003E-9,
-1.184892610147724526019249625188591189462E-9,
84.18973808944358474347299968445253188281E-12,
-3.901475667559580756404846326840483184813E-12,
88.46883599908346386405547226395653480302E-15,
// 22 231
956.5217391304347826086956521739130434783E-3,
-418.4782608695652173913043478260869565217E-3,
223.1884057971014492753623188405797101449E-3,
-122.3244147157190635451505016722408026756E-3,
65.23968784838350055741360089186176142698E-3,
-33.00817539947974730583426235600148643627E-3,
15.60977752881800857812851815850316599942E-3,
-6.829277668857878752931226694345135124745E-3,
2.741502146638288244904291719593745999898E-3,
-1.002361722364624139543131659976463381213E-3,
331.3592470626856659646716231327151673430E-6,
-98.27075709457099407285604509573170404045E-6,
25.91756231065608634888511079447869117550E-6,
-6.016576964973734330991186434432553308599E-6,
1.214156072210915756884707893074677424438E-6,
-209.6815585232667014356814617974360683651E-9,
30.36113064590287079611677274895001894880E-9,
-3.584300145696866691208230116195488348122E-9,
331.2831970348836993157799337176831335876E-12,
-22.47993122736710816785649550227135549345E-12,
995.7887586873580583768104319943014615037E-15,
-21.60285530210177605982749904119868873097E-15,
// 23 253
958.3333333333333333333333333333333333333E-3,
-421.6666666666666666666666666666666666667E-3,
227.0512820512820512820512820512820512821E-3,
-126.1396011396011396011396011396011396011E-3,
68.47578347578347578347578347578347578348E-3,
-35.41850869437076333628057765988800471559E-3,
17.20327565155151362047913772051703086186E-3,
-7.769221261991006151184126712491562324710E-3,
3.237175525829585896326719463538150968629E-3,
-1.236012473498569160415656522441839460749E-3,
429.6300041572566600375276682284468713835E-6,
-135.0265727351378074403658385860833024348E-6,
38.08441795093630466266728780633118786623E-6,
-9.557865508922238621904917789233695796543E-6,
2.112791323024915905894771300777974860289E-6,
-406.3060236586376742105329424573028577478E-9,
66.92099213201091104644071993414400009964E-9,
-9.249242814993377949508066982767869932470E-9,
1.043147685901508791297902291289609390880E-9,
-92.18514433548217225423322574187245779871E-12,
5.986048333472868328196962710511198558358E-12,
-253.9535656624853230144165998398690297485E-15,
5.280697962735989703513388654515235023126E-15,
// 24 276
960.0000000000000000000000000000000000000E-3,
-424.6153846153846153846153846153846153846E-3,
230.6552706552706552706552706552706552707E-3,
-129.7435897435897435897435897435897435897E-3,
71.58267020335985853227232537577365163572E-3,
-37.77974260732881422536594950388053836330E-3,
18.80282120549083841631116380838294075224E-3,
-8.740373919739881920082142551553007615299E-3,
3.766895157328972679362000830298939308950E-3,
-1.495678959527680328570206212030461196201E-3,
543.8832580100655740255295316474404349821E-6,
-180.0354303135170765871544514481110699131E-6,
53.89834088388037220072815178488148454778E-6,
-14.48771192931370906899271749104897047055E-6,
3.467144735220374819929881108968984386115E-6,
-731.3508425855478135789592964231451439460E-9,
134.3083326412483789642852079514527667505E-9,
-21.14112643427057817030415310346941698850E-9,
2.794665428275670064128333580385186089056E-9,
-301.6968360070325637411269206097644073412E-12,
25.54047288948423820030704089818111384899E-12,
-1.589970150234690718003303929432223490599E-12,
64.71663886246659721752578436171862496426E-15,
-1.292085671733274076391573819721812824807E-15,
// 25 300
961.5384615384615384615384615384615384615E-3,
-427.3504273504273504273504273504273504274E-3,
234.0252340252340252340252340252340252340E-3,
-133.1522883247021178055660814281503936676E-3,
74.56528146183318597111700559976422045388E-3,
-40.08886100098558385543925032245388196445E-3,
20.40236675943016321214318989624885064262E-3,
-9.737493226091668805795613359573315079433E-3,
4.327774767151852802575828159810362257526E-3,
-1.780570189913905153059769300036263328811E-3,
674.4584052704186185832459469834330790949E-6,
-233.9337711973974487878826032329925544609E-6,
73.87382248338866803827871681041870140869E-6,
-21.10680642382533372522249051726248611677E-6,
5.417413648781835656140439232764038103304E-6,
-1.238737877008041689666258970906411151670E-6,
249.8294877999411811091614731239820810091E-9,
-43.89768777880103440677772654374878942796E-9,
6.616158684364749204849274574775535248711E-9,
-838.0467666862015659475747794715677981701E-12,
86.75432367351983084343424218132171823707E-12,
-7.047740027636040416681311743937160862586E-12,
421.3322842608502423016001586049389646111E-15,
-16.48068458843461832132109463930883705111E-15,
316.4291440979446717693650170747296713814E-18,
// 26 325
962.9629629629629629629629629629629629630E-3,
-429.8941798941798941798941798941798941799E-3,
237.1829958036854588578726509760992519613E-3,
-136.3802225871191388432767743112570698778E-3,
77.42877153333215624650552347993949773705E-3,
-42.34385943229102294730770815309191282495E-3,
21.99681009469663529730270553407372094803E-3,
-10.75579317130386946522521998541104737532E-3,
4.916934021167483184102957707616478800147E-3,
-2.089696958996180353243757025737003490062E-3,
821.5024900230684435356292975870283990417E-6,
-297.2541904688734499635500747847800128112E-6,
98.49842997785155738437162241389160187825E-6,
-29.72541904688734499635500747847800128112E-6,
8.120114471344835706321367896559844252402E-6,
-1.993778106803419481462835867458890329831E-6,
436.3946608598455773379668109349144360779E-9,
-84.30351402974289562210722483969937969687E-9,
14.19848657343038242056542734142305342263E-9,
-2.052607298115479197755654170010071853489E-9,
249.5571183119123644687725434662701341628E-12,
-24.81391801396855896706545176511208720369E-12,
1.937555266396835127863140865421351973314E-12,
-111.4094278178180198521305997617277384655E-15,
4.194237282553148982668446108676808977526E-15,
-77.55616276910408621798162183204158612289E-18,
// 27 351
964.2857142857142857142857142857142857143E-3,
-432.2660098522167487684729064039408866995E-3,
240.1477832512315270935960591133004926108E-3,
-139.4406483394247576672493246464325440966E-3,
80.17837279516923565866836167169871285555E-3,
-44.54354044176068647703797870649928491975E-3,
23.58187435152036342902010637402903319281E-3,
-11.79093717576018171451005318701451659640E-3,
5.531550773813418582115827421068538650165E-3,
-2.421922230696685973791254168143522327910E-3,
984.9922947809488410155818387186574060879E-6,
-370.4244527381346068776547085779566313493E-6,
128.2238490247389023807266298923696031594E-6,
-40.65634237369770075486454118538548392858E-6,
11.74516557462378021807197856466691757937E-6,
-3.072863086384128545425808345407042389951E-6,
723.0266085609714224531313753898923270473E-9,
-151.7463252535372121197930047114588834544E-9,
28.12689323921399126247879492363425528559E-9,
-4.548178481234602842443379604672773195117E-9,
631.6914557270281725615805006489962770996E-12,
-73.83406625380848770200291566027229212852E-12,
7.062388946016464041061148454460827942728E-12,
-530.8331560731329181189752106294086362181E-15,
29.39999018251197700351247320409032446747E-15,
-1.066763069031639223639784949350156911011E-15,
19.02320973581798341195775629842529470939E-18,
// 28 378
965.5172413793103448275862068965517241379E-3,
-434.4827586206896551724137931034482758621E-3,
242.9365962180200222469410456062291434928E-3,
-142.3456618464961067853170189098998887653E-3,
82.81929416523409849327535645666902619072E-3,
-46.68734720098981042513071564959283339183E-3,
25.15399930828838765762144679896430207233E-3,
-12.83902048027219786691094680363802918275E-3,
6.168898729259914891008262728474728736457E-3,
-2.776004428166961700953718227813627931406E-3,
1.164757102727396517882678976705018712478E-3,
-453.7699546042148934251270180079968734029E-6,
163.4593457486102430349425655863703558975E-6,
-54.20845649826360100648605491384731190477E-6,
16.47264724598397798026552986529313664083E-6,
-4.562736097964312082601957846210456882048E-6,
1.145157295175356757986373733950859766475E-6,
-258.6285195625503789172124254212690535396E-9,
52.13116855684106181981885394829611381538E-9,
-9.285864399187314136655233359540245273364E-9,
1.443866184518921537283612572911991490513E-9,
-192.9530264766195145279009529255115900959E-12,
21.71338405363493258114486426783250630235E-12,
-2.000832665198731768294598870833924859591E-12,
144.9659893276062262311558427170239898270E-15,
-7.743909686303751401236957410097435354006E-15,
271.1672078705690726359069261448260191302E-18,
-4.669333298791686837480540182340754155941E-18,
// 29 406
966.6666666666666666666666666666666666667E-3,
-436.5591397849462365591397849462365591398E-3,
245.5645161290322580645161290322580645161E-3,
-145.1063049853372434017595307917888563050E-3,
85.35664999137484905985854752458168017940E-3,
-48.77522856649991374849059858547524581680E-3,
26.71024421498804800512580398728406318539E-3,
-13.89654597671675470536950612851941125186E-3,
6.826373462246826872813090729799009036001E-3,
-3.150633905652381633606041875291850324308E-3,
1.360501004713528432693518082512389912769E-3,
-547.5186970188590034010499600354739892853E-6,
204.5674252597934737981944905627045674253E-6,
-70.68110374424757898675158477914044854560E-6,
22.48944210044241149578459515699923362815E-6,
-6.559420612629036686270506920791443141543E-6,
1.744705226888541727294457083791074390589E-6,
-420.7090618029107711206492258786987892201E-9,
91.33815157563194373014095035524381608069E-9,
-17.70841714221435643747630670152686230136E-9,
3.035728652951032532138795434547462108804E-9,
-454.5476057894594165769319367236841660242E-12,
58.52870509329159711776548349117672371883E-12,
-6.349812345026918555229274152344644554401E-12,
564.4277640023927604648243690973017381690E-15,
-39.47047300716033289963806776904207959224E-15,
2.036175194813826697203551115069631090076E-15,
-68.89314568918962509335323321664165342362E-18,
1.146853792685677468854869518469658915494E-18,
// 30 435
967.7419354838709677419354838709677419355E-3,
-438.5080645161290322580645161290322580645E-3,
248.0449657869012707722385141739980449658E-3,
-147.7326634466103156805244091771606003105E-3,
87.79541141969984474728307745385544247024E-3,
-50.80752975677074348801104019320338105917E-3,
28.24820187249029753773586790664589526070E-3,
-14.96039638641755889334037741108549058214E-3,
7.501509299172337222871528274504405534067E-3,
-3.544463143858929337806797109703331614847E-3,
1.571824010580456469093923330245379873546E-3,
-651.8079726414988135726785238517547491491E-6,
251.8613990886650156202120950840411910845E-6,
-90.35936558213468904556310554151477797023E-6,
29.98592280058988199437946020933231150420E-6,
-9.166892160506417729260025199863818054951E-6,
2.569942232732712855261884411476214323290E-6,
-657.3579090670480798760144154354668581565E-9,
152.5126835214633568777863735059729767581E-9,
-31.87515085598584158745735206274835214244E-9,
5.952409123433397121840775361857768840793E-9,
-983.3962625252727762481700554118167053407E-12,
141.9833816279310981293502294934206235767E-12,
-17.63836762507477376452576153429067931778E-12,
1.847218136735103579703061571591169324917E-12,
-158.5867219037691946860458080006154983617E-15,
10.71671155165171945896605850036647942145E-15,
-534.5157855195746774484302577153231731143E-18,
17.49437988842558850795563672241852582957E-18,
-281.8538982024122592948408138611873605876E-21
};
const int n = n_points/2;
insist(2*n+1 == n_points);
insist(1 <= n && n <= NMAX);
int index = 0;
for(int i = 0; i < n; i++)
index += i;
cout << " Gradients npoints: " << 2*n+1 << ", coefficents index: " << index << endl;
size_t size = 0;
checkCudaErrors(hipGetSymbolSize(&size, gradient_coeffients_dev));
insist(size > n*sizeof(double));
checkCudaErrors(hipMemcpyToSymbol(gradient_coeffients_dev, a+index, n*sizeof(double)));
}
// Central finite-difference gradient along the second (n2) dimension of a
// 3-D array, evaluated on the 2-D slice at fixed n2-index n2p.
//
// Launch layout: 1-D grid of 1-D blocks, at least n1*n3 threads in total
// (one thread per (i, k) pair).  Requires (n_points/2)*sizeof(double) bytes
// of dynamic shared memory for the stencil coefficients, and assumes
// gradient_coeffients_dev has already been filled on the host (see
// copy_gradient_coefficients_to_device).
//
// n1, n2, n3 : array extents
// n2p        : n2-index at which the slice and gradient are taken; the
//              stencil reads n2p +/- 1 .. n2p +/- n_points/2, so the caller
//              must keep those offsets inside [0, n2) -- TODO confirm
// dx2        : grid spacing along dimension 2
// f          : input 3-D array (device pointer)
// v          : output slice, v[index] = f(i, n2p, k)
// g          : output slice, g[index] = d f / d x2 at (i, n2p, k)
// n_points   : total stencil width 2*n+1; only the n antisymmetric pair
//              weights are stored (the center weight of a central
//              difference is zero, so it never appears in the sum)
template<class T> __global__ void gradients2_3d(const int n1, const int n2, const int n3,
						const int n2p, const double dx2,
						const T *f, T *v, T *g, const int n_points)
{
  // Stage the stencil coefficients in shared memory so the inner loop reads
  // them from fast block-local storage instead of constant memory.
  extern __shared__ double coefs[];
  const int n = n_points/2;
  // Block-stride copy: threads cooperate to load all n coefficients.
  for(int i = threadIdx.x; i < n; i += blockDim.x)
    coefs[i] = gradient_coeffients_dev[i];
  // Barrier before any thread reads coefs[]; deliberately placed outside
  // the index guard below so every thread in the block reaches it.
  __syncthreads();
  const int index = threadIdx.x + blockDim.x*blockIdx.x;
  if(index < n1*n3) {  // bounds guard: the grid may overshoot n1*n3
    int i = -1; int k = -1;
    // Decode the flat thread index into the (i, k) pair for this slice.
    index_2_ij(index, n1, n3, i, k);
    // Copy out the function value on the slice itself.
    v[index] = f[ijk_2_index(n1, n2, n3, i, n2p, k)];
    T g_tmp = 0.0;
    // Antisymmetric central-difference sum: coefs[ii] weights the pair of
    // samples at offsets +/-(ii+1) from n2p.
    for(int ii = 0; ii < n; ii++) {
      g_tmp += coefs[ii]*
	(f[ijk_2_index(n1, n2, n3, i, n2p+ii+1, k)] - f[ijk_2_index(n1, n2, n3, i, n2p-ii-1, k)]);
    }
    // Scale the undivided difference by the grid spacing.
    g[index] = g_tmp/dx2;
  }
}
// Explicit instantiation for Complex data, so the kernel is compiled into
// this translation unit for callers linking against it.
template __global__ void gradients2_3d<Complex>(const int n1, const int n2, const int n3,
						const int n2p, const double dx2,
						const Complex *f, Complex *v, Complex *g,
						const int n_points);
// Explicit instantiation for real (double) data.
template __global__ void gradients2_3d<double>(const int n1, const int n2, const int n3,
					       const int n2p, const double dx2,
					       const double *f, double *v, double *g,
					       const int n_points);
| 1cfe5a9bc99411edeb5c67fedb92e90296c57e27.cu |
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "matutils.h"
#include "gradients.h"
#include "cumath.h"
using cumath::ij_2_index;
using cumath::index_2_ij;
using cumath::ijk_2_index;
#define NMAX 30
__constant__ double gradient_coeffients_dev[NMAX+10];
/** Mathematica code to generate these coefficients
Reference: http://www.trentfguidry.net/post/2009/07/12/Calculate-derivatives-function-numerically.aspx
For[n = 1, n <= 25, n++,
A = Table[2 j^i/i!, {j, 1, n}, {i, 1, 2 n - 1, 2}];
B = Inverse[A];
Print[B[[1]]];
]
For[n = 1, n <= 30, n++,
A = Table[2 j^i/i!, {j, 1, n}, {i, 1, 2 n - 1, 2}];
B = Inverse[A];
Print[];
Print[EngineeringForm[N[B[[1]], 200], 40,
NumberFormat -> (SequenceForm[#1, "E", #3] &)]];
]
**/
void copy_gradient_coefficients_to_device(const int n_points)
{
const double a[] = {
// 1 0
500.0000000000000000000000000000000000000E-3,
// 2 1
666.6666666666666666666666666666666666667E-3,
-83.33333333333333333333333333333333333333E-3,
// 3 3
750.0000000000000000000000000000000000000E-3,
-150.0000000000000000000000000000000000000E-3,
16.66666666666666666666666666666666666667E-3,
// 4 6
800.0000000000000000000000000000000000000E-3,
-200.0000000000000000000000000000000000000E-3,
38.09523809523809523809523809523809523810E-3,
-3.571428571428571428571428571428571428571E-3,
// 5 10
833.3333333333333333333333333333333333333E-3,
-238.0952380952380952380952380952380952381E-3,
59.52380952380952380952380952380952380952E-3,
-9.920634920634920634920634920634920634921E-3,
793.6507936507936507936507936507936507937E-6,
// 6 15
857.1428571428571428571428571428571428571E-3,
-267.8571428571428571428571428571428571429E-3,
79.36507936507936507936507936507936507937E-3,
-17.85714285714285714285714285714285714286E-3,
2.597402597402597402597402597402597402597E-3,
-180.3751803751803751803751803751803751804E-6,
// 7 21
875.0000000000000000000000000000000000000E-3,
-291.6666666666666666666666666666666666667E-3,
97.22222222222222222222222222222222222222E-3,
-26.51515151515151515151515151515151515152E-3,
5.303030303030303030303030303030303030303E-3,
-679.8756798756798756798756798756798756799E-6,
41.62504162504162504162504162504162504163E-6,
// 8 28
888.8888888888888888888888888888888888889E-3,
-311.1111111111111111111111111111111111111E-3,
113.1313131313131313131313131313131313131E-3,
-35.35353535353535353535353535353535353535E-3,
8.702408702408702408702408702408702408702E-3,
-1.554001554001554001554001554001554001554E-3,
177.6001776001776001776001776001776001776E-6,
-9.712509712509712509712509712509712509713E-6,
// 9 36
900.0000000000000000000000000000000000000E-3,
-327.2727272727272727272727272727272727273E-3,
127.2727272727272727272727272727272727273E-3,
-44.05594405594405594405594405594405594406E-3,
12.58741258741258741258741258741258741259E-3,
-2.797202797202797202797202797202797202797E-3,
449.5504495504495504495504495504495504496E-6,
-46.27725215960510078157136980666392431098E-6,
2.285296402943461766991178755884638237579E-6,
// 10 45
909.0909090909090909090909090909090909091E-3,
-340.9090909090909090909090909090909090909E-3,
139.8601398601398601398601398601398601399E-3,
-52.44755244755244755244755244755244755245E-3,
16.78321678321678321678321678321678321678E-3,
-4.370629370629370629370629370629370629371E-3,
881.4714697067638244108832344126461773521E-6,
-128.5479226655697243932538050185109008638E-6,
12.02787580496558824732199345202441177673E-6,
-541.2544112234514711294897053410985299530E-9,
// 11 55
916.6666666666666666666666666666666666667E-3,
-352.5641025641025641025641025641025641026E-3,
151.0989010989010989010989010989010989011E-3,
-60.43956043956043956043956043956043956044E-3,
21.15384615384615384615384615384615384615E-3,
-6.221719457013574660633484162895927601810E-3,
1.481361775479422538246067657832363714717E-3,
-272.8824323251567833611177264428038421846E-6,
36.38432431002090444814903019237384562462E-6,
-3.118656369430363238412774016489186767825E-6,
128.8700979103455883641642155574044118936E-9,
// 12 66
923.0769230769230769230769230769230769231E-3,
-362.6373626373626373626373626373626373626E-3,
161.1721611721611721611721611721611721612E-3,
-67.99450549450549450549450549450549450549E-3,
25.59793148028442146089204912734324499030E-3,
-8.295625942684766214177978883861236802413E-3,
2.245432585989861531657197291872214472834E-3,
-491.1883781852822100500119075970469159324E-6,
83.16416985147635302434064043971164714199E-6,
-10.20651175449937059844180587214642942197E-6,
806.8388738734680314973759582724450135946E-9,
-30.81676254377829286969144285068366371368E-9,
// 13 78
928.5714285714285714285714285714285714286E-3,
-371.4285714285714285714285714285714285714E-3,
170.2380952380952380952380952380952380952E-3,
-75.10504201680672268907563025210084033613E-3,
30.04201680672268907563025210084033613445E-3,
-10.54105852867462774583517617573345127525E-3,
3.162317558602388323750552852720035382574E-3,
-790.5793896505970809376382131800088456435E-6,
159.7130080102216325126541844808098678068E-6,
-24.99855777551295117589369844047458800454E-6,
2.840745201762835360897011186417566818698E-6,
-208.3213147959412597991141536706215667045E-9,
7.396023010506790288725946284164079291283E-9,
// 14 91
933.3333333333333333333333333333333333333E-3,
-379.1666666666666666666666666666666666667E-3,
178.4313725490196078431372549019607843137E-3,
-81.78104575163398692810457516339869281046E-3,
34.43412452700378396972824217406260749914E-3,
-12.91279669762641898864809081527347781218E-3,
4.216423411469851098334070470293380510099E-3,
-1.173890608875128998968008255933952528380E-3,
272.2065180000299128041758274629455138272E-6,
-51.03872212500560865078296764930228384260E-6,
7.423814127273543076477522567171241286196E-6,
-785.2111096154709023197379638354197514246E-9,
53.68964852071595913297353598874664967006E-9,
-1.780524058084968032471061142483945014568E-9,
// 15 105
937.5000000000000000000000000000000000000E-3,
-386.0294117647058823529411764705882352941E-3,
185.8660130718954248366013071895424836601E-3,
-88.04179566563467492260061919504643962848E-3,
38.73839009287925696594427244582043343653E-3,
-15.37237702098383212934296525627794977640E-3,
5.390314020344980097302078726227333038479E-3,
-1.640530354018037420918023960156144837798E-3,
425.3226843750467387565247304108523653550E-6,
-91.86969982501009557140934176874411091668E-6,
16.06113633304372300199464016936085855187E-6,
-2.181141971154085839777049899542832642846E-6,
215.7173378064480500878400999547856459958E-9,
-13.81441079548682094158581920892715959579E-9,
429.7816691929233181826699309444005207579E-12,
// 16 120
941.1764705882352941176470588235294117647E-3,
-392.1568627450980392156862745098039215686E-3,
192.6384588923288613691090471276229790162E-3,
-93.91124871001031991744066047471620227038E-3,
42.93085655314757481940144478844169246646E-3,
-17.88785689714482284141726866185070519436E-3,
6.666282073470120313571652917459890134544E-3,
-2.187373805357383227890698613541526450397E-3,
622.1863268572112292666876056295897458907E-6,
-150.7605330461704132453896890564005922735E-6,
30.45667334266068954452316950634355399465E-6,
-4.985467362637910490918971198955046040791E-6,
634.7544652695482853159432826255761537347E-9,
-58.94148606074376935076616195808921427536E-9,
3.549164752044786111443983945863436558516E-9,
-103.9794360950620931087104671639678679253E-12,
// 17 136
944.4444444444444444444444444444444444444E-3,
-397.6608187134502923976608187134502923977E-3,
198.8304093567251461988304093567251461988E-3,
-99.41520467836257309941520467836257309942E-3,
46.99627857522594364699627857522594364700E-3,
-20.43316459792432332478099068488084506391E-3,
8.027314663470269877592532054774617703679E-3,
-2.809560132214594457157386219171116196288E-3,
864.4800406814136791253496058988049834732E-6,
-230.5280108483769811000932282396813289262E-6,
52.39272973826749570456664278174575657413E-6,
-9.936552191740387116383328803434540039922E-6,
1.528700337190828787135896738989929236911E-6,
-183.1622523823112832513056000633094938234E-9,
16.02669708345223728448924000553958070955E-9,
-910.6077888325134820732522730420216312245E-12,
25.20713602304535590514193143368918010310E-12,
// 18 153
947.3684210526315789473684210526315789474E-3,
-402.6315789473684210526315789473684210526E-3,
204.5112781954887218045112781954887218045E-3,
-104.5796308954203691045796308954203691046E-3,
50.92573330559600582483877678385687539006E-3,
-22.98731017266486374037861452049095069690E-3,
9.457636185324972510327201402716276858153E-3,
-3.501144164759725400457665903890160183066E-3,
1.152640054241884905500466141198406644631E-3,
-333.4423014056881333769205622752533507682E-6,
83.62189376945156949891424759254002527103E-6,
-17.88579394513269680948999184618217207186E-6,
3.195476833869861464722777699566045630704E-6,
-463.6294513427254357298673001602521562406E-9,
52.45100863675277656741933092722044595853E-9,
-4.338778287966681885172554948023750125246E-9,
233.3460591847627232361710224147226958116E-12,
-6.121733034168157862677326205324515167896E-12,
// 19 171
950.0000000000000000000000000000000000000E-3,
-407.1428571428571428571428571428571428571E-3,
209.7402597402597402597402597402597402597E-3,
-109.4297007340485601355166572557876905703E-3,
54.71485036702428006775832862789384528515E-3,
-25.53359683794466403162055335968379446640E-3,
10.94297007340485601355166572557876905703E-3,
-4.255599472990777338603425559947299077734E-3,
1.486082355647573038877386703473659995399E-3,
-461.1979724423502534447062183194117227101E-6,
125.7812652115500691212835140871122880118E-6,
-29.75470789950646796417459473028462727162E-6,
6.008162172015729108150639320538242045230E-6,
-1.014365042028629589687770274896066838805E-6,
139.2265743960864142708704298876954484635E-9,
-14.91713297100925867187897463082451233537E-9,
1.169971213412490876225801931829373516500E-9,
-59.72826014418121590341931784113918853001E-12,
1.489070197500362723353944212105963148948E-12,
// 20 190
952.3809523809523809523809523809523809524E-3,
-411.2554112554112554112554112554112554113E-3,
214.5680406549971767363071710897797854320E-3,
-113.9892715979672501411631846414455110107E-3,
58.36250705815923207227555053642010163749E-3,
-28.05889762411501541936324545020197194110E-3,
12.47062116627334018638366464453420975160E-3,
-5.066189848798544450718363761842022711588E-3,
1.863426151282223246241237245734996859435E-3,
-614.9306299231336712596082910925489636134E-6,
180.3315630273119270556036044259674380098E-6,
-46.49173109297885619402280426606973011190E-6,
10.40374402080645733013097717842119834672E-6,
-1.988951062801234489583863284109934978049E-6,
318.2321700481975183334181254575895964879E-9,
-41.43648047502571853299715175229031204269E-9,
4.216112480765732887300187141727472131530E-9,
-314.3592639167432415969437781112588870000E-12,
15.27251484615756639337378679083039127126E-12,
-362.7222275962422018426274362822217926924E-15,
// 21 210
954.5454545454545454545454545454545454545E-3,
-415.0197628458498023715415019762845849802E-3,
219.0382081686429512516469038208168642951E-3,
-118.2806324110671936758893280632411067194E-3,
61.86986926117360899969595621769534813013E-3,
-30.55302185736968345663997837910881389142E-3,
14.02944881205750770968162272510098597055E-3,
-5.926232687851878256675857875258175108250E-3,
2.282697035320723476645515626025371152807E-3,
-795.2621929504455983152118955185164016232E-6,
248.5194352970142494735037173495363755073E-6,
-69.03317647139284707597325481931565986313E-6,
16.86783497491046942127853285177848702538E-6,
-3.580111913042222081250953911397882960489E-6,
649.7240138484032665973953394759120928294E-9,
-98.77561021343968580027969687978393303150E-9,
12.23227371064268554802225348356457375003E-9,
-1.184892610147724526019249625188591189462E-9,
84.18973808944358474347299968445253188281E-12,
-3.901475667559580756404846326840483184813E-12,
88.46883599908346386405547226395653480302E-15,
// 22 231
956.5217391304347826086956521739130434783E-3,
-418.4782608695652173913043478260869565217E-3,
223.1884057971014492753623188405797101449E-3,
-122.3244147157190635451505016722408026756E-3,
65.23968784838350055741360089186176142698E-3,
-33.00817539947974730583426235600148643627E-3,
15.60977752881800857812851815850316599942E-3,
-6.829277668857878752931226694345135124745E-3,
2.741502146638288244904291719593745999898E-3,
-1.002361722364624139543131659976463381213E-3,
331.3592470626856659646716231327151673430E-6,
-98.27075709457099407285604509573170404045E-6,
25.91756231065608634888511079447869117550E-6,
-6.016576964973734330991186434432553308599E-6,
1.214156072210915756884707893074677424438E-6,
-209.6815585232667014356814617974360683651E-9,
30.36113064590287079611677274895001894880E-9,
-3.584300145696866691208230116195488348122E-9,
331.2831970348836993157799337176831335876E-12,
-22.47993122736710816785649550227135549345E-12,
995.7887586873580583768104319943014615037E-15,
-21.60285530210177605982749904119868873097E-15,
// 23 253
958.3333333333333333333333333333333333333E-3,
-421.6666666666666666666666666666666666667E-3,
227.0512820512820512820512820512820512821E-3,
-126.1396011396011396011396011396011396011E-3,
68.47578347578347578347578347578347578348E-3,
-35.41850869437076333628057765988800471559E-3,
17.20327565155151362047913772051703086186E-3,
-7.769221261991006151184126712491562324710E-3,
3.237175525829585896326719463538150968629E-3,
-1.236012473498569160415656522441839460749E-3,
429.6300041572566600375276682284468713835E-6,
-135.0265727351378074403658385860833024348E-6,
38.08441795093630466266728780633118786623E-6,
-9.557865508922238621904917789233695796543E-6,
2.112791323024915905894771300777974860289E-6,
-406.3060236586376742105329424573028577478E-9,
66.92099213201091104644071993414400009964E-9,
-9.249242814993377949508066982767869932470E-9,
1.043147685901508791297902291289609390880E-9,
-92.18514433548217225423322574187245779871E-12,
5.986048333472868328196962710511198558358E-12,
-253.9535656624853230144165998398690297485E-15,
5.280697962735989703513388654515235023126E-15,
// 24 276
960.0000000000000000000000000000000000000E-3,
-424.6153846153846153846153846153846153846E-3,
230.6552706552706552706552706552706552707E-3,
-129.7435897435897435897435897435897435897E-3,
71.58267020335985853227232537577365163572E-3,
-37.77974260732881422536594950388053836330E-3,
18.80282120549083841631116380838294075224E-3,
-8.740373919739881920082142551553007615299E-3,
3.766895157328972679362000830298939308950E-3,
-1.495678959527680328570206212030461196201E-3,
543.8832580100655740255295316474404349821E-6,
-180.0354303135170765871544514481110699131E-6,
53.89834088388037220072815178488148454778E-6,
-14.48771192931370906899271749104897047055E-6,
3.467144735220374819929881108968984386115E-6,
-731.3508425855478135789592964231451439460E-9,
134.3083326412483789642852079514527667505E-9,
-21.14112643427057817030415310346941698850E-9,
2.794665428275670064128333580385186089056E-9,
-301.6968360070325637411269206097644073412E-12,
25.54047288948423820030704089818111384899E-12,
-1.589970150234690718003303929432223490599E-12,
64.71663886246659721752578436171862496426E-15,
-1.292085671733274076391573819721812824807E-15,
// 25 300
961.5384615384615384615384615384615384615E-3,
-427.3504273504273504273504273504273504274E-3,
234.0252340252340252340252340252340252340E-3,
-133.1522883247021178055660814281503936676E-3,
74.56528146183318597111700559976422045388E-3,
-40.08886100098558385543925032245388196445E-3,
20.40236675943016321214318989624885064262E-3,
-9.737493226091668805795613359573315079433E-3,
4.327774767151852802575828159810362257526E-3,
-1.780570189913905153059769300036263328811E-3,
674.4584052704186185832459469834330790949E-6,
-233.9337711973974487878826032329925544609E-6,
73.87382248338866803827871681041870140869E-6,
-21.10680642382533372522249051726248611677E-6,
5.417413648781835656140439232764038103304E-6,
-1.238737877008041689666258970906411151670E-6,
249.8294877999411811091614731239820810091E-9,
-43.89768777880103440677772654374878942796E-9,
6.616158684364749204849274574775535248711E-9,
-838.0467666862015659475747794715677981701E-12,
86.75432367351983084343424218132171823707E-12,
-7.047740027636040416681311743937160862586E-12,
421.3322842608502423016001586049389646111E-15,
-16.48068458843461832132109463930883705111E-15,
316.4291440979446717693650170747296713814E-18,
// 26 325
962.9629629629629629629629629629629629630E-3,
-429.8941798941798941798941798941798941799E-3,
237.1829958036854588578726509760992519613E-3,
-136.3802225871191388432767743112570698778E-3,
77.42877153333215624650552347993949773705E-3,
-42.34385943229102294730770815309191282495E-3,
21.99681009469663529730270553407372094803E-3,
-10.75579317130386946522521998541104737532E-3,
4.916934021167483184102957707616478800147E-3,
-2.089696958996180353243757025737003490062E-3,
821.5024900230684435356292975870283990417E-6,
-297.2541904688734499635500747847800128112E-6,
98.49842997785155738437162241389160187825E-6,
-29.72541904688734499635500747847800128112E-6,
8.120114471344835706321367896559844252402E-6,
-1.993778106803419481462835867458890329831E-6,
436.3946608598455773379668109349144360779E-9,
-84.30351402974289562210722483969937969687E-9,
14.19848657343038242056542734142305342263E-9,
-2.052607298115479197755654170010071853489E-9,
249.5571183119123644687725434662701341628E-12,
-24.81391801396855896706545176511208720369E-12,
1.937555266396835127863140865421351973314E-12,
-111.4094278178180198521305997617277384655E-15,
4.194237282553148982668446108676808977526E-15,
-77.55616276910408621798162183204158612289E-18,
// 27 351
964.2857142857142857142857142857142857143E-3,
-432.2660098522167487684729064039408866995E-3,
240.1477832512315270935960591133004926108E-3,
-139.4406483394247576672493246464325440966E-3,
80.17837279516923565866836167169871285555E-3,
-44.54354044176068647703797870649928491975E-3,
23.58187435152036342902010637402903319281E-3,
-11.79093717576018171451005318701451659640E-3,
5.531550773813418582115827421068538650165E-3,
-2.421922230696685973791254168143522327910E-3,
984.9922947809488410155818387186574060879E-6,
-370.4244527381346068776547085779566313493E-6,
128.2238490247389023807266298923696031594E-6,
-40.65634237369770075486454118538548392858E-6,
11.74516557462378021807197856466691757937E-6,
-3.072863086384128545425808345407042389951E-6,
723.0266085609714224531313753898923270473E-9,
-151.7463252535372121197930047114588834544E-9,
28.12689323921399126247879492363425528559E-9,
-4.548178481234602842443379604672773195117E-9,
631.6914557270281725615805006489962770996E-12,
-73.83406625380848770200291566027229212852E-12,
7.062388946016464041061148454460827942728E-12,
-530.8331560731329181189752106294086362181E-15,
29.39999018251197700351247320409032446747E-15,
-1.066763069031639223639784949350156911011E-15,
19.02320973581798341195775629842529470939E-18,
// 28 378
965.5172413793103448275862068965517241379E-3,
-434.4827586206896551724137931034482758621E-3,
242.9365962180200222469410456062291434928E-3,
-142.3456618464961067853170189098998887653E-3,
82.81929416523409849327535645666902619072E-3,
-46.68734720098981042513071564959283339183E-3,
25.15399930828838765762144679896430207233E-3,
-12.83902048027219786691094680363802918275E-3,
6.168898729259914891008262728474728736457E-3,
-2.776004428166961700953718227813627931406E-3,
1.164757102727396517882678976705018712478E-3,
-453.7699546042148934251270180079968734029E-6,
163.4593457486102430349425655863703558975E-6,
-54.20845649826360100648605491384731190477E-6,
16.47264724598397798026552986529313664083E-6,
-4.562736097964312082601957846210456882048E-6,
1.145157295175356757986373733950859766475E-6,
-258.6285195625503789172124254212690535396E-9,
52.13116855684106181981885394829611381538E-9,
-9.285864399187314136655233359540245273364E-9,
1.443866184518921537283612572911991490513E-9,
-192.9530264766195145279009529255115900959E-12,
21.71338405363493258114486426783250630235E-12,
-2.000832665198731768294598870833924859591E-12,
144.9659893276062262311558427170239898270E-15,
-7.743909686303751401236957410097435354006E-15,
271.1672078705690726359069261448260191302E-18,
-4.669333298791686837480540182340754155941E-18,
// 29 406
966.6666666666666666666666666666666666667E-3,
-436.5591397849462365591397849462365591398E-3,
245.5645161290322580645161290322580645161E-3,
-145.1063049853372434017595307917888563050E-3,
85.35664999137484905985854752458168017940E-3,
-48.77522856649991374849059858547524581680E-3,
26.71024421498804800512580398728406318539E-3,
-13.89654597671675470536950612851941125186E-3,
6.826373462246826872813090729799009036001E-3,
-3.150633905652381633606041875291850324308E-3,
1.360501004713528432693518082512389912769E-3,
-547.5186970188590034010499600354739892853E-6,
204.5674252597934737981944905627045674253E-6,
-70.68110374424757898675158477914044854560E-6,
22.48944210044241149578459515699923362815E-6,
-6.559420612629036686270506920791443141543E-6,
1.744705226888541727294457083791074390589E-6,
-420.7090618029107711206492258786987892201E-9,
91.33815157563194373014095035524381608069E-9,
-17.70841714221435643747630670152686230136E-9,
3.035728652951032532138795434547462108804E-9,
-454.5476057894594165769319367236841660242E-12,
58.52870509329159711776548349117672371883E-12,
-6.349812345026918555229274152344644554401E-12,
564.4277640023927604648243690973017381690E-15,
-39.47047300716033289963806776904207959224E-15,
2.036175194813826697203551115069631090076E-15,
-68.89314568918962509335323321664165342362E-18,
1.146853792685677468854869518469658915494E-18,
// 30 435
967.7419354838709677419354838709677419355E-3,
-438.5080645161290322580645161290322580645E-3,
248.0449657869012707722385141739980449658E-3,
-147.7326634466103156805244091771606003105E-3,
87.79541141969984474728307745385544247024E-3,
-50.80752975677074348801104019320338105917E-3,
28.24820187249029753773586790664589526070E-3,
-14.96039638641755889334037741108549058214E-3,
7.501509299172337222871528274504405534067E-3,
-3.544463143858929337806797109703331614847E-3,
1.571824010580456469093923330245379873546E-3,
-651.8079726414988135726785238517547491491E-6,
251.8613990886650156202120950840411910845E-6,
-90.35936558213468904556310554151477797023E-6,
29.98592280058988199437946020933231150420E-6,
-9.166892160506417729260025199863818054951E-6,
2.569942232732712855261884411476214323290E-6,
-657.3579090670480798760144154354668581565E-9,
152.5126835214633568777863735059729767581E-9,
-31.87515085598584158745735206274835214244E-9,
5.952409123433397121840775361857768840793E-9,
-983.3962625252727762481700554118167053407E-12,
141.9833816279310981293502294934206235767E-12,
-17.63836762507477376452576153429067931778E-12,
1.847218136735103579703061571591169324917E-12,
-158.5867219037691946860458080006154983617E-15,
10.71671155165171945896605850036647942145E-15,
-534.5157855195746774484302577153231731143E-18,
17.49437988842558850795563672241852582957E-18,
-281.8538982024122592948408138611873605876E-21
};
const int n = n_points/2;
insist(2*n+1 == n_points);
insist(1 <= n && n <= NMAX);
int index = 0;
for(int i = 0; i < n; i++)
index += i;
cout << " Gradients npoints: " << 2*n+1 << ", coefficents index: " << index << endl;
size_t size = 0;
checkCudaErrors(cudaGetSymbolSize(&size, gradient_coeffients_dev));
insist(size > n*sizeof(double));
checkCudaErrors(cudaMemcpyToSymbol(gradient_coeffients_dev, a+index, n*sizeof(double)));
}
// Central-difference gradient along the second dimension of a 3D field.
// Each thread owns one (i1, i3) column: it copies the sample at plane n2p
// into v and writes the antisymmetric stencil derivative, scaled by 1/dx2,
// into g. Requires dynamic shared memory of at least
// (n_points/2)*sizeof(double); the stencil coefficients are staged from
// gradient_coeffients_dev into shared memory by the whole block before use.
template<class T> __global__ void gradients2_3d(const int n1, const int n2, const int n3,
                                                const int n2p, const double dx2,
                                                const T *f, T *v, T *g, const int n_points)
{
  extern __shared__ double coefs[];
  const int half = n_points/2;
  // cooperative block-strided copy of the one-sided stencil coefficients
  for(int c = threadIdx.x; c < half; c += blockDim.x)
    coefs[c] = gradient_coeffients_dev[c];
  __syncthreads();
  const int tid = threadIdx.x + blockDim.x*blockIdx.x;
  if(tid >= n1*n3) return;
  int i1 = -1;
  int i3 = -1;
  index_2_ij(tid, n1, n3, i1, i3);
  v[tid] = f[ijk_2_index(n1, n2, n3, i1, n2p, i3)];
  T acc = 0.0;
  // coefficient m weights the symmetric sample pair at offsets +/-(m+1)
  for(int m = 0; m < half; m++) {
    const T upper = f[ijk_2_index(n1, n2, n3, i1, n2p+m+1, i3)];
    const T lower = f[ijk_2_index(n1, n2, n3, i1, n2p-m-1, i3)];
    acc += coefs[m]*(upper - lower);
  }
  g[tid] = acc/dx2;
}
// Explicit instantiation for complex-valued fields.
template __global__ void gradients2_3d<Complex>(const int n1, const int n2, const int n3,
                                                const int n2p, const double dx2,
                                                const Complex *f, Complex *v, Complex *g,
                                                const int n_points);
// Explicit instantiation for double-valued fields.
template __global__ void gradients2_3d<double>(const int n1, const int n2, const int n3,
                                               const int n2p, const double dx2,
                                               const double *f, double *v, double *g,
                                               const int n_points);
|
ndWorldSceneCuda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) <2003-2021> <Julio Jerez, Newton Game Dynamics>
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*/
#include <ndWorld.h>
#include <ndModel.h>
#include <ndWorldScene.h>
#include <ndBodyDynamic.h>
#include <ndSkeletonList.h>
#include <ndDynamicsUpdate.h>
#include <ndBodyParticleSet.h>
#include <ndDynamicsUpdateSoa.h>
#include <ndJointBilateralConstraint.h>
#include "cuQuat.h"
#include "cuVector.h"
#include "cuMatrix3x3.h"
#include "cuPrefixScan.h"
#include "ndCudaContext.h"
#include "ndWorldSceneCuda.h"
#include "cuSortBodyAabbCells.h"
#define D_CUDA_SCENE_GRID_SIZE 8.0f
#define D_CUDA_SCENE_INV_GRID_SIZE (1.0f/D_CUDA_SCENE_GRID_SIZE)
// Stamps the current frame counter into the device-resident scene info.
// 'info' is a reference to device memory (callers pass *m_sceneInfoGpu); a
// reference parameter of a __global__ kernel is passed as a pointer.
__global__ void CudaEndFrame(cuSceneInfo& info, int frameCount)
{
	info.m_frameCount = frameCount;
}

// The kernels below are thin trampolines: the host wraps the real work in a
// __device__ lambda (Predicate) and launches one of these to run it on the
// GPU. Variants that test info.m_frameIsValid skip their work when an
// earlier pass flagged a buffer overflow, so the frame can be retried after
// the host regrows the buffers (see ndWorldSceneCuda::UpdateBodyList).

// Unconditional trampoline for the per-body AABB initialization pass.
template <typename Predicate>
__global__ void CudaInitBodyArray(Predicate InitBodyArray, cuSceneInfo& info)
{
	InitBodyArray(info);
}

// Unconditional trampoline for the scene bounding-box reduction pass.
template <typename Predicate>
__global__ void CudaMergeAabb(Predicate MergeAabb, cuSceneInfo& info)
{
	MergeAabb(info);
}

// Guarded trampoline for the per-body grid-cell counting pass.
template <typename Predicate>
__global__ void CudaCountAabb(Predicate CountAabb, cuSceneInfo& info)
{
	if (info.m_frameIsValid)
	{
		CountAabb(info);
	}
}

// Guarded trampoline for the grid-buffer validation pass.
template <typename Predicate>
__global__ void CudaValidateGridBuffer(Predicate validateBuffer, cuSceneInfo& info)
{
	if (info.m_frameIsValid)
	{
		validateBuffer(info);
	}
}

// Guarded trampoline for the grid-hash generation pass.
template <typename Predicate>
__global__ void CudaGenerateGridHash(Predicate GenerateHash, cuSceneInfo& info)
{
	if (info.m_frameIsValid)
	{
		GenerateHash(info);
	}
}

// Unconditional trampoline for the transform readback gather pass.
template <typename Predicate>
__global__ void CudaGetBodyTransforms(Predicate GetTransform, cuSceneInfo& info, int frameCount)
{
	GetTransform(info, frameCount);
}

// Unconditional trampoline for the transform-buffer seeding pass.
template <typename Predicate>
__global__ void CudaInitTransforms(Predicate InitTransforms, cuSceneInfo& info)
{
	InitTransforms(info);
}

// Guarded trampoline for the broad-phase pair counting pass.
template <typename Predicate>
__global__ void CudaCalculateBodyPairsCount(Predicate CalculateBodyPairsCount, cuSceneInfo& info)
{
	if (info.m_frameIsValid)
	{
		CalculateBodyPairsCount(info);
	}
}
// Builds the CUDA-backed scene as a copy of an existing CPU scene.
// CreateContext may yield a null context (IsValid reports this to callers).
ndWorldSceneCuda::ndWorldSceneCuda(const ndWorldScene& src)
	:ndWorldScene(src)
	,m_context(ndCudaContext::CreateContext())
{
	// force a full body upload on the first update so GPU buffers get populated
	m_bodyListChanged = 1;
}
// Releases the CUDA context owned by this scene.
ndWorldSceneCuda::~ndWorldSceneCuda()
{
	// deleting a null pointer is a no-op, so no explicit guard is required
	delete m_context;
}
// The scene is usable only when a CUDA context was successfully created.
bool ndWorldSceneCuda::IsValid() const
{
	return m_context != nullptr;
}
// Frame prologue: drains the GPU, starts the async readback of the scene
// info, stamps the new frame number on the device, and stages last frame's
// transform data into the parity-selected GPU buffer.
void ndWorldSceneCuda::Begin()
{
	ndWorldScene::Begin();
	// wait for all outstanding GPU work before touching the scene info
	hipDeviceSynchronize();
	hipStream_t stream = m_context->m_solverMemCpyStream;
	const ndInt32 frameCounter = m_context->m_frameCounter;
	// get the scene info from the update
	cuSceneInfo* const gpuInfo = m_context->m_sceneInfoGpu;
	cuSceneInfo* const cpuInfo = m_context->m_sceneInfoCpu;
	// async device->host copy on the dedicated memcpy stream
	hipError_t cudaStatus = hipMemcpyAsync(cpuInfo, gpuInfo, sizeof(cuSceneInfo), hipMemcpyDeviceToHost, stream);
	dAssert(cudaStatus == hipSuccess);
	if (cudaStatus != hipSuccess)
	{
		dAssert(0);
	}
	// NOTE(review): no hipGetLastError() after this launch; launch-config
	// failures would go unnoticed in release builds.
	CudaEndFrame << < 1, 1, 0, m_context->m_solverComputeStream >> > (*gpuInfo, frameCounter);
	if (frameCounter)
	{
		// transform buffers are double buffered by frame parity; WriteData
		// moves the staged host data into the off-frame GPU buffer
		// (GetCount() - 1 presumably excludes a sentinel entry — TODO confirm)
		cuHostBuffer<cuSpatialVector>& cpuBuffer = m_context->m_transformBufferCpu0;
		cuDeviceBuffer<cuSpatialVector>& gpuBuffer = (frameCounter & 1) ? m_context->m_transformBufferGpu1 : m_context->m_transformBufferGpu0;
		gpuBuffer.WriteData(&cpuBuffer[0], cpuBuffer.GetCount() - 1, stream);
	}
}
// Frame epilogue: advances the frame counter, flips the context's double
// buffers, then runs the base-class epilogue.
void ndWorldSceneCuda::End()
{
	++m_context->m_frameCounter;
	m_context->SwapBuffers();
	ndWorldScene::End();
}
//void ndWorldSceneCuda::FindCollidingPairs(ndBodyKinematic* const body)
void ndWorldSceneCuda::FindCollidingPairs(ndBodyKinematic* const)
{
	// per-body broad phase is not supported in the CUDA scene; must never be called
	dAssert(0);
}

void ndWorldSceneCuda::FindCollidingPairs()
{
	// CPU broad phase intentionally disabled; the GPU path replaces it
	//ndWorldScene::FindCollidingPairs();
}

//void ndWorldSceneCuda::CalculateContacts(ndInt32 threadIndex, ndContact* const contact)
void ndWorldSceneCuda::CalculateContacts(ndInt32, ndContact* const)
{
	// per-contact narrow phase is not supported in the CUDA scene; must never be called
	dAssert(0);
}

void ndWorldSceneCuda::CalculateContacts()
{
	// CPU narrow phase intentionally disabled; the GPU path replaces it
	//ndWorldScene::CalculateContacts();
}
// Uploads the entire body array from the CPU scene into the GPU context.
// Called whenever the body list changes (see UpdateBodyList). Steps:
//  1) parallel host pass snapshots each body into a cuBodyProxy and seeds
//     both CPU transform staging buffers,
//  2) every host/device buffer is (re)sized for the new body count,
//  3) the cuSceneInfo buffer table is published to host and device copies,
//  4) the proxy array is uploaded and both GPU transform buffers are seeded.
void ndWorldSceneCuda::LoadBodyData()
{
	// host-side pass: copy per-body state into the staging proxy array
	auto CopyBodies = ndMakeObject::ndFunction([this](ndInt32 threadIndex, ndInt32 threadCount)
	{
		D_TRACKTIME();
		// aabb is seeded inverted (min = +big, max = -big); the GPU
		// InitBodyArray pass computes the real boxes
		const ndVector minBox(ndFloat32(1.0e15f));
		const ndVector maxBox(ndFloat32(-1.0e15f));
		ndArray<cuBodyProxy>& data = m_context->m_bodyBufferCpu;
		cuHostBuffer<cuSpatialVector>& transformBufferCpu0 = m_context->m_transformBufferCpu0;
		cuHostBuffer<cuSpatialVector>& transformBufferCpu1 = m_context->m_transformBufferCpu1;
		ndArray<ndBodyKinematic*>& bodyArray = GetActiveBodyArray();
		const ndStartEnd startEnd(bodyArray.GetCount(), threadIndex, threadCount);
		for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
		{
			cuSpatialVector transform;
			ndBodyKinematic* const body = bodyArray[i];
			cuBodyProxy& proxi = data[i];
			// get transform and velocity
			proxi.m_mass = body->GetMassMatrix();
			proxi.m_rotation = cuQuat(body->GetRotation());
			proxi.m_posit = body->GetGlobalGetCentreOfMass();
			proxi.m_invIntertia = body->GetInvInertia();
			proxi.m_dampCoef = body->GetCachedDamping();
			proxi.m_veloc = body->GetVelocity();
			proxi.m_omega = body->GetOmega();
			// get scene manager data
			const ndShapeInstance& collision = body->GetCollisionShape();
			const ndShape* const shape = collision.GetShape();
			proxi.m_minAabb = minBox;
			proxi.m_maxAabb = maxBox;
			proxi.m_obbSize = shape->GetObbSize();
			proxi.m_obbOrigin = shape->GetObbOrigin();
			proxi.m_scale = collision.GetScale();
			proxi.m_localPosition = collision.GetLocalMatrix().m_posit;
			proxi.m_localRotation = cuQuat(ndQuaternion(collision.GetLocalMatrix()));
			proxi.m_alignRotation = cuQuat(ndQuaternion(collision.GetAlignmentMatrix()));
			// seed both host staging transform buffers with the current pose
			transform.m_angular = cuQuat(body->GetRotation());
			transform.m_linear = body->GetGlobalGetCentreOfMass();
			transformBufferCpu0[i] = transform;
			transformBufferCpu1[i] = transform;
		}
	});
	// device lambda: seed both GPU transform buffers from the proxy array
	auto InitTransforms = [] __device__(const cuSceneInfo & info)
	{
		int index = threadIdx.x + blockDim.x * blockIdx.x;
		if (index < info.m_bodyArray.m_size)
		{
			cuBodyProxy* src = info.m_bodyArray.m_array;
			cuSpatialVector* dst0 = info.m_transformBuffer0.m_array;
			cuSpatialVector* dst1 = info.m_transformBuffer1.m_array;
			dst0[index].m_linear = src[index].m_posit;
			dst0[index].m_angular = src[index].m_rotation;
			dst1[index].m_linear = src[index].m_posit;
			dst1[index].m_angular = src[index].m_rotation;
		}
	};
	hipDeviceSynchronize();
	const ndArray<ndBodyKinematic*>& bodyArray = GetActiveBodyArray();
	const ndInt32 cpuBodyCount = bodyArray.GetCount();
	const ndInt32 blocksCount = (cpuBodyCount + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
	// round the body count up to a whole number of thread blocks.
	// bug fix: the parentheses previously grouped as (D * (c + D - 1)) / D,
	// which just yields c + D - 1; the division must happen before the
	// multiply. The only consumer, gpuBodyCount / D_THREADS_PER_BLOCK below,
	// evaluates identically either way, so behavior is unchanged.
	const ndInt32 gpuBodyCount = D_THREADS_PER_BLOCK * ((cpuBodyCount + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK);
	ndArray<cuBodyProxy>& bodyBufferCpu = m_context->m_bodyBufferCpu;
	bodyBufferCpu.SetCount(cpuBodyCount);
	cuDeviceBuffer<unsigned>& histogramGpu = m_context->m_histogram;
	cuDeviceBuffer<cuBodyProxy>& bodyBufferGpu = m_context->m_bodyBufferGpu;
	cuDeviceBuffer<cuBoundingBox>& boundingBoxGpu = m_context->m_boundingBoxGpu;
	cuDeviceBuffer<cuBodyAabbCell>& bodyAabbCellGpu0 = m_context->m_bodyAabbCell;
	cuDeviceBuffer<cuBodyAabbCell>& bodyAabbCellGpu1 = m_context->m_bodyAabbCellScrath;
	cuHostBuffer<cuSpatialVector>& transformBufferCpu0 = m_context->m_transformBufferCpu0;
	cuHostBuffer<cuSpatialVector>& transformBufferCpu1 = m_context->m_transformBufferCpu1;
	cuDeviceBuffer<cuSpatialVector>& transformBufferGpu0 = m_context->m_transformBufferGpu0;
	cuDeviceBuffer<cuSpatialVector>& transformBufferGpu1 = m_context->m_transformBufferGpu1;
	histogramGpu.SetCount(cpuBodyCount);
	bodyBufferGpu.SetCount(cpuBodyCount);
	bodyAabbCellGpu0.SetCount(cpuBodyCount);
	bodyAabbCellGpu1.SetCount(cpuBodyCount);
	transformBufferGpu0.SetCount(cpuBodyCount);
	transformBufferGpu1.SetCount(cpuBodyCount);
	transformBufferCpu0.SetCount(cpuBodyCount);
	transformBufferCpu1.SetCount(cpuBodyCount);
	// one bounding box per thread block of the AABB reduction
	boundingBoxGpu.SetCount(gpuBodyCount / D_THREADS_PER_BLOCK);
	// publish the new buffer table to both copies of the scene info
	cuSceneInfo info;
	info.m_histogram = cuBuffer<unsigned>(histogramGpu);
	info.m_bodyArray = cuBuffer<cuBodyProxy>(bodyBufferGpu);
	info.m_bodyAabbArray = cuBuffer<cuBoundingBox>(boundingBoxGpu);
	info.m_bodyAabbCell = cuBuffer<cuBodyAabbCell>(bodyAabbCellGpu0);
	info.m_bodyAabbCellScrath = cuBuffer<cuBodyAabbCell>(bodyAabbCellGpu1);
	info.m_transformBuffer0 = cuBuffer<cuSpatialVector>(transformBufferGpu0);
	info.m_transformBuffer1 = cuBuffer<cuSpatialVector>(transformBufferGpu1);
	hipError_t cudaStatus;
	ParallelExecute(CopyBodies);
	*m_context->m_sceneInfoCpu = info;
	cudaStatus = hipMemcpy(m_context->m_sceneInfoGpu, &info, sizeof(cuSceneInfo), hipMemcpyHostToDevice);
	dAssert(cudaStatus == hipSuccess);
	// upload the proxy array, then seed the GPU transform buffers
	bodyBufferGpu.ReadData(&bodyBufferCpu[0], cpuBodyCount);
	// NOTE(review): this passes the CPU-side scene info (*m_sceneInfoCpu) to a
	// __global__ kernel; other launches pass *m_sceneInfoGpu. This only works
	// if m_sceneInfoCpu is device-accessible (pinned/mapped) — TODO confirm.
	CudaInitTransforms << <blocksCount, D_THREADS_PER_BLOCK, 0, 0 >> > (InitTransforms, *m_context->m_sceneInfoCpu);
	hipDeviceSynchronize();
	if (cudaStatus != hipSuccess)
	{
		dAssert(0);
	}
}
// Enqueues the device-side gather of body transforms for this frame on the
// solver compute stream; the copy back to the host happens elsewhere (the
// WriteData call is commented out below; see also Begin()).
void ndWorldSceneCuda::GetBodyTransforms()
{
	D_TRACKTIME();
	// device lambda: copy each body's pose into the transform buffer chosen
	// by frame parity (double buffering against the host-side reader)
	auto GetTransform = [] __device__(const cuSceneInfo& info, int frameCount)
	{
		int index = threadIdx.x + blockDim.x * blockIdx.x;
		// m_size - 1: the last entry is presumably a sentinel body — TODO confirm
		if (index < (info.m_bodyArray.m_size - 1))
		{
			cuBodyProxy* src = info.m_bodyArray.m_array;
			cuSpatialVector* dst = (frameCount & 1) ? info.m_transformBuffer0.m_array : info.m_transformBuffer1.m_array;
			dst[index].m_linear = src[index].m_posit;
			dst[index].m_angular = src[index].m_rotation;
		}
	};
	hipStream_t stream = m_context->m_solverComputeStream;
	cuSceneInfo* const infoGpu = m_context->m_sceneInfoGpu;
	// exclude the last (sentinel) entry from the launch size
	ndInt32 threads = m_context->m_bodyBufferGpu.GetCount() - 1;
	ndInt32 blocks = (threads + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
	CudaGetBodyTransforms << <blocks, D_THREADS_PER_BLOCK, 0, stream >> > (GetTransform, *infoGpu, m_context->m_frameCounter);
	//cuHostBuffer<cuSpatialVector>& cpuBuffer = m_context->m_transformBufferCpu0;
	//cuDeviceBuffer<cuSpatialVector>& gpuBuffer = m_context->m_transformBufferGpu0;
	//gpuBuffer.WriteData(&cpuBuffer[0], cpuBuffer.GetCount() - 1, stream);
}
// Pushes the transforms read back from the GPU into the CPU body objects
// and fires the per-body transform-changed notifications.
void ndWorldSceneCuda::UpdateTransform()
{
	D_TRACKTIME();
	// kick off the GPU-side gather for the next readback
	GetBodyTransforms();
	auto SetTransform = ndMakeObject::ndFunction([this](ndInt32 threadIndex, ndInt32 threadCount)
	{
		D_TRACKTIME();
		const ndArray<ndBodyKinematic*>& bodyArray = GetActiveBodyArray();
		// NOTE(review): reads the Cpu1 staging buffer; assumes the context's
		// SwapBuffers()/Begin() handoff filled it with the latest frame's
		// data — confirm the double-buffering protocol.
		const cuSpatialVector* const data = &m_context->m_transformBufferCpu1[0];
		// GetCount() - 1 skips the last (sentinel) entry
		const ndStartEnd startEnd(bodyArray.GetCount() - 1, threadIndex, threadCount);
		for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
		{
			ndBodyKinematic* const body = bodyArray[i];
			const cuSpatialVector& transform = data[i];
			const ndVector position(transform.m_linear.x, transform.m_linear.y, transform.m_linear.z, ndFloat32(1.0f));
			const ndQuaternion rotation(ndVector(transform.m_angular.x, transform.m_angular.y, transform.m_angular.z, transform.m_angular.w));
			body->SetMatrixAndCentreOfMass(rotation, position);
			body->m_transformIsDirty = true;
			UpdateTransformNotify(threadIndex, body);
		}
	});
	ParallelExecute(SetTransform);
}
// Refreshes the GPU body buffers after the base class updates the body list,
// and services any buffer-regrow request flagged by the previous GPU frame.
void ndWorldSceneCuda::UpdateBodyList()
{
	D_TRACKTIME();
	// capture before the base-class update, which may reset the flag — TODO confirm
	bool bodyListChanged = m_bodyListChanged;
	ndWorldScene::UpdateBodyList();
	if (bodyListChanged)
	{
		// full re-upload of every body into the GPU context
		LoadBodyData();
	}
	cuSceneInfo* const sceneInfo = m_context->m_sceneInfoCpu;
	// m_frameIsValid == 0 means a GPU pass overflowed a buffer and recorded
	// the required size in the corresponding cuBuffer::m_size (e.g. the
	// CountAabb histogram check); regrow here and re-publish to the device.
	if (!sceneInfo->m_frameIsValid)
	{
		hipDeviceSynchronize();
		sceneInfo->m_frameIsValid = 1;
		if (sceneInfo->m_histogram.m_size > sceneInfo->m_histogram.m_capacity)
		{
			m_context->m_histogram.SetCount(sceneInfo->m_histogram.m_size);
			sceneInfo->m_histogram = cuBuffer<unsigned>(m_context->m_histogram);
		}
		if (sceneInfo->m_bodyAabbCell.m_size > sceneInfo->m_bodyAabbCell.m_capacity)
		{
			// grow both the cell buffer and its sort scratch together
			m_context->m_bodyAabbCell.SetCount(sceneInfo->m_bodyAabbCell.m_size);
			m_context->m_bodyAabbCellScrath.SetCount(sceneInfo->m_bodyAabbCell.m_size);
			sceneInfo->m_bodyAabbCell = cuBuffer<cuBodyAabbCell>(m_context->m_bodyAabbCell);
			sceneInfo->m_bodyAabbCellScrath = cuBuffer<cuBodyAabbCell>(m_context->m_bodyAabbCellScrath);
		}
		// push the updated buffer table back to the device copy
		hipError_t cudaStatus = hipMemcpy(m_context->m_sceneInfoGpu, sceneInfo, sizeof(cuSceneInfo), hipMemcpyHostToDevice);
		dAssert(cudaStatus == hipSuccess);
		if (cudaStatus != hipSuccess)
		{
			dAssert(0);
		}
		hipDeviceSynchronize();
	}
}
// Debug helper: copies the histogram back to the host and asserts it is a
// non-decreasing sequence (i.e. a valid prefix sum). Always returns true so
// it can be wrapped in dAssert(...).
bool ndWorldSceneCuda::SanityCheckPrefix() const
{
	cuSceneInfo info;
	hipError_t cudaStatus;
	hipDeviceSynchronize();
	cudaStatus = hipMemcpy(&info, m_context->m_sceneInfoGpu, sizeof(cuSceneInfo), hipMemcpyDeviceToHost);
	dAssert(cudaStatus == hipSuccess);
	// skip the check when the frame was invalidated (buffer overflow retry)
	if (info.m_frameIsValid)
	{
		// NOTE(review): function-local static scratch — not thread safe and
		// never shrunk; tolerable only because this is debug-only code
		static ndArray<unsigned> histogram;
		histogram.SetCount(info.m_histogram.m_size);
		cudaStatus = hipMemcpy(&histogram[0], info.m_histogram.m_array, histogram.GetCount() * sizeof(unsigned), hipMemcpyDeviceToHost);
		dAssert(cudaStatus == hipSuccess);
		for (int i = 1; i < histogram.GetCount(); i++)
		{
			// a prefix sum must be monotonic non-decreasing
			dAssert(histogram[i - 1] <= histogram[i]);
		}
	}
	if (cudaStatus != hipSuccess)
	{
		dAssert(0);
	}
	return true;
}
// Debug helper: copies the AABB grid-cell arrays back to the host and
// asserts the sorted cell array is ordered by m_key. Always returns true so
// it can be wrapped in dAssert(...).
bool ndWorldSceneCuda::SanityCheckSortCells() const
{
	cuSceneInfo info;
	hipError_t cudaStatus;
	hipDeviceSynchronize();
	cudaStatus = hipMemcpy(&info, m_context->m_sceneInfoGpu, sizeof(cuSceneInfo), hipMemcpyDeviceToHost);
	dAssert(cudaStatus == hipSuccess);
	// skip the check when the frame was invalidated (buffer overflow retry)
	if (info.m_frameIsValid)
	{
		// NOTE(review): function-local statics — not thread safe and never
		// shrunk; tolerable only because this is debug-only code
		static ndArray<cuBodyAabbCell> bodyAabbCell;
		static ndArray<cuBodyAabbCell> bodyAabbCellScrath;
		bodyAabbCell.SetCount(info.m_bodyAabbCell.m_size);
		bodyAabbCellScrath.SetCount(info.m_bodyAabbCell.m_size);
		cudaStatus = hipMemcpy(&bodyAabbCellScrath[0], info.m_bodyAabbCellScrath.m_array, bodyAabbCellScrath.GetCount() * sizeof(cuBodyAabbCell), hipMemcpyDeviceToHost);
		dAssert(cudaStatus == hipSuccess);
		cudaStatus = hipMemcpy(&bodyAabbCell[0], info.m_bodyAabbCell.m_array, bodyAabbCell.GetCount() * sizeof(cuBodyAabbCell), hipMemcpyDeviceToHost);
		dAssert(cudaStatus == hipSuccess);
		for (int i = 1; i < bodyAabbCell.GetCount(); i++)
		{
			cuBodyAabbCell key0(bodyAabbCell[i - 1]);
			cuBodyAabbCell key1(bodyAabbCell[i - 0]);
			// alternate checks kept for debugging other sort key layouts
			//cuBodyAabbCell key0(bodyAabbCellScrath[i - 1]);
			//cuBodyAabbCell key1(bodyAabbCellScrath[i - 0]);
			ndUnsigned32 value0 = key0.m_key;
			ndUnsigned32 value1 = key1.m_key;
			//value0 = key0.m_x + key0.m_y * 1024;
			//value1 = key1.m_x + key1.m_y * 1024;
			//value0 = key0.m_z;
			//value1 = key1.m_z;
			bool test = (value0 <= value1);
			dAssert(test);
			if (!test)
			{
				break;
			}
		}
	}
	if (cudaStatus != hipSuccess)
	{
		dAssert(0);
	}
	return true;
}
void ndWorldSceneCuda::InitBodyArray()
{
// ndWorldScene::InitBodyArray();
D_TRACKTIME();
// this has to be recreated in gpu
//ndInt32 scans[D_MAX_THREADS_COUNT][2];
//auto BuildBodyArray = ndMakeObject::ndFunction([this, &scans](ndInt32 threadIndex, ndInt32 threadCount)
//{
// D_TRACKTIME();
// const ndArray<ndBodyKinematic*>& view = GetActiveBodyArray();
//
// ndInt32* const scan = &scans[threadIndex][0];
// scan[0] = 0;
// scan[1] = 0;
//
// const ndFloat32 timestep = m_timestep;
// const ndStartEnd startEnd(view.GetCount() - 1, threadIndex, threadCount);
// for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
// {
// ndBodyKinematic* const body = view[i];
// body->ApplyExternalForces(threadIndex, timestep);
//
// body->PrepareStep(i);
// UpdateAabb(threadIndex, body);
//
// const ndInt32 key = body->m_sceneEquilibrium;
// scan[key] ++;
// }
//});
//auto CompactMovingBodies = ndMakeObject::ndFunction([this, &scans](ndInt32 threadIndex, ndInt32 threadCount)
//{
// D_TRACKTIME();
// const ndArray<ndBodyKinematic*>& activeBodyArray = GetActiveBodyArray();
// ndBodyKinematic** const sceneBodyArray = &m_sceneBodyArray[0];
//
// const ndArray<ndBodyKinematic*>& view = m_bodyList.m_view;
// ndInt32* const scan = &scans[threadIndex][0];
//
// const ndStartEnd startEnd(view.GetCount(), threadIndex, threadCount);
// for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
// {
// ndBodyKinematic* const body = activeBodyArray[i];
// const ndInt32 key = body->m_sceneEquilibrium;
// const ndInt32 index = scan[key];
// sceneBodyArray[index] = body;
// scan[key] ++;
// }
//});
//ParallelExecute(BuildBodyArray);
//ndInt32 sum = 0;
//ndInt32 threadCount = GetThreadCount();
//for (ndInt32 j = 0; j < 2; j++)
//{
// for (ndInt32 i = 0; i < threadCount; ++i)
// {
// const ndInt32 count = scans[i][j];
// scans[i][j] = sum;
// sum += count;
// }
//}
//
//ndInt32 movingBodyCount = scans[0][1] - scans[0][0];
//m_sceneBodyArray.SetCount(m_bodyList.GetCount());
//if (movingBodyCount)
//{
// ParallelExecute(CompactMovingBodies);
//}
//
//m_sceneBodyArray.SetCount(movingBodyCount);
//
//ndBodyKinematic* const sentinelBody = m_sentinelBody;
//sentinelBody->PrepareStep(GetActiveBodyArray().GetCount() - 1);
//
//sentinelBody->m_isStatic = 1;
//sentinelBody->m_autoSleep = 1;
//sentinelBody->m_equilibrium = 1;
//sentinelBody->m_equilibrium0 = 1;
//sentinelBody->m_isJointFence0 = 1;
//sentinelBody->m_isJointFence1 = 1;
//sentinelBody->m_isConstrained = 0;
//sentinelBody->m_sceneEquilibrium = 1;
//sentinelBody->m_weigh = ndFloat32(0.0f);
auto InitBodyArray = [] __device__(cuSceneInfo& info)
{
__shared__ cuBoundingBox cacheAabb[D_THREADS_PER_BLOCK];
const unsigned threadId = threadIdx.x;
const unsigned index = threadId + blockDim.x * blockIdx.x;
const unsigned bodyCount = info.m_bodyArray.m_size - 1;
if (index < bodyCount)
{
cuBodyProxy* bodyArray = info.m_bodyArray.m_array;
cuBodyProxy& body = bodyArray[index];
// calculate shape global Matrix
body.m_globalSphapeRotation = body.m_localRotation * body.m_rotation;
cuMatrix3x3 matrix(body.m_globalSphapeRotation.GetMatrix3x3());
body.m_globalSphapePosition = matrix.RotateVector(body.m_localPosition) + body.m_posit;
matrix.m_front = matrix.m_front.Scale(body.m_scale.x);
matrix.m_up = matrix.m_up.Scale(body.m_scale.y);
matrix.m_right = matrix.m_right.Scale(body.m_scale.z);
matrix = body.m_alignRotation.GetMatrix3x3() * matrix;
const cuVector origin(matrix.RotateVector(body.m_obbOrigin) + body.m_globalSphapePosition);
const cuVector size(matrix.m_front.Abs().Scale(body.m_obbSize.x) + matrix.m_up.Abs().Scale(body.m_obbSize.y) + matrix.m_right.Abs().Scale(body.m_obbSize.z));
const cuVector padding(1.0f / 16.0f);
const cuVector minBox(origin - size - padding);
const cuVector maxBox(origin + size + padding);
// save aabb and calculate bonding box for this thread block
body.m_minAabb = minBox;
body.m_maxAabb = maxBox;
cacheAabb[threadId].m_min = minBox;
cacheAabb[threadId].m_max = maxBox;
}
const unsigned lastBlock = bodyCount / D_THREADS_PER_BLOCK;
if (lastBlock == blockIdx.x)
{
__syncthreads();
const unsigned lastId = bodyCount - D_THREADS_PER_BLOCK * lastBlock;
const cuBoundingBox box(cacheAabb[0]);
if (threadId >= lastId)
{
cacheAabb[threadId] = box;
}
}
__syncthreads();
cuBoundingBox* bBox = info.m_bodyAabbArray.m_array;
for (int i = D_THREADS_PER_BLOCK / 2; i; i = i >> 1)
{
if (threadId < i)
{
cacheAabb[threadId].m_min = cacheAabb[threadId].m_min.Min(cacheAabb[threadId + i].m_min);
cacheAabb[threadId].m_max = cacheAabb[threadId].m_max.Max(cacheAabb[threadId + i].m_max);
}
__syncthreads();
}
if (threadId == 0)
{
bBox[blockIdx.x].m_min = cacheAabb[0].m_min;
bBox[blockIdx.x].m_max = cacheAabb[0].m_max;
}
};
auto MergeAabb = [] __device__(cuSceneInfo& info)
{
__shared__ cuBoundingBox cacheAabb[D_THREADS_PER_BLOCK];
const cuBoundingBox* bBoxOut = info.m_bodyAabbArray.m_array;
const unsigned threadId = threadIdx.x;
const unsigned boxCount = info.m_bodyAabbArray.m_size - 1;
const unsigned aabbBlocks = boxCount / D_THREADS_PER_BLOCK;
const unsigned boxLastRow = boxCount - aabbBlocks * D_THREADS_PER_BLOCK;
cacheAabb[threadId] = bBoxOut[0];
if (threadId < boxLastRow)
{
cacheAabb[threadId] = bBoxOut[aabbBlocks * D_THREADS_PER_BLOCK + threadId];
}
__syncthreads();
unsigned base = 0;
for (int i = 0; i < aabbBlocks; i++)
{
cacheAabb[threadId].m_min = cacheAabb[threadId].m_min.Min(cacheAabb[base + threadId].m_min);
cacheAabb[threadId].m_max = cacheAabb[threadId].m_max.Min(cacheAabb[base + threadId].m_max);
base += D_THREADS_PER_BLOCK;
}
__syncthreads();
for (int i = D_THREADS_PER_BLOCK / 2; i; i = i >> 1)
{
if (threadId < i)
{
cacheAabb[threadId].m_min = cacheAabb[threadId].m_min.Min(cacheAabb[threadId + i].m_min);
cacheAabb[threadId].m_max = cacheAabb[threadId].m_max.Max(cacheAabb[threadId + i].m_max);
}
__syncthreads();
}
if (threadIdx.x == 0)
{
cuVector minBox((cacheAabb[0].m_min.Scale(D_CUDA_SCENE_INV_GRID_SIZE).Floor()).Scale(D_CUDA_SCENE_GRID_SIZE));
cuVector maxBox((cacheAabb[0].m_max.Scale(D_CUDA_SCENE_INV_GRID_SIZE).Floor()).Scale(D_CUDA_SCENE_GRID_SIZE) + cuVector(D_CUDA_SCENE_GRID_SIZE));
minBox.w = 0.0f;
maxBox.w = 0.0f;
info.m_worldBox.m_min = minBox;
info.m_worldBox.m_max = maxBox;
}
};
// Per-body grid-cell counting pass. Each thread computes how many scene-grid
// cells its body's AABB overlaps, then the block folds those counts into an
// inclusive prefix sum that is written to info.m_histogram. One thread per
// body; the last slot of m_bodyArray is a sentinel and is excluded.
auto CountAabb = [] __device__(cuSceneInfo& info)
{
// Scan scratch: the low D_THREADS_PER_BLOCK/2 entries stay zero so that
// cacheBuffer[threadId1 - i] never underflows during the scan below.
__shared__ unsigned cacheBuffer[D_THREADS_PER_BLOCK / 2 + D_THREADS_PER_BLOCK];
const unsigned blockId = blockIdx.x;
const unsigned bodyCount = info.m_bodyArray.m_size - 1;
const unsigned blocks = (bodyCount + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
if (blockId < blocks)
{
const unsigned threadId = threadIdx.x;
const unsigned threadId1 = D_THREADS_PER_BLOCK / 2 + threadId;
const unsigned index = threadId + blockDim.x * blockId;
cacheBuffer[threadId] = 0;
cacheBuffer[threadId1] = 0;
if (index < bodyCount)
{
cuBodyProxy* bodyArray = info.m_bodyArray.m_array;
const cuVector minBox(info.m_worldBox.m_min);
const cuVector bodyBoxMin(bodyArray[index].m_minAabb);
const cuVector bodyBoxMax(bodyArray[index].m_maxAabb);
// Integer grid coordinates (cell units) of the body's AABB, relative to
// the world box min; the +1 makes the upper bound exclusive.
const int x0 = __float2int_rd((bodyBoxMin.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE);
const int y0 = __float2int_rd((bodyBoxMin.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE);
const int z0 = __float2int_rd((bodyBoxMin.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE);
const int x1 = __float2int_rd((bodyBoxMax.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int y1 = __float2int_rd((bodyBoxMax.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int z1 = __float2int_rd((bodyBoxMax.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int count = (z1 - z0) * (y1 - y0) * (x1 - x0);
cacheBuffer[threadId1] = count;
}
__syncthreads();
// Hillis-Steele style inclusive scan; the extra barrier between the read
// and the write keeps each round race free.
for (int i = 1; i < D_THREADS_PER_BLOCK; i = i << 1)
{
int sum = cacheBuffer[threadId1] + cacheBuffer[threadId1 - i];
__syncthreads();
cacheBuffer[threadId1] = sum;
__syncthreads();
}
// Capacity the later prefix-scan passes will need; if the histogram cannot
// hold it, flag the frame invalid and request a resize via m_size.
const unsigned newCapacity = D_PREFIX_SCAN_PASSES * D_THREADS_PER_BLOCK * ((blocks + D_PREFIX_SCAN_PASSES - 1) / D_PREFIX_SCAN_PASSES) + D_THREADS_PER_BLOCK;
if (newCapacity >= info.m_histogram.m_capacity)
{
if (index == 0)
{
#ifdef _DEBUG
printf("function: CountAabb: histogram buffer overflow\n");
#endif
}
info.m_frameIsValid = 0;
info.m_histogram.m_size = info.m_histogram.m_capacity + 1;
}
else
{
unsigned* histogram = info.m_histogram.m_array;
histogram[index] = cacheBuffer[threadId1];
if (index == 0)
{
info.m_histogram.m_size = blocks * D_THREADS_PER_BLOCK;
}
}
}
};
// Emits one cuBodyAabbCell entry into the scratch array for every grid cell a
// body's AABB overlaps. The write offset for body `index` is the exclusive
// prefix taken from histogram[index - 1] (0 for the first body), so entries
// from different bodies never collide.
auto GenerateHashGrids = [] __device__(const cuSceneInfo & info)
{
const unsigned threadId = threadIdx.x;
const unsigned index = threadId + blockDim.x * blockIdx.x;
const unsigned bodyCount = info.m_bodyArray.m_size - 1;
if (index < bodyCount)
{
const unsigned* histogram = info.m_histogram.m_array;
const cuBodyProxy* bodyArray = info.m_bodyArray.m_array;
cuBodyAabbCell* hashArray = info.m_bodyAabbCellScrath.m_array;
const cuVector minBox(info.m_worldBox.m_min);
const cuVector bodyBoxMin(bodyArray[index].m_minAabb);
const cuVector bodyBoxMax(bodyArray[index].m_maxAabb);
// Same cell range computation as CountAabb; the two must stay in sync so
// the emitted entry count matches the histogram.
const int x0 = __float2int_rd((bodyBoxMin.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE);
const int y0 = __float2int_rd((bodyBoxMin.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE);
const int z0 = __float2int_rd((bodyBoxMin.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE);
const int x1 = __float2int_rd((bodyBoxMax.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int y1 = __float2int_rd((bodyBoxMax.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int z1 = __float2int_rd((bodyBoxMax.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
cuBodyAabbCell hash;
hash.m_id = index;
hash.m_key = 0;
unsigned start = index ? histogram[index - 1] : 0;
for (int z = z0; z < z1; z++)
{
hash.m_z = z;
for (int y = y0; y < y1; y++)
{
hash.m_y = y;
for (int x = x0; x < x1; x++)
{
hash.m_x = x;
hashArray[start] = hash;
start++;
}
}
}
}
};
// Single-thread pass run after the prefix scan. Reads the total grid-cell
// count from the last histogram entry, grows the cell buffers (by flagging
// the frame invalid and publishing the required size through m_size) when
// they are too small, otherwise appends a terminator cell with all fields
// set to -1, then performs the same capacity check for the histogram that
// the next (pair counting) pass will need.
// Fix: the first diagnostic previously said "histogram buffer overflow"
// even though this branch detects a cell-buffer overflow.
auto ValidateGridArray = [] __device__(cuSceneInfo & info)
{
const unsigned lastIndex = info.m_bodyArray.m_size - 2;
const unsigned* histogram = info.m_histogram.m_array;
// Inclusive scan => last entry is the total number of emitted cells.
const unsigned cellCount = histogram[lastIndex];
if ((cellCount + D_THREADS_PER_BLOCK) > info.m_bodyAabbCellScrath.m_capacity)
{
#ifdef _DEBUG
printf("function: ValidateGridArray: cell buffer overflow\n");
#endif
info.m_frameIsValid = 0;
// Request a resize: the host side reallocates when m_size > m_capacity.
info.m_bodyAabbCell.m_size = cellCount + D_THREADS_PER_BLOCK;
info.m_bodyAabbCellScrath.m_size = cellCount + D_THREADS_PER_BLOCK;
}
else
{
cuBodyAabbCell* hashArray = info.m_bodyAabbCell.m_array;
cuBodyAabbCell* hashArrayScrath = info.m_bodyAabbCellScrath.m_array;
// Sentinel entry: maximal key so sorted consumers can stop at it.
cuBodyAabbCell hash;
hash.m_value = 0;
hash.m_id = unsigned(-1);
hash.m_x = unsigned(-1);
hash.m_y = unsigned(-1);
hash.m_z = unsigned(-1);
const long long value = hash.m_value;
hashArray[cellCount].m_value = value;
hashArrayScrath[cellCount].m_value = value;
info.m_bodyAabbCell.m_size = cellCount + 1;
info.m_bodyAabbCellScrath.m_size = cellCount + 1;
}
// check new histogram size.
const unsigned histogramGridBlockSize = (1 << D_AABB_GRID_CELL_BITS);
const unsigned blocksCount = (cellCount + histogramGridBlockSize - 1) / histogramGridBlockSize;
const unsigned newCapacity = (blocksCount + 2) * histogramGridBlockSize;
if (newCapacity >= info.m_histogram.m_capacity)
{
#ifdef _DEBUG
printf("function: ValidateGridArray: histogram buffer overflow\n");
#endif
info.m_frameIsValid = 0;
info.m_histogram.m_size = newCapacity;
}
else
{
info.m_histogram.m_size = blocksCount * histogramGridBlockSize;
}
};
// For every sorted grid cell, counts how many of the following entries share
// the same key (i.e. potential body pairs within the cell) and block-scans
// the counts into a region of the histogram past the cell prefix data.
auto CalculateBodyPairsCount = [] __device__(cuSceneInfo & info)
{
// Same padded scan buffer layout as CountAabb (low half stays zero).
__shared__ unsigned cacheBuffer[D_THREADS_PER_BLOCK / 2 + D_THREADS_PER_BLOCK];
const unsigned blockId = blockIdx.x;
const unsigned cellCount = info.m_bodyAabbCell.m_size - 1;
const unsigned blocks = (cellCount + blockDim.x - 1) / blockDim.x;
if (blockId < blocks)
{
const unsigned threadId = threadIdx.x;
const unsigned threadId1 = D_THREADS_PER_BLOCK / 2 + threadId;
int index = threadId + blockDim.x * blockIdx.x;
cacheBuffer[threadId] = 0;
cacheBuffer[threadId1] = 0;
if (index < cellCount)
{
const cuBodyAabbCell* hashArray = info.m_bodyAabbCell.m_array;
unsigned count = 0;
const cuBodyAabbCell& cell = hashArray[index];
// NOTE(review): this walk has no explicit bound; it appears to rely on
// the max-key sentinel appended by ValidateGridArray to terminate -
// confirm the sentinel is always present before enabling this kernel.
for (int i = index + 1; cell.m_key == hashArray[i].m_key; i++)
{
count++;
}
cacheBuffer[threadId1] = count;
}
__syncthreads();
// Inclusive scan of the per-cell pair counts (Hillis-Steele).
for (int i = 1; i < D_THREADS_PER_BLOCK; i = i << 1)
{
int sum = cacheBuffer[threadId1] + cacheBuffer[threadId1 - i];
__syncthreads();
cacheBuffer[threadId1] = sum;
__syncthreads();
}
if (index < cellCount)
{
unsigned* scan = info.m_histogram.m_array;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * D_THREADS_PER_BLOCK;
// Round cellCount up to the scan super-block alignment so this output
// region does not overlap the cell prefix data at the front.
const unsigned offset = (cellCount + prefixScanSuperBlockAlign) & (-prefixScanSuperBlockAlign);
scan[offset + index] = cacheBuffer[threadId1];
}
}
};
hipStream_t stream = m_context->m_solverComputeStream;
cuSceneInfo* const infoGpu = m_context->m_sceneInfoGpu;
ndInt32 threads = m_context->m_bodyBufferGpu.GetCount() - 1;
ndInt32 bodyBlocksCount = (threads + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
CudaInitBodyArray << <bodyBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (InitBodyArray, *infoGpu);
CudaMergeAabb << <1, D_THREADS_PER_BLOCK, 0, stream >> > (MergeAabb, *infoGpu);
CudaCountAabb << <bodyBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (CountAabb, *infoGpu);
CudaPrefixScan(m_context, D_THREADS_PER_BLOCK);
dAssert(SanityCheckPrefix());
CudaValidateGridBuffer << <1, 1, 0, stream >> > (ValidateGridArray, *infoGpu);
CudaGenerateGridHash << <bodyBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (GenerateHashGrids, *infoGpu);
CudaBodyAabbCellSortBuffer(m_context);
dAssert(SanityCheckSortCells());
// ndInt32 cellsBlocksCount = (m_context->m_bodyAabbCell.m_capacity + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
// dAssert(cellsBlocksCount > 0);
// CudaCalculateBodyPairsCount << <cellsBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (CalculateBodyPairsCount, *infoGpu);
////dAssert(SanityCheckPrefix());
//
//
// //auto GetKey____ = [] __device__(const unsigned& item)
// //{
// // return 0;
// //};
// //XXXXXXX << <1, 1, 0, stream >> > (GetKey____);
}
| ndWorldSceneCuda.cu | /* Copyright (c) <2003-2021> <Julio Jerez, Newton Game Dynamics>
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source distribution.
*/
#include <ndWorld.h>
#include <ndModel.h>
#include <ndWorldScene.h>
#include <ndBodyDynamic.h>
#include <ndSkeletonList.h>
#include <ndDynamicsUpdate.h>
#include <ndBodyParticleSet.h>
#include <ndDynamicsUpdateSoa.h>
#include <ndJointBilateralConstraint.h>
#include "cuQuat.h"
#include "cuVector.h"
#include "cuMatrix3x3.h"
#include "cuPrefixScan.h"
#include "ndCudaContext.h"
#include "ndWorldSceneCuda.h"
#include "cuSortBodyAabbCells.h"
#define D_CUDA_SCENE_GRID_SIZE 8.0f
#define D_CUDA_SCENE_INV_GRID_SIZE (1.0f/D_CUDA_SCENE_GRID_SIZE)
// Single-thread kernel: publishes the host frame counter into the
// device-side scene info (launched <<<1, 1>>>).
__global__ void CudaEndFrame(cuSceneInfo& info, int frameCount)
{
info.m_frameCount = frameCount;
}
// Kernel trampoline: forwards every thread to the InitBodyArray device lambda.
template <typename Predicate>
__global__ void CudaInitBodyArray(Predicate InitBodyArray, cuSceneInfo& info)
{
InitBodyArray(info);
}
// Kernel trampoline: forwards every thread to the MergeAabb device lambda.
template <typename Predicate>
__global__ void CudaMergeAabb(Predicate MergeAabb, cuSceneInfo& info)
{
MergeAabb(info);
}
// Kernel trampoline for CountAabb; skipped when an earlier pass flagged the
// frame invalid (a buffer resize is pending on the host).
template <typename Predicate>
__global__ void CudaCountAabb(Predicate CountAabb, cuSceneInfo& info)
{
if (info.m_frameIsValid)
{
CountAabb(info);
}
}
// Kernel trampoline for ValidateGridArray; no-op when the frame is invalid.
template <typename Predicate>
__global__ void CudaValidateGridBuffer(Predicate validateBuffer, cuSceneInfo& info)
{
if (info.m_frameIsValid)
{
validateBuffer(info);
}
}
// Kernel trampoline for GenerateHashGrids; no-op when the frame is invalid.
template <typename Predicate>
__global__ void CudaGenerateGridHash(Predicate GenerateHash, cuSceneInfo& info)
{
if (info.m_frameIsValid)
{
GenerateHash(info);
}
}
// Kernel trampoline: forwards to the GetTransform device lambda, passing the
// frame counter used to select the double-buffered transform array.
template <typename Predicate>
__global__ void CudaGetBodyTransforms(Predicate GetTransform, cuSceneInfo& info, int frameCount)
{
GetTransform(info, frameCount);
}
// Kernel trampoline: forwards every thread to the InitTransforms device lambda.
template <typename Predicate>
__global__ void CudaInitTransforms(Predicate InitTransforms, cuSceneInfo& info)
{
InitTransforms(info);
}
// Kernel trampoline for CalculateBodyPairsCount; no-op when the frame is
// invalid.
template <typename Predicate>
__global__ void CudaCalculateBodyPairsCount(Predicate CalculateBodyPairsCount, cuSceneInfo& info)
{
if (info.m_frameIsValid)
{
CalculateBodyPairsCount(info);
}
}
// Builds a CUDA-backed scene from an existing CPU scene. m_context is null
// when no usable CUDA device is found (see IsValid). The body-list-changed
// flag forces the first update to upload all body data.
ndWorldSceneCuda::ndWorldSceneCuda(const ndWorldScene& src)
:ndWorldScene(src)
,m_context(ndCudaContext::CreateContext())
{
m_bodyListChanged = 1;
}
ndWorldSceneCuda::~ndWorldSceneCuda()
{
	// Release the CUDA context if one was created; deleting a null
	// pointer is a well-defined no-op, so no guard is needed.
	delete m_context;
}
bool ndWorldSceneCuda::IsValid() const
{
	// The scene is usable only when a CUDA context was successfully created.
	return m_context != nullptr;
}
// Frame prologue: synchronizes with the previous frame, starts an async
// download of the device scene info, stamps the new frame number on the GPU,
// and uploads last frame's transforms. Statement order matters: the memcpy
// is queued on the copy stream while CudaEndFrame runs on the compute stream.
void ndWorldSceneCuda::Begin()
{
ndWorldScene::Begin();
// Wait for all outstanding GPU work from the previous frame.
cudaDeviceSynchronize();
cudaStream_t stream = m_context->m_solverMemCpyStream;
const ndInt32 frameCounter = m_context->m_frameCounter;
// get the scene info from the update
cuSceneInfo* const gpuInfo = m_context->m_sceneInfoGpu;
cuSceneInfo* const cpuInfo = m_context->m_sceneInfoCpu;
cudaError_t cudaStatus = cudaMemcpyAsync(cpuInfo, gpuInfo, sizeof(cuSceneInfo), cudaMemcpyDeviceToHost, stream);
dAssert(cudaStatus == cudaSuccess);
if (cudaStatus != cudaSuccess)
{
dAssert(0);
}
CudaEndFrame << < 1, 1, 0, m_context->m_solverComputeStream >> > (*gpuInfo, frameCounter);
if (frameCounter)
{
// Parity of the frame counter selects which GPU transform buffer is the
// upload target (double buffering against the readback in GetBodyTransforms).
cuHostBuffer<cuSpatialVector>& cpuBuffer = m_context->m_transformBufferCpu0;
cuDeviceBuffer<cuSpatialVector>& gpuBuffer = (frameCounter & 1) ? m_context->m_transformBufferGpu1 : m_context->m_transformBufferGpu0;
gpuBuffer.WriteData(&cpuBuffer[0], cpuBuffer.GetCount() - 1, stream);
}
}
// Frame epilogue: advances the frame counter, swaps the context's
// double buffers, then runs the base-class end-of-frame work.
void ndWorldSceneCuda::End()
{
m_context->m_frameCounter = m_context->m_frameCounter + 1;
m_context->SwapBuffers();
ndWorldScene::End();
}
//void ndWorldSceneCuda::FindCollidingPairs(ndBodyKinematic* const body)
// Per-body CPU pair search is not supported on the CUDA path; reaching this
// overload is a programming error.
void ndWorldSceneCuda::FindCollidingPairs(ndBodyKinematic* const)
{
dAssert(0);
}
// CPU broad phase is intentionally disabled; pair generation is expected to
// happen on the GPU (see InitBodyArray and the grid-hash kernels).
void ndWorldSceneCuda::FindCollidingPairs()
{
//ndWorldScene::FindCollidingPairs();
}
//void ndWorldSceneCuda::CalculateContacts(ndInt32 threadIndex, ndContact* const contact)
// Per-contact CPU narrow phase is not supported on the CUDA path; reaching
// this overload is a programming error.
void ndWorldSceneCuda::CalculateContacts(ndInt32, ndContact* const)
{
dAssert(0);
}
// CPU narrow phase is intentionally disabled on the CUDA path.
void ndWorldSceneCuda::CalculateContacts()
{
//ndWorldScene::CalculateContacts();
}
// Full re-upload of the body set to the GPU: copies every body's state into
// the CPU-side proxy array in parallel, sizes all device/host buffers for the
// current body count, publishes the scene info to the device, uploads the
// proxies and seeds both transform buffers on the GPU.
void ndWorldSceneCuda::LoadBodyData()
{
// Host-parallel job: fill one cuBodyProxy per body plus the initial
// transform entries for both host transform buffers.
auto CopyBodies = ndMakeObject::ndFunction([this](ndInt32 threadIndex, ndInt32 threadCount)
{
D_TRACKTIME();
// Inverted AABB seed; the real AABB is computed on the GPU (InitBodyArray).
const ndVector minBox(ndFloat32(1.0e15f));
const ndVector maxBox(ndFloat32(-1.0e15f));
ndArray<cuBodyProxy>& data = m_context->m_bodyBufferCpu;
cuHostBuffer<cuSpatialVector>& transformBufferCpu0 = m_context->m_transformBufferCpu0;
cuHostBuffer<cuSpatialVector>& transformBufferCpu1 = m_context->m_transformBufferCpu1;
ndArray<ndBodyKinematic*>& bodyArray = GetActiveBodyArray();
const ndStartEnd startEnd(bodyArray.GetCount(), threadIndex, threadCount);
for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
{
cuSpatialVector transform;
ndBodyKinematic* const body = bodyArray[i];
cuBodyProxy& proxi = data[i];
// Get transform and velocity
proxi.m_mass = body->GetMassMatrix();
proxi.m_rotation = cuQuat(body->GetRotation());
proxi.m_posit = body->GetGlobalGetCentreOfMass();
proxi.m_invIntertia = body->GetInvInertia();
proxi.m_dampCoef = body->GetCachedDamping();
proxi.m_veloc = body->GetVelocity();
proxi.m_omega = body->GetOmega();
// Get scene manager data
const ndShapeInstance& collision = body->GetCollisionShape();
const ndShape* const shape = collision.GetShape();
proxi.m_minAabb = minBox;
proxi.m_maxAabb = maxBox;
proxi.m_obbSize = shape->GetObbSize();
proxi.m_obbOrigin = shape->GetObbOrigin();
proxi.m_scale = collision.GetScale();
proxi.m_localPosition = collision.GetLocalMatrix().m_posit;
proxi.m_localRotation = cuQuat(ndQuaternion(collision.GetLocalMatrix()));
proxi.m_alignRotation = cuQuat(ndQuaternion(collision.GetAlignmentMatrix()));
transform.m_angular = cuQuat(body->GetRotation());
transform.m_linear = body->GetGlobalGetCentreOfMass();
transformBufferCpu0[i] = transform;
transformBufferCpu1[i] = transform;
}
});
// Device lambda: copy position/rotation from the proxies into both GPU
// transform buffers.
auto InitTransforms = [] __device__(const cuSceneInfo & info)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < info.m_bodyArray.m_size)
{
cuBodyProxy* src = info.m_bodyArray.m_array;
cuSpatialVector* dst0 = info.m_transformBuffer0.m_array;
cuSpatialVector* dst1 = info.m_transformBuffer1.m_array;
dst0[index].m_linear = src[index].m_posit;
dst0[index].m_angular = src[index].m_rotation;
dst1[index].m_linear = src[index].m_posit;
dst1[index].m_angular = src[index].m_rotation;
}
};
cudaDeviceSynchronize();
const ndArray<ndBodyKinematic*>& bodyArray = GetActiveBodyArray();
const ndInt32 cpuBodyCount = bodyArray.GetCount();
const ndInt32 blocksCount = (cpuBodyCount + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
// NOTE(review): the parentheses evaluate as (D * (n + D - 1)) / D == n + D - 1,
// not n rounded up to a multiple of D. The only consumer below divides by
// D_THREADS_PER_BLOCK again, which still yields ceil(n / D) blocks, so the
// misplacement is currently harmless - but confirm the intended value.
const ndInt32 gpuBodyCount = (D_THREADS_PER_BLOCK * ((cpuBodyCount + D_THREADS_PER_BLOCK - 1)) / D_THREADS_PER_BLOCK);
ndArray<cuBodyProxy>& bodyBufferCpu = m_context->m_bodyBufferCpu;
bodyBufferCpu.SetCount(cpuBodyCount);
cuDeviceBuffer<unsigned>& histogramGpu = m_context->m_histogram;
cuDeviceBuffer<cuBodyProxy>& bodyBufferGpu = m_context->m_bodyBufferGpu;
cuDeviceBuffer<cuBoundingBox>& boundingBoxGpu = m_context->m_boundingBoxGpu;
cuDeviceBuffer<cuBodyAabbCell>& bodyAabbCellGpu0 = m_context->m_bodyAabbCell;
cuDeviceBuffer<cuBodyAabbCell>& bodyAabbCellGpu1 = m_context->m_bodyAabbCellScrath;
cuHostBuffer<cuSpatialVector>& transformBufferCpu0 = m_context->m_transformBufferCpu0;
cuHostBuffer<cuSpatialVector>& transformBufferCpu1 = m_context->m_transformBufferCpu1;
cuDeviceBuffer<cuSpatialVector>& transformBufferGpu0 = m_context->m_transformBufferGpu0;
cuDeviceBuffer<cuSpatialVector>& transformBufferGpu1 = m_context->m_transformBufferGpu1;
histogramGpu.SetCount(cpuBodyCount);
bodyBufferGpu.SetCount(cpuBodyCount);
bodyAabbCellGpu0.SetCount(cpuBodyCount);
bodyAabbCellGpu1.SetCount(cpuBodyCount);
transformBufferGpu0.SetCount(cpuBodyCount);
transformBufferGpu1.SetCount(cpuBodyCount);
transformBufferCpu0.SetCount(cpuBodyCount);
transformBufferCpu1.SetCount(cpuBodyCount);
// One bounding box per thread block (see InitBodyArray reduction).
boundingBoxGpu.SetCount(gpuBodyCount / D_THREADS_PER_BLOCK);
cuSceneInfo info;
info.m_histogram = cuBuffer<unsigned>(histogramGpu);
info.m_bodyArray = cuBuffer<cuBodyProxy>(bodyBufferGpu);
info.m_bodyAabbArray = cuBuffer<cuBoundingBox>(boundingBoxGpu);
info.m_bodyAabbCell = cuBuffer<cuBodyAabbCell>(bodyAabbCellGpu0);
info.m_bodyAabbCellScrath = cuBuffer<cuBodyAabbCell>(bodyAabbCellGpu1);
info.m_transformBuffer0 = cuBuffer<cuSpatialVector>(transformBufferGpu0);
info.m_transformBuffer1 = cuBuffer<cuSpatialVector>(transformBufferGpu1);
cudaError_t cudaStatus;
ParallelExecute(CopyBodies);
*m_context->m_sceneInfoCpu = info;
cudaStatus = cudaMemcpy(m_context->m_sceneInfoGpu, &info, sizeof(cuSceneInfo), cudaMemcpyHostToDevice);
dAssert(cudaStatus == cudaSuccess);
bodyBufferGpu.ReadData(&bodyBufferCpu[0], cpuBodyCount);
// NOTE(review): this launch dereferences m_sceneInfoCpu while other launches
// use m_sceneInfoGpu - presumably the CPU copy lives in pinned/mapped
// memory; confirm, otherwise the kernel reads a host pointer.
CudaInitTransforms << <blocksCount, D_THREADS_PER_BLOCK, 0, 0 >> > (InitTransforms, *m_context->m_sceneInfoCpu);
cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
dAssert(0);
}
}
// Queues a kernel that copies each body's position/rotation from the proxy
// array into one of the two GPU transform buffers, selected by frame parity
// (the other buffer is being read back / uploaded concurrently).
void ndWorldSceneCuda::GetBodyTransforms()
{
D_TRACKTIME();
auto GetTransform = [] __device__(const cuSceneInfo& info, int frameCount)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
// The last body slot is a sentinel, hence m_size - 1.
if (index < (info.m_bodyArray.m_size - 1))
{
cuBodyProxy* src = info.m_bodyArray.m_array;
cuSpatialVector* dst = (frameCount & 1) ? info.m_transformBuffer0.m_array : info.m_transformBuffer1.m_array;
dst[index].m_linear = src[index].m_posit;
dst[index].m_angular = src[index].m_rotation;
}
};
cudaStream_t stream = m_context->m_solverComputeStream;
cuSceneInfo* const infoGpu = m_context->m_sceneInfoGpu;
ndInt32 threads = m_context->m_bodyBufferGpu.GetCount() - 1;
ndInt32 blocks = (threads + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
CudaGetBodyTransforms << <blocks, D_THREADS_PER_BLOCK, 0, stream >> > (GetTransform, *infoGpu, m_context->m_frameCounter);
//cuHostBuffer<cuSpatialVector>& cpuBuffer = m_context->m_transformBufferCpu0;
//cuDeviceBuffer<cuSpatialVector>& gpuBuffer = m_context->m_transformBufferGpu0;
//gpuBuffer.WriteData(&cpuBuffer[0], cpuBuffer.GetCount() - 1, stream);
}
// Pushes last frame's GPU transforms back onto the CPU bodies: queues the GPU
// copy (GetBodyTransforms), then applies the previously downloaded transform
// buffer to every body in parallel and fires the transform notifications.
void ndWorldSceneCuda::UpdateTransform()
{
D_TRACKTIME();
GetBodyTransforms();
auto SetTransform = ndMakeObject::ndFunction([this](ndInt32 threadIndex, ndInt32 threadCount)
{
D_TRACKTIME();
const ndArray<ndBodyKinematic*>& bodyArray = GetActiveBodyArray();
const cuSpatialVector* const data = &m_context->m_transformBufferCpu1[0];
// Exclude the sentinel body at the end of the array.
const ndStartEnd startEnd(bodyArray.GetCount() - 1, threadIndex, threadCount);
for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
{
ndBodyKinematic* const body = bodyArray[i];
const cuSpatialVector& transform = data[i];
const ndVector position(transform.m_linear.x, transform.m_linear.y, transform.m_linear.z, ndFloat32(1.0f));
const ndQuaternion rotation(ndVector(transform.m_angular.x, transform.m_angular.y, transform.m_angular.z, transform.m_angular.w));
body->SetMatrixAndCentreOfMass(rotation, position);
body->m_transformIsDirty = true;
UpdateTransformNotify(threadIndex, body);
}
});
ParallelExecute(SetTransform);
}
// Keeps the GPU body data in sync with the scene: re-uploads everything when
// the body list changed, and services resize requests the GPU posted by
// setting a buffer's m_size above its m_capacity (see CountAabb /
// ValidateGridArray) before re-publishing the scene info to the device.
void ndWorldSceneCuda::UpdateBodyList()
{
D_TRACKTIME();
bool bodyListChanged = m_bodyListChanged;
ndWorldScene::UpdateBodyList();
if (bodyListChanged)
{
LoadBodyData();
}
cuSceneInfo* const sceneInfo = m_context->m_sceneInfoCpu;
if (!sceneInfo->m_frameIsValid)
{
// The previous frame was aborted on the GPU; grow whichever buffers
// requested more room, then clear the invalid flag and retry.
cudaDeviceSynchronize();
sceneInfo->m_frameIsValid = 1;
if (sceneInfo->m_histogram.m_size > sceneInfo->m_histogram.m_capacity)
{
m_context->m_histogram.SetCount(sceneInfo->m_histogram.m_size);
sceneInfo->m_histogram = cuBuffer<unsigned>(m_context->m_histogram);
}
if (sceneInfo->m_bodyAabbCell.m_size > sceneInfo->m_bodyAabbCell.m_capacity)
{
m_context->m_bodyAabbCell.SetCount(sceneInfo->m_bodyAabbCell.m_size);
m_context->m_bodyAabbCellScrath.SetCount(sceneInfo->m_bodyAabbCell.m_size);
sceneInfo->m_bodyAabbCell = cuBuffer<cuBodyAabbCell>(m_context->m_bodyAabbCell);
sceneInfo->m_bodyAabbCellScrath = cuBuffer<cuBodyAabbCell>(m_context->m_bodyAabbCellScrath);
}
cudaError_t cudaStatus = cudaMemcpy(m_context->m_sceneInfoGpu, sceneInfo, sizeof(cuSceneInfo), cudaMemcpyHostToDevice);
dAssert(cudaStatus == cudaSuccess);
if (cudaStatus != cudaSuccess)
{
dAssert(0);
}
cudaDeviceSynchronize();
}
}
// Debug helper: downloads the histogram and asserts it is non-decreasing,
// i.e. a valid inclusive prefix sum. Always returns true so it can be used
// inside dAssert and compiled out in release builds.
bool ndWorldSceneCuda::SanityCheckPrefix() const
{
cuSceneInfo info;
cudaError_t cudaStatus;
cudaDeviceSynchronize();
cudaStatus = cudaMemcpy(&info, m_context->m_sceneInfoGpu, sizeof(cuSceneInfo), cudaMemcpyDeviceToHost);
dAssert(cudaStatus == cudaSuccess);
if (info.m_frameIsValid)
{
// NOTE(review): function-local static buffer - not thread safe and never
// freed; acceptable for a debug-only check, but confirm single-threaded use.
static ndArray<unsigned> histogram;
histogram.SetCount(info.m_histogram.m_size);
cudaStatus = cudaMemcpy(&histogram[0], info.m_histogram.m_array, histogram.GetCount() * sizeof(unsigned), cudaMemcpyDeviceToHost);
dAssert(cudaStatus == cudaSuccess);
for (int i = 1; i < histogram.GetCount(); i++)
{
dAssert(histogram[i - 1] <= histogram[i]);
}
}
if (cudaStatus != cudaSuccess)
{
dAssert(0);
}
return true;
}
// Debug helper: downloads both grid-cell arrays and asserts the primary one
// is sorted by m_key (the commented variants check alternative orderings).
// Always returns true so it can be wrapped in dAssert.
bool ndWorldSceneCuda::SanityCheckSortCells() const
{
cuSceneInfo info;
cudaError_t cudaStatus;
cudaDeviceSynchronize();
cudaStatus = cudaMemcpy(&info, m_context->m_sceneInfoGpu, sizeof(cuSceneInfo), cudaMemcpyDeviceToHost);
dAssert(cudaStatus == cudaSuccess);
if (info.m_frameIsValid)
{
// NOTE(review): function-local statics - debug-only, not thread safe.
static ndArray<cuBodyAabbCell> bodyAabbCell;
static ndArray<cuBodyAabbCell> bodyAabbCellScrath;
bodyAabbCell.SetCount(info.m_bodyAabbCell.m_size);
bodyAabbCellScrath.SetCount(info.m_bodyAabbCell.m_size);
cudaStatus = cudaMemcpy(&bodyAabbCellScrath[0], info.m_bodyAabbCellScrath.m_array, bodyAabbCellScrath.GetCount() * sizeof(cuBodyAabbCell), cudaMemcpyDeviceToHost);
dAssert(cudaStatus == cudaSuccess);
cudaStatus = cudaMemcpy(&bodyAabbCell[0], info.m_bodyAabbCell.m_array, bodyAabbCell.GetCount() * sizeof(cuBodyAabbCell), cudaMemcpyDeviceToHost);
dAssert(cudaStatus == cudaSuccess);
for (int i = 1; i < bodyAabbCell.GetCount(); i++)
{
cuBodyAabbCell key0(bodyAabbCell[i - 1]);
cuBodyAabbCell key1(bodyAabbCell[i - 0]);
//cuBodyAabbCell key0(bodyAabbCellScrath[i - 1]);
//cuBodyAabbCell key1(bodyAabbCellScrath[i - 0]);
ndUnsigned32 value0 = key0.m_key;
ndUnsigned32 value1 = key1.m_key;
//value0 = key0.m_x + key0.m_y * 1024;
//value1 = key1.m_x + key1.m_y * 1024;
//value0 = key0.m_z;
//value1 = key1.m_z;
bool test = (value0 <= value1);
dAssert(test);
if (!test)
{
break;
}
}
}
if (cudaStatus != cudaSuccess)
{
dAssert(0);
}
return true;
}
void ndWorldSceneCuda::InitBodyArray()
{
// ndWorldScene::InitBodyArray();
D_TRACKTIME();
// this has to be recreated in gpu
//ndInt32 scans[D_MAX_THREADS_COUNT][2];
//auto BuildBodyArray = ndMakeObject::ndFunction([this, &scans](ndInt32 threadIndex, ndInt32 threadCount)
//{
// D_TRACKTIME();
// const ndArray<ndBodyKinematic*>& view = GetActiveBodyArray();
//
// ndInt32* const scan = &scans[threadIndex][0];
// scan[0] = 0;
// scan[1] = 0;
//
// const ndFloat32 timestep = m_timestep;
// const ndStartEnd startEnd(view.GetCount() - 1, threadIndex, threadCount);
// for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
// {
// ndBodyKinematic* const body = view[i];
// body->ApplyExternalForces(threadIndex, timestep);
//
// body->PrepareStep(i);
// UpdateAabb(threadIndex, body);
//
// const ndInt32 key = body->m_sceneEquilibrium;
// scan[key] ++;
// }
//});
//auto CompactMovingBodies = ndMakeObject::ndFunction([this, &scans](ndInt32 threadIndex, ndInt32 threadCount)
//{
// D_TRACKTIME();
// const ndArray<ndBodyKinematic*>& activeBodyArray = GetActiveBodyArray();
// ndBodyKinematic** const sceneBodyArray = &m_sceneBodyArray[0];
//
// const ndArray<ndBodyKinematic*>& view = m_bodyList.m_view;
// ndInt32* const scan = &scans[threadIndex][0];
//
// const ndStartEnd startEnd(view.GetCount(), threadIndex, threadCount);
// for (ndInt32 i = startEnd.m_start; i < startEnd.m_end; ++i)
// {
// ndBodyKinematic* const body = activeBodyArray[i];
// const ndInt32 key = body->m_sceneEquilibrium;
// const ndInt32 index = scan[key];
// sceneBodyArray[index] = body;
// scan[key] ++;
// }
//});
//ParallelExecute(BuildBodyArray);
//ndInt32 sum = 0;
//ndInt32 threadCount = GetThreadCount();
//for (ndInt32 j = 0; j < 2; j++)
//{
// for (ndInt32 i = 0; i < threadCount; ++i)
// {
// const ndInt32 count = scans[i][j];
// scans[i][j] = sum;
// sum += count;
// }
//}
//
//ndInt32 movingBodyCount = scans[0][1] - scans[0][0];
//m_sceneBodyArray.SetCount(m_bodyList.GetCount());
//if (movingBodyCount)
//{
// ParallelExecute(CompactMovingBodies);
//}
//
//m_sceneBodyArray.SetCount(movingBodyCount);
//
//ndBodyKinematic* const sentinelBody = m_sentinelBody;
//sentinelBody->PrepareStep(GetActiveBodyArray().GetCount() - 1);
//
//sentinelBody->m_isStatic = 1;
//sentinelBody->m_autoSleep = 1;
//sentinelBody->m_equilibrium = 1;
//sentinelBody->m_equilibrium0 = 1;
//sentinelBody->m_isJointFence0 = 1;
//sentinelBody->m_isJointFence1 = 1;
//sentinelBody->m_isConstrained = 0;
//sentinelBody->m_sceneEquilibrium = 1;
//sentinelBody->m_weigh = ndFloat32(0.0f);
// Per-body device pass: builds each body's world-space shape matrix, derives
// a padded AABB from the shape's OBB, stores it on the proxy, and reduces all
// AABBs of the block into one bounding box written to m_bodyAabbArray.
auto InitBodyArray = [] __device__(cuSceneInfo& info)
{
__shared__ cuBoundingBox cacheAabb[D_THREADS_PER_BLOCK];
const unsigned threadId = threadIdx.x;
const unsigned index = threadId + blockDim.x * blockIdx.x;
const unsigned bodyCount = info.m_bodyArray.m_size - 1;
if (index < bodyCount)
{
cuBodyProxy* bodyArray = info.m_bodyArray.m_array;
cuBodyProxy& body = bodyArray[index];
// calculate shape global Matrix
body.m_globalSphapeRotation = body.m_localRotation * body.m_rotation;
cuMatrix3x3 matrix(body.m_globalSphapeRotation.GetMatrix3x3());
body.m_globalSphapePosition = matrix.RotateVector(body.m_localPosition) + body.m_posit;
matrix.m_front = matrix.m_front.Scale(body.m_scale.x);
matrix.m_up = matrix.m_up.Scale(body.m_scale.y);
matrix.m_right = matrix.m_right.Scale(body.m_scale.z);
matrix = body.m_alignRotation.GetMatrix3x3() * matrix;
// OBB transformed to world space, expanded to an axis-aligned box.
const cuVector origin(matrix.RotateVector(body.m_obbOrigin) + body.m_globalSphapePosition);
const cuVector size(matrix.m_front.Abs().Scale(body.m_obbSize.x) + matrix.m_up.Abs().Scale(body.m_obbSize.y) + matrix.m_right.Abs().Scale(body.m_obbSize.z));
const cuVector padding(1.0f / 16.0f);
const cuVector minBox(origin - size - padding);
const cuVector maxBox(origin + size + padding);
// save aabb and calculate bonding box for this thread block
body.m_minAabb = minBox;
body.m_maxAabb = maxBox;
cacheAabb[threadId] = minBox;
cacheAabb[threadId].m_min = minBox;
cacheAabb[threadId].m_max = maxBox;
}
// In the last (partial) block, fill the unused cache slots with a valid box
// so the reduction below never reads uninitialized shared memory. The branch
// condition depends only on blockIdx, so the __syncthreads inside it is
// uniform per block and safe.
const unsigned lastBlock = bodyCount / D_THREADS_PER_BLOCK;
if (lastBlock == blockIdx.x)
{
__syncthreads();
const unsigned lastId = bodyCount - D_THREADS_PER_BLOCK * lastBlock;
const cuBoundingBox box(cacheAabb[0]);
if (threadId >= lastId)
{
cacheAabb[threadId] = box;
}
}
__syncthreads();
cuBoundingBox* bBox = info.m_bodyAabbArray.m_array;
// Tree reduction of the block's boxes down to cacheAabb[0].
for (int i = D_THREADS_PER_BLOCK / 2; i; i = i >> 1)
{
if (threadId < i)
{
cacheAabb[threadId].m_min = cacheAabb[threadId].m_min.Min(cacheAabb[threadId + i].m_min);
cacheAabb[threadId].m_max = cacheAabb[threadId].m_max.Max(cacheAabb[threadId + i].m_max);
}
__syncthreads();
}
if (threadId == 0)
{
bBox[blockIdx.x].m_min = cacheAabb[0].m_min;
bBox[blockIdx.x].m_max = cacheAabb[0].m_max;
}
};
// Single-block pass that merges the per-block bounding boxes produced by
// InitBodyArray into one world box, then snaps it outward to whole scene-grid
// cells and stores it in info.m_worldBox.
// Fixes: (1) the fold loop read cacheAabb[base + threadId], indexing the
// D_THREADS_PER_BLOCK-sized shared array out of bounds for base > 0 - the
// source operand must be the global bBoxOut array; (2) the max side of the
// fold used .Min(...), shrinking the box, where the reduction below correctly
// uses .Max(...).
auto MergeAabb = [] __device__(cuSceneInfo& info)
{
__shared__ cuBoundingBox cacheAabb[D_THREADS_PER_BLOCK];
const cuBoundingBox* bBoxOut = info.m_bodyAabbArray.m_array;
const unsigned threadId = threadIdx.x;
const unsigned boxCount = info.m_bodyAabbArray.m_size - 1;
const unsigned aabbBlocks = boxCount / D_THREADS_PER_BLOCK;
const unsigned boxLastRow = boxCount - aabbBlocks * D_THREADS_PER_BLOCK;
// Seed every slot with a valid box, then overwrite with the partial tail row.
cacheAabb[threadId] = bBoxOut[0];
if (threadId < boxLastRow)
{
cacheAabb[threadId] = bBoxOut[aabbBlocks * D_THREADS_PER_BLOCK + threadId];
}
__syncthreads();
// Fold every full row of input boxes into this thread's cache slot.
unsigned base = 0;
for (int i = 0; i < aabbBlocks; i++)
{
cacheAabb[threadId].m_min = cacheAabb[threadId].m_min.Min(bBoxOut[base + threadId].m_min);
cacheAabb[threadId].m_max = cacheAabb[threadId].m_max.Max(bBoxOut[base + threadId].m_max);
base += D_THREADS_PER_BLOCK;
}
__syncthreads();
// Tree reduction down to cacheAabb[0].
for (int i = D_THREADS_PER_BLOCK / 2; i; i = i >> 1)
{
if (threadId < i)
{
cacheAabb[threadId].m_min = cacheAabb[threadId].m_min.Min(cacheAabb[threadId + i].m_min);
cacheAabb[threadId].m_max = cacheAabb[threadId].m_max.Max(cacheAabb[threadId + i].m_max);
}
__syncthreads();
}
if (threadIdx.x == 0)
{
// Quantize the world box to whole grid cells, expanding the max by one cell.
cuVector minBox((cacheAabb[0].m_min.Scale(D_CUDA_SCENE_INV_GRID_SIZE).Floor()).Scale(D_CUDA_SCENE_GRID_SIZE));
cuVector maxBox((cacheAabb[0].m_max.Scale(D_CUDA_SCENE_INV_GRID_SIZE).Floor()).Scale(D_CUDA_SCENE_GRID_SIZE) + cuVector(D_CUDA_SCENE_GRID_SIZE));
minBox.w = 0.0f;
maxBox.w = 0.0f;
info.m_worldBox.m_min = minBox;
info.m_worldBox.m_max = maxBox;
}
};
// Per-body grid-cell counting pass. Each thread computes how many scene-grid
// cells its body's AABB overlaps, then the block folds those counts into an
// inclusive prefix sum that is written to info.m_histogram. One thread per
// body; the last slot of m_bodyArray is a sentinel and is excluded.
auto CountAabb = [] __device__(cuSceneInfo& info)
{
// Scan scratch: the low D_THREADS_PER_BLOCK/2 entries stay zero so that
// cacheBuffer[threadId1 - i] never underflows during the scan below.
__shared__ unsigned cacheBuffer[D_THREADS_PER_BLOCK / 2 + D_THREADS_PER_BLOCK];
const unsigned blockId = blockIdx.x;
const unsigned bodyCount = info.m_bodyArray.m_size - 1;
const unsigned blocks = (bodyCount + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
if (blockId < blocks)
{
const unsigned threadId = threadIdx.x;
const unsigned threadId1 = D_THREADS_PER_BLOCK / 2 + threadId;
const unsigned index = threadId + blockDim.x * blockId;
cacheBuffer[threadId] = 0;
cacheBuffer[threadId1] = 0;
if (index < bodyCount)
{
cuBodyProxy* bodyArray = info.m_bodyArray.m_array;
const cuVector minBox(info.m_worldBox.m_min);
const cuVector bodyBoxMin(bodyArray[index].m_minAabb);
const cuVector bodyBoxMax(bodyArray[index].m_maxAabb);
// Integer grid coordinates (cell units) of the body's AABB, relative to
// the world box min; the +1 makes the upper bound exclusive.
const int x0 = __float2int_rd((bodyBoxMin.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE);
const int y0 = __float2int_rd((bodyBoxMin.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE);
const int z0 = __float2int_rd((bodyBoxMin.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE);
const int x1 = __float2int_rd((bodyBoxMax.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int y1 = __float2int_rd((bodyBoxMax.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int z1 = __float2int_rd((bodyBoxMax.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int count = (z1 - z0) * (y1 - y0) * (x1 - x0);
cacheBuffer[threadId1] = count;
}
__syncthreads();
// Hillis-Steele style inclusive scan; the extra barrier between the read
// and the write keeps each round race free.
for (int i = 1; i < D_THREADS_PER_BLOCK; i = i << 1)
{
int sum = cacheBuffer[threadId1] + cacheBuffer[threadId1 - i];
__syncthreads();
cacheBuffer[threadId1] = sum;
__syncthreads();
}
// Capacity the later prefix-scan passes will need; if the histogram cannot
// hold it, flag the frame invalid and request a resize via m_size.
const unsigned newCapacity = D_PREFIX_SCAN_PASSES * D_THREADS_PER_BLOCK * ((blocks + D_PREFIX_SCAN_PASSES - 1) / D_PREFIX_SCAN_PASSES) + D_THREADS_PER_BLOCK;
if (newCapacity >= info.m_histogram.m_capacity)
{
if (index == 0)
{
#ifdef _DEBUG
printf("function: CountAabb: histogram buffer overflow\n");
#endif
}
info.m_frameIsValid = 0;
info.m_histogram.m_size = info.m_histogram.m_capacity + 1;
}
else
{
unsigned* histogram = info.m_histogram.m_array;
histogram[index] = cacheBuffer[threadId1];
if (index == 0)
{
info.m_histogram.m_size = blocks * D_THREADS_PER_BLOCK;
}
}
}
};
// Emits one cuBodyAabbCell entry into the scratch array for every grid cell a
// body's AABB overlaps. The write offset for body `index` is the exclusive
// prefix taken from histogram[index - 1] (0 for the first body), so entries
// from different bodies never collide.
auto GenerateHashGrids = [] __device__(const cuSceneInfo & info)
{
const unsigned threadId = threadIdx.x;
const unsigned index = threadId + blockDim.x * blockIdx.x;
const unsigned bodyCount = info.m_bodyArray.m_size - 1;
if (index < bodyCount)
{
const unsigned* histogram = info.m_histogram.m_array;
const cuBodyProxy* bodyArray = info.m_bodyArray.m_array;
cuBodyAabbCell* hashArray = info.m_bodyAabbCellScrath.m_array;
const cuVector minBox(info.m_worldBox.m_min);
const cuVector bodyBoxMin(bodyArray[index].m_minAabb);
const cuVector bodyBoxMax(bodyArray[index].m_maxAabb);
// Same cell range computation as CountAabb; the two must stay in sync so
// the emitted entry count matches the histogram.
const int x0 = __float2int_rd((bodyBoxMin.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE);
const int y0 = __float2int_rd((bodyBoxMin.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE);
const int z0 = __float2int_rd((bodyBoxMin.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE);
const int x1 = __float2int_rd((bodyBoxMax.x - minBox.x) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int y1 = __float2int_rd((bodyBoxMax.y - minBox.y) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
const int z1 = __float2int_rd((bodyBoxMax.z - minBox.z) * D_CUDA_SCENE_INV_GRID_SIZE) + 1;
cuBodyAabbCell hash;
hash.m_id = index;
hash.m_key = 0;
unsigned start = index ? histogram[index - 1] : 0;
for (int z = z0; z < z1; z++)
{
hash.m_z = z;
for (int y = y0; y < y1; y++)
{
hash.m_y = y;
for (int x = x0; x < x1; x++)
{
hash.m_x = x;
hashArray[start] = hash;
start++;
}
}
}
}
};
// Single-thread pass run after the prefix scan. Reads the total grid-cell
// count from the last histogram entry, grows the cell buffers (by flagging
// the frame invalid and publishing the required size through m_size) when
// they are too small, otherwise appends a terminator cell with all fields
// set to -1, then performs the same capacity check for the histogram that
// the next (pair counting) pass will need.
// Fix: the first diagnostic previously said "histogram buffer overflow"
// even though this branch detects a cell-buffer overflow.
auto ValidateGridArray = [] __device__(cuSceneInfo & info)
{
const unsigned lastIndex = info.m_bodyArray.m_size - 2;
const unsigned* histogram = info.m_histogram.m_array;
// Inclusive scan => last entry is the total number of emitted cells.
const unsigned cellCount = histogram[lastIndex];
if ((cellCount + D_THREADS_PER_BLOCK) > info.m_bodyAabbCellScrath.m_capacity)
{
#ifdef _DEBUG
printf("function: ValidateGridArray: cell buffer overflow\n");
#endif
info.m_frameIsValid = 0;
// Request a resize: the host side reallocates when m_size > m_capacity.
info.m_bodyAabbCell.m_size = cellCount + D_THREADS_PER_BLOCK;
info.m_bodyAabbCellScrath.m_size = cellCount + D_THREADS_PER_BLOCK;
}
else
{
cuBodyAabbCell* hashArray = info.m_bodyAabbCell.m_array;
cuBodyAabbCell* hashArrayScrath = info.m_bodyAabbCellScrath.m_array;
// Sentinel entry: maximal key so sorted consumers can stop at it.
cuBodyAabbCell hash;
hash.m_value = 0;
hash.m_id = unsigned(-1);
hash.m_x = unsigned(-1);
hash.m_y = unsigned(-1);
hash.m_z = unsigned(-1);
const long long value = hash.m_value;
hashArray[cellCount].m_value = value;
hashArrayScrath[cellCount].m_value = value;
info.m_bodyAabbCell.m_size = cellCount + 1;
info.m_bodyAabbCellScrath.m_size = cellCount + 1;
}
// check new histogram size.
const unsigned histogramGridBlockSize = (1 << D_AABB_GRID_CELL_BITS);
const unsigned blocksCount = (cellCount + histogramGridBlockSize - 1) / histogramGridBlockSize;
const unsigned newCapacity = (blocksCount + 2) * histogramGridBlockSize;
if (newCapacity >= info.m_histogram.m_capacity)
{
#ifdef _DEBUG
printf("function: ValidateGridArray: histogram buffer overflow\n");
#endif
info.m_frameIsValid = 0;
info.m_histogram.m_size = newCapacity;
}
else
{
info.m_histogram.m_size = blocksCount * histogramGridBlockSize;
}
};
// Device lambda: for each sorted grid cell, counts how many following cells
// share the same hash key (candidate broad-phase pairs), performs a
// block-local inclusive scan of the counts in shared memory, and stores the
// scanned values past the prefix-scan-aligned region of the histogram.
auto CalculateBodyPairsCount = [] __device__(cuSceneInfo & info)
{
// The lower D_THREADS_PER_BLOCK/2 slots are a zero guard so that
// cacheBuffer[threadId1 - i] never reads out of bounds during the scan.
__shared__ unsigned cacheBuffer[D_THREADS_PER_BLOCK / 2 + D_THREADS_PER_BLOCK];
const unsigned blockId = blockIdx.x;
const unsigned cellCount = info.m_bodyAabbCell.m_size - 1;
const unsigned blocks = (cellCount + blockDim.x - 1) / blockDim.x;
if (blockId < blocks)
{
const unsigned threadId = threadIdx.x;
const unsigned threadId1 = D_THREADS_PER_BLOCK / 2 + threadId;
int index = threadId + blockDim.x * blockIdx.x;
cacheBuffer[threadId] = 0;
cacheBuffer[threadId1] = 0;
if (index < cellCount)
{
const cuBodyAabbCell* hashArray = info.m_bodyAabbCell.m_array;
unsigned count = 0;
// Walk forward while keys match.  No explicit bound check: relies on
// the sentinel cell appended by ValidateGridArray to end the run.
const cuBodyAabbCell& cell = hashArray[index];
for (int i = index + 1; cell.m_key == hashArray[i].m_key; i++)
{
count++;
}
cacheBuffer[threadId1] = count;
}
__syncthreads();
// Hillis-Steele inclusive scan; every thread of the block executes the
// loop so all __syncthreads() barriers are reached uniformly.
for (int i = 1; i < D_THREADS_PER_BLOCK; i = i << 1)
{
int sum = cacheBuffer[threadId1] + cacheBuffer[threadId1 - i];
__syncthreads();
cacheBuffer[threadId1] = sum;
__syncthreads();
}
if (index < cellCount)
{
unsigned* scan = info.m_histogram.m_array;
const unsigned prefixScanSuperBlockAlign = D_PREFIX_SCAN_PASSES * D_THREADS_PER_BLOCK;
// Align the write offset to a whole prefix-scan super block so the scan
// results do not clobber the earlier histogram contents.
const unsigned offset = (cellCount + prefixScanSuperBlockAlign) & (-prefixScanSuperBlockAlign);
scan[offset + index] = cacheBuffer[threadId1];
}
}
};
cudaStream_t stream = m_context->m_solverComputeStream;
cuSceneInfo* const infoGpu = m_context->m_sceneInfoGpu;
ndInt32 threads = m_context->m_bodyBufferGpu.GetCount() - 1;
ndInt32 bodyBlocksCount = (threads + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
CudaInitBodyArray << <bodyBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (InitBodyArray, *infoGpu);
CudaMergeAabb << <1, D_THREADS_PER_BLOCK, 0, stream >> > (MergeAabb, *infoGpu);
CudaCountAabb << <bodyBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (CountAabb, *infoGpu);
CudaPrefixScan(m_context, D_THREADS_PER_BLOCK);
dAssert(SanityCheckPrefix());
CudaValidateGridBuffer << <1, 1, 0, stream >> > (ValidateGridArray, *infoGpu);
CudaGenerateGridHash << <bodyBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (GenerateHashGrids, *infoGpu);
CudaBodyAabbCellSortBuffer(m_context);
dAssert(SanityCheckSortCells());
// ndInt32 cellsBlocksCount = (m_context->m_bodyAabbCell.m_capacity + D_THREADS_PER_BLOCK - 1) / D_THREADS_PER_BLOCK;
// dAssert(cellsBlocksCount > 0);
// CudaCalculateBodyPairsCount << <cellsBlocksCount, D_THREADS_PER_BLOCK, 0, stream >> > (CalculateBodyPairsCount, *infoGpu);
////dAssert(SanityCheckPrefix());
//
//
// //auto GetKey____ = [] __device__(const unsigned& item)
// //{
// // return 0;
// //};
// //XXXXXXX << <1, 1, 0, stream >> > (GetKey____);
}
|
c8315158c0a4b2b269a90e7d27ba5b362f66829e.hip | // !!! This is a file automatically generated by hipify!!!
#include "DNN.cuh"
#include "Utils/DataSet.cuh"
#include <chrono>
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::milliseconds ms;
typedef std::chrono::duration<float> fsec;
// Trains a small MLP (784 -> 20 ReLU -> 10 sigmoid, MSE loss, SGD) on an
// MNIST file pair and prints the wall-clock training time.
// NOTE(review): hipblasCreate/hipblasDestroy return codes are unchecked, the
// dataset paths are machine-specific absolute Windows paths, and the layers
// are heap-allocated -- presumably NeuralNet takes ownership and frees them;
// confirm against NeuralNet's destructor.
int main() {
hipblasCreate(&handle);
NeuralNet net;
Layer* layer1 = new FullyConnected<ReLU>(28 * 28, 20);
Layer* layer2 = new FullyConnected<Sigmoid>(20, 10);
SGD opt;
VerboseCallback callback;
net.add_layer(layer1);
net.add_layer(layer2);
net.set_output(new RegressionMSE());
net.set_callback(callback);
Matrix x, y;
int width, height;
internal::ReadMNIST("C:/Users/NitroPC/source/repos/NeuralNetwork_CUDA_2/NeuralNetwork_CUDA_2/images/test-images.d24",x,width,height);
internal::ReadMNIST_label("C:/Users/NitroPC/source/repos/NeuralNetwork_CUDA_2/NeuralNetwork_CUDA_2/images/test-labels.d24", y);
///std::cout << "input:\n";
///x.copyHostToDevice();x.print();
///std::cout << "target:\n";
///y.copyHostToDevice();
///y.print();
// Argument semantics are defined in DNN.cuh -- presumably (seed, learning
// rate, flag) and (optimizer, inputs, targets, batch, epochs, flag); verify.
net.init(0, 0.01f, 1);
auto t0 = Time::now();
net.fit(opt, x, y, 1024, 1000, 1);
auto t1 = Time::now();
fsec fs = t1 - t0;
std::cout << fs.count() << "s\n";
hipblasDestroy(handle);
getchar();
return 0;
}
/*int main() {
hipblasCreate(&handle);
NeuralNet net;
Layer* layer1 = new FullyConnected<Sigmoid>(4, 2);
Layer* layer2 = new FullyConnected<Sigmoid>(2, 1);
SGD opt;
VerboseCallback callback;
net.add_layer(layer1);
net.add_layer(layer2);
net.set_output(new RegressionMSE());
net.set_callback(callback);
Matrix x, y;
x.resize(4, 2); x.allocateMemory();
x[0] = 1;x[1] = 5;x[2] = 2;x[3] = 6;x[4] = 3;x[5] = 7;x[6] = 4;x[7] = 8;
x.copyHostToDevice();
y.resize(1, 2); y.allocateMemory();
y[0] = 0.2f; y[1] = 0.2f;
y.copyHostToDevice();
std::cout << "input:\n";
x.print();
std::cout << "target:\n";
y.print();
net.init(0, 1.f, 1);
net.fit(opt, x, y, 2, 200, 1);
hipblasDestroy(handle);
return 0;
}*/ | c8315158c0a4b2b269a90e7d27ba5b362f66829e.cu | #include "DNN.cuh"
#include "Utils/DataSet.cuh"
#include <chrono>
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::milliseconds ms;
typedef std::chrono::duration<float> fsec;
// Trains a small MLP (784 -> 20 ReLU -> 10 sigmoid, MSE loss, SGD) on an
// MNIST file pair and prints the wall-clock training time.
// NOTE(review): cublasCreate/cublasDestroy return codes are unchecked, the
// dataset paths are machine-specific absolute Windows paths, and the layers
// are heap-allocated -- presumably NeuralNet takes ownership and frees them;
// confirm against NeuralNet's destructor.
int main() {
cublasCreate(&handle);
NeuralNet net;
Layer* layer1 = new FullyConnected<ReLU>(28 * 28, 20);
Layer* layer2 = new FullyConnected<Sigmoid>(20, 10);
SGD opt;
VerboseCallback callback;
net.add_layer(layer1);
net.add_layer(layer2);
net.set_output(new RegressionMSE());
net.set_callback(callback);
Matrix x, y;
int width, height;
internal::ReadMNIST("C:/Users/NitroPC/source/repos/NeuralNetwork_CUDA_2/NeuralNetwork_CUDA_2/images/test-images.d24",x,width,height);
internal::ReadMNIST_label("C:/Users/NitroPC/source/repos/NeuralNetwork_CUDA_2/NeuralNetwork_CUDA_2/images/test-labels.d24", y);
///std::cout << "input:\n";
///x.copyHostToDevice();x.print();
///std::cout << "target:\n";
///y.copyHostToDevice();
///y.print();
// Argument semantics are defined in DNN.cuh -- presumably (seed, learning
// rate, flag) and (optimizer, inputs, targets, batch, epochs, flag); verify.
net.init(0, 0.01f, 1);
auto t0 = Time::now();
net.fit(opt, x, y, 1024, 1000, 1);
auto t1 = Time::now();
fsec fs = t1 - t0;
std::cout << fs.count() << "s\n";
cublasDestroy(handle);
getchar();
return 0;
}
/*int main() {
cublasCreate(&handle);
NeuralNet net;
Layer* layer1 = new FullyConnected<Sigmoid>(4, 2);
Layer* layer2 = new FullyConnected<Sigmoid>(2, 1);
SGD opt;
VerboseCallback callback;
net.add_layer(layer1);
net.add_layer(layer2);
net.set_output(new RegressionMSE());
net.set_callback(callback);
Matrix x, y;
x.resize(4, 2); x.allocateMemory();
x[0] = 1;x[1] = 5;x[2] = 2;x[3] = 6;x[4] = 3;x[5] = 7;x[6] = 4;x[7] = 8;
x.copyHostToDevice();
y.resize(1, 2); y.allocateMemory();
y[0] = 0.2f; y[1] = 0.2f;
y.copyHostToDevice();
std::cout << "input:\n";
x.print();
std::cout << "target:\n";
y.print();
net.init(0, 1.f, 1);
net.fit(opt, x, y, 2, 200, 1);
cublasDestroy(handle);
return 0;
}*/ |
bd28d9b7b289b4a85132d30833f4fb00290aa20f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2016 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
// SAXPY over a 1-D rectangular index space: z[p] = alpha * x[p] + y[p].
// One thread per point starting at bounds.lo; threads whose point falls
// outside `bounds` do nothing, so the launch grid may safely overshoot.
__global__
void gpu_saxpy(const float alpha,
//const int num_elements,
Rect<1> bounds,
RegionAccessor<AccessorType::Affine<1>, float> ra_x,
RegionAccessor<AccessorType::Affine<1>, float> ra_y,
RegionAccessor<AccessorType::Affine<1>, float> ra_z)
// const float *x, const float *y, float *z)
{
Point<1> p = bounds.lo + make_point(blockIdx.x*blockDim.x+threadIdx.x);
if(bounds.contains(p))
ra_z[p] = alpha * ra_x[p] + ra_y[p];
}
// Realm task entry point: unpacks SaxpyArgs, converts the three region
// instances to affine accessors, and launches gpu_saxpy over the task's
// bounds.  Intentionally returns without synchronizing -- the runtime is
// expected to track the kernel's completion (see comment at the end).
__host__
void gpu_saxpy_task(const void *args, size_t arglen,
const void *userdata, size_t userlen, Processor p)
{
assert(arglen == sizeof(SaxpyArgs));
const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
printf("Running GPU Saxpy Task\n\n");
// get the generic accessors for each of our three instances
RegionAccessor<AccessorType::Generic> ra_xg = saxpy_args->x_inst.get_accessor();
RegionAccessor<AccessorType::Generic> ra_yg = saxpy_args->y_inst.get_accessor();
RegionAccessor<AccessorType::Generic> ra_zg = saxpy_args->z_inst.get_accessor();
// now convert them to typed, "affine" accessors that we can use like arrays
RegionAccessor<AccessorType::Affine<1>, float> ra_x = ra_xg.typeify<float>().convert<AccessorType::Affine<1> >();
RegionAccessor<AccessorType::Affine<1>, float> ra_y = ra_yg.typeify<float>().convert<AccessorType::Affine<1> >();
RegionAccessor<AccessorType::Affine<1>, float> ra_z = ra_zg.typeify<float>().convert<AccessorType::Affine<1> >();
// 256 threads per block, ceil-divide so the grid covers every element.
size_t num_elements = saxpy_args->bounds.volume();
size_t cta_threads = 256;
size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
hipLaunchKernelGGL(( gpu_saxpy), dim3(total_ctas), dim3(cta_threads), 0, 0, saxpy_args->alpha, saxpy_args->bounds,
ra_x, ra_y, ra_z);
// LOOK: NO WAIT! :)
}
| bd28d9b7b289b4a85132d30833f4fb00290aa20f.cu | /* Copyright 2016 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
// SAXPY over a 1-D rectangular index space: z[p] = alpha * x[p] + y[p].
// One thread per point starting at bounds.lo; threads whose point falls
// outside `bounds` do nothing, so the launch grid may safely overshoot.
__global__
void gpu_saxpy(const float alpha,
//const int num_elements,
Rect<1> bounds,
RegionAccessor<AccessorType::Affine<1>, float> ra_x,
RegionAccessor<AccessorType::Affine<1>, float> ra_y,
RegionAccessor<AccessorType::Affine<1>, float> ra_z)
// const float *x, const float *y, float *z)
{
Point<1> p = bounds.lo + make_point(blockIdx.x*blockDim.x+threadIdx.x);
if(bounds.contains(p))
ra_z[p] = alpha * ra_x[p] + ra_y[p];
}
// Realm task entry point: unpacks SaxpyArgs, converts the three region
// instances to affine accessors, and launches gpu_saxpy over the task's
// bounds.  Intentionally returns without synchronizing -- the runtime is
// expected to track the kernel's completion (see comment at the end).
__host__
void gpu_saxpy_task(const void *args, size_t arglen,
const void *userdata, size_t userlen, Processor p)
{
assert(arglen == sizeof(SaxpyArgs));
const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
printf("Running GPU Saxpy Task\n\n");
// get the generic accessors for each of our three instances
RegionAccessor<AccessorType::Generic> ra_xg = saxpy_args->x_inst.get_accessor();
RegionAccessor<AccessorType::Generic> ra_yg = saxpy_args->y_inst.get_accessor();
RegionAccessor<AccessorType::Generic> ra_zg = saxpy_args->z_inst.get_accessor();
// now convert them to typed, "affine" accessors that we can use like arrays
RegionAccessor<AccessorType::Affine<1>, float> ra_x = ra_xg.typeify<float>().convert<AccessorType::Affine<1> >();
RegionAccessor<AccessorType::Affine<1>, float> ra_y = ra_yg.typeify<float>().convert<AccessorType::Affine<1> >();
RegionAccessor<AccessorType::Affine<1>, float> ra_z = ra_zg.typeify<float>().convert<AccessorType::Affine<1> >();
// 256 threads per block, ceil-divide so the grid covers every element.
size_t num_elements = saxpy_args->bounds.volume();
size_t cta_threads = 256;
size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
gpu_saxpy<<<total_ctas, cta_threads>>>(saxpy_args->alpha, saxpy_args->bounds,
ra_x, ra_y, ra_z);
// LOOK: NO WAIT! :)
}
|
2a5e1ebbed946481402fd316f46b4616966e7a67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
/* nbody.cu */
/*
this is a version of the implementation that is supposed to be run on an NVIDIA GPU.
It's a rather straightforward design, with each body calculating its own force vector
from all other bodies in one thread. I think this is appropriate granularity-wise,
if I wanted to use a thread for every (i,j) pair I would potentially spawn
trillions of threads, which is not favorable.
*/
#include <iostream>
#include <fstream>
#include <cmath>
#include <cstdio>
#include <string>
#include <algorithm>
#include <chrono>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <cstdlib>
#include <random>
using namespace std;
using namespace std::chrono;
__device__ __constant__ float G = 6.674e-11;
#define BLOCKSIZE 512
float *xPos;
float *yPos;
float *xVel;
float *yVel;
float *mass;
uint64_t n;
uint64_t currentTime;
int32_t gifW, gifH, gifDelay;
float timeInSeconds;
float gflops;
// Returns the number of simulation steps completed so far (module state).
uint64_t getCurrentTime()
{
return currentTime;
}
// Returns the number of bodies in the current simulation (module state).
uint64_t getCount()
{
return n;
}
// One Euler step of the all-pairs 2-D N-body update.  Each thread owns body
// i: it accumulates the (softened) inverse-square force contribution from
// every other body, then integrates velocity and position in place (unit
// time step).  Launch: 1-D grid; threads past `count` exit immediately.
// Estimated cost: 19*(count-1) + 14 flops per body per step (the total used
// by simulate()'s GFLOP/s estimate).
__global__ void updateKernel(float *xPos, float *yPos, float *xVel, float *yVel, float *mass, uint64_t count)
{
	// 64-bit index: the original 32-bit `int` index and `unsigned int` loop
	// counter silently truncate/overflow once count exceeds 2^31 bodies.
	const uint64_t i = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= count)
		return;
	// Local gravitational constant; the `f` suffix avoids a double literal
	// being narrowed inside a float kernel.
	const float G = 6.674e-11f;
	float Fx = 0.0f;
	float Fy = 0.0f;
	for (uint64_t j = 0; j < count; j++)
	{
		if (j != i)
		{
			float dx = xPos[j] - xPos[i];
			float dy = yPos[j] - yPos[i];
			// +0.001f softening keeps r2 strictly positive (no self/near blowup).
			float r2 = dx*dx + dy*dy + 0.001f;
			float invertedR2 = 1.0f / r2;
			Fx += dx * invertedR2 * mass[j];
			Fy += dy * invertedR2 * mass[j];
		}
	}
	// NOTE(review): velocity update multiplies by mass[i], matching the
	// original formulation -- verify against the intended physics.
	xVel[i] += G * Fx * mass[i];
	yVel[i] += G * Fy * mass[i];
	xPos[i] += xVel[i];
	yPos[i] += yVel[i];
}
// Host wrapper: advances the simulation by one step.  Launches updateKernel
// over all `n` bodies, checks for launch errors, and blocks until the step
// finishes (the synchronize also surfaces asynchronous execution errors and
// keeps the host-side step counter in lock-step with the device).
void optimizedUpdate() // estimated FLOP counts in comments on each line
{
hipError_t cudaStatus;
// Ceil-divide so the grid covers every body.
uint64_t nblocks = (n + BLOCKSIZE - 1) / BLOCKSIZE;
hipLaunchKernelGGL(( updateKernel), dim3(nblocks), dim3(BLOCKSIZE), 0, 0, xPos, yPos, xVel, yVel, mass, n);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
cout << "update failed: " << hipGetErrorString(cudaStatus) << endl;
hipDeviceSynchronize();
currentTime++;
}
// Stores the output canvas width/height and frame delay in module state;
// the dimensions also bound the random initial body positions.
void setGifProps(int w, int h, int d)
{
gifW = w;
gifH = h;
gifDelay = d;
}
// Single-thread device initializer: thread 0 fills all body arrays serially
// with uniform random positions inside the w x h canvas and random masses in
// [10000, 20000); the last body is a heavy (100000) anchor at the origin.
// NOTE(review): requires count >= 1 -- count == 0 would wrap `count - 1`
// (uint64_t) and loop astronomically; confirm callers guarantee this.
__global__ void initializeBodies(float *xPos, float *yPos, float *xVel, float *yVel, float *mass, uint64_t count, int w, int h, int seed)
{
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID >= 1)
return;
// One hiprand stream, seeded from the host-supplied seed.
hiprandState_t state;
hiprand_init(seed, 0, 0, &state);
for (uint64_t i = 0; i < count - 1; ++i)
{
xPos[i] = hiprand_uniform(&state) * w - w / 2;
yPos[i] = hiprand_uniform(&state) * h - h / 2;
mass[i] = hiprand_uniform(&state) * 10000.0 + 10000.0;
xVel[i] = yVel[i] = 0.0;
}
// Central heavy body, initially at rest.
xPos[count - 1] = 0.0;
yPos[count - 1] = 0.0;
mass[count - 1] = 100000.0;
xVel[count - 1] = yVel[count - 1] = 0.0;
}
// Allocates the device body arrays and initializes them on the GPU via
// initializeBodies (only one of the 32 launched threads does the work).
// NOTE(review): hipMalloc return codes and the launch result are unchecked.
void randomBodies_onDev(uint64_t count) // initialize everything on the device using a different kernel
{
hipMalloc((void**)&xPos, count * sizeof(float));
hipMalloc((void**)&yPos, count * sizeof(float));
hipMalloc((void**)&xVel, count * sizeof(float));
hipMalloc((void**)&yVel, count * sizeof(float));
hipMalloc((void**)&mass, count * sizeof(float));
// 1 thread is enough
initializeBodies << <1, 32 >> > (xPos, yPos, xVel, yVel, mass, count, gifW, gifH, unsigned(time(NULL)));
n = count;
}
// Allocates the device body arrays, builds the initial state on the host
// (random positions inside the canvas, random masses, a heavy anchor at the
// origin), and copies everything to the device.
// NOTE(review): `default_random_engine` is default-constructed (unseeded),
// so every run produces the identical layout; hipMalloc/hipMemcpy return
// codes are unchecked; requires count >= 1 (uint64_t `count - 1` wraps at 0).
void randomBodies(uint64_t count) // initialize on the host, then copy over to device
{
hipMalloc((void**)&xPos, count * sizeof(float));
hipMalloc((void**)&yPos, count * sizeof(float));
hipMalloc((void**)&xVel, count * sizeof(float));
hipMalloc((void**)&yVel, count * sizeof(float));
hipMalloc((void**)&mass, count * sizeof(float));
default_random_engine generator;
// Integer pixel coordinates centered on the canvas; stored as float.
std::uniform_int_distribution<int> xpos(-gifW / 2, gifW / 2);
std::uniform_int_distribution<int> ypos(-gifH / 2, gifH / 2);
std::uniform_real_distribution<float> massgen(10000.0, 20000.0);
float *xPos_h = new float[count];
float *yPos_h = new float[count];
float *xVel_h = new float[count];
float *yVel_h = new float[count];
float *mass_h = new float[count];
for (uint64_t i = 0; i < count - 1; ++i)
{
xPos_h[i] = xpos(generator);
yPos_h[i] = ypos(generator);
mass_h[i] = massgen(generator);
xVel_h[i] = yVel_h[i] = 0.0;
}
// Central heavy body, initially at rest.
xPos_h[count - 1] = 0.0;
yPos_h[count - 1] = 0.0;
mass_h[count - 1] = 100000.0;
xVel_h[count - 1] = yVel_h[count - 1] = 0.0;
n = count;
hipMemcpy(xPos, xPos_h, count * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(yPos, yPos_h, count * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(xVel, xVel_h, count * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(yVel, yVel_h, count * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(mass, mass_h, count * sizeof(float), hipMemcpyHostToDevice);
delete[] xPos_h;
delete[] yPos_h;
delete[] xVel_h;
delete[] yVel_h;
delete[] mass_h;
}
// Runs `iters` steps of an n-body simulation with `bodies` bodies, then
// stores wall-clock time and an estimated GFLOP/s rate in module state and
// frees the device arrays.  The flop estimate matches the per-line counts
// annotated in updateKernel.
void simulate(int bodies, int iters)
{
setGifProps(1024, 1024, 1);
randomBodies(bodies);
high_resolution_clock::time_point start = high_resolution_clock::now();
for (int i = 0; i < iters; ++i)
optimizedUpdate();
float finish = duration_cast<duration<float>>(high_resolution_clock::now() - start).count();
// (19*(n-1) + 14)*n*k total floating point operations == (19n-5)*n*k
uint64_t appxFlops = (19 * getCount() - 5)*getCount()*iters;
timeInSeconds = finish;
gflops = 1e-9 * appxFlops / finish;
hipFree(xPos);
hipFree(yPos);
hipFree(xVel);
hipFree(yVel);
hipFree(mass);
}
// Interactive driver: reads body and iteration counts from stdin, runs the
// simulation, and prints tab-separated stats (count, steps, seconds, GFLOP/s).
// NOTE(review): system("PAUSE") is Windows-only.
int main(int argc, char **argv)
{
cout << "Number of bodies: ";
int b;
cin >> b;
cout << "Number of iterations: ";
int k;
cin >> k;
cout << "CUDA" << /*" threads\t"*/"\t";
simulate(b, k);
cout << getCount() << /*" bodies\t"*/"\t" << getCurrentTime() << /*" iterations\t"*/"\t" << timeInSeconds << /*" seconds\t"*/"\t" << gflops /*<< " GFlops/s." */<< endl;
//cout << getCount() << " bodies\n" << getCurrentTime() << " iterations\n" << timeInSeconds << " seconds\n" << gflops << " GFlops/s." << endl;
system("PAUSE");
return 0;
}
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/* nbody.cu */
/*
this is a version of the implementation that is supposed to be run on an NVIDIA GPU.
It's a rather straightforward design, with each body calculating its own force vector
from all other bodies in one thread. I think this is appropriate granularity-wise,
if I wanted to use a thread for every (i,j) pair I would potentially spawn
trillions of threads, which is not favorable.
*/
#include <iostream>
#include <fstream>
#include <cmath>
#include <cstdio>
#include <string>
#include <algorithm>
#include <chrono>
#include <curand.h>
#include <curand_kernel.h>
#include <cstdlib>
#include <random>
using namespace std;
using namespace std::chrono;
__device__ __constant__ float G = 6.674e-11;
#define BLOCKSIZE 512
float *xPos;
float *yPos;
float *xVel;
float *yVel;
float *mass;
uint64_t n;
uint64_t currentTime;
int32_t gifW, gifH, gifDelay;
float timeInSeconds;
float gflops;
// Returns the number of simulation steps completed so far (module state).
uint64_t getCurrentTime()
{
return currentTime;
}
// Returns the number of bodies in the current simulation (module state).
uint64_t getCount()
{
return n;
}
// One Euler step of the all-pairs 2-D N-body update.  Each thread owns body
// i: it accumulates the (softened) inverse-square force contribution from
// every other body, then integrates velocity and position in place (unit
// time step).  Launch: 1-D grid; threads past `count` exit immediately.
// Estimated cost: 19*(count-1) + 14 flops per body per step (the total used
// by simulate()'s GFLOP/s estimate).
__global__ void updateKernel(float *xPos, float *yPos, float *xVel, float *yVel, float *mass, uint64_t count)
{
	// 64-bit index: the original 32-bit `int` index and `unsigned int` loop
	// counter silently truncate/overflow once count exceeds 2^31 bodies.
	const uint64_t i = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= count)
		return;
	// Local gravitational constant; the `f` suffix avoids a double literal
	// being narrowed inside a float kernel.
	const float G = 6.674e-11f;
	float Fx = 0.0f;
	float Fy = 0.0f;
	for (uint64_t j = 0; j < count; j++)
	{
		if (j != i)
		{
			float dx = xPos[j] - xPos[i];
			float dy = yPos[j] - yPos[i];
			// +0.001f softening keeps r2 strictly positive (no self/near blowup).
			float r2 = dx*dx + dy*dy + 0.001f;
			float invertedR2 = 1.0f / r2;
			Fx += dx * invertedR2 * mass[j];
			Fy += dy * invertedR2 * mass[j];
		}
	}
	// NOTE(review): velocity update multiplies by mass[i], matching the
	// original formulation -- verify against the intended physics.
	xVel[i] += G * Fx * mass[i];
	yVel[i] += G * Fy * mass[i];
	xPos[i] += xVel[i];
	yPos[i] += yVel[i];
}
// Host wrapper: advances the simulation by one step.  Launches updateKernel
// over all `n` bodies, checks for launch errors, and blocks until the step
// finishes (the synchronize also surfaces asynchronous execution errors and
// keeps the host-side step counter in lock-step with the device).
void optimizedUpdate() // estimated FLOP counts in comments on each line
{
cudaError_t cudaStatus;
// Ceil-divide so the grid covers every body.
uint64_t nblocks = (n + BLOCKSIZE - 1) / BLOCKSIZE;
updateKernel<<<nblocks, BLOCKSIZE>>>(xPos, yPos, xVel, yVel, mass, n);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
cout << "update failed: " << cudaGetErrorString(cudaStatus) << endl;
cudaDeviceSynchronize();
currentTime++;
}
// Stores the output canvas width/height and frame delay in module state;
// the dimensions also bound the random initial body positions.
void setGifProps(int w, int h, int d)
{
gifW = w;
gifH = h;
gifDelay = d;
}
// Single-thread device initializer: thread 0 fills all body arrays serially
// with uniform random positions inside the w x h canvas and random masses in
// [10000, 20000); the last body is a heavy (100000) anchor at the origin.
// NOTE(review): requires count >= 1 -- count == 0 would wrap `count - 1`
// (uint64_t) and loop astronomically; confirm callers guarantee this.
__global__ void initializeBodies(float *xPos, float *yPos, float *xVel, float *yVel, float *mass, uint64_t count, int w, int h, int seed)
{
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if (threadID >= 1)
return;
// One curand stream, seeded from the host-supplied seed.
curandState_t state;
curand_init(seed, 0, 0, &state);
for (uint64_t i = 0; i < count - 1; ++i)
{
xPos[i] = curand_uniform(&state) * w - w / 2;
yPos[i] = curand_uniform(&state) * h - h / 2;
mass[i] = curand_uniform(&state) * 10000.0 + 10000.0;
xVel[i] = yVel[i] = 0.0;
}
// Central heavy body, initially at rest.
xPos[count - 1] = 0.0;
yPos[count - 1] = 0.0;
mass[count - 1] = 100000.0;
xVel[count - 1] = yVel[count - 1] = 0.0;
}
// Allocates the device body arrays and initializes them on the GPU via
// initializeBodies (only one of the 32 launched threads does the work).
// NOTE(review): cudaMalloc return codes and the launch result are unchecked.
void randomBodies_onDev(uint64_t count) // initialize everything on the device using a different kernel
{
cudaMalloc((void**)&xPos, count * sizeof(float));
cudaMalloc((void**)&yPos, count * sizeof(float));
cudaMalloc((void**)&xVel, count * sizeof(float));
cudaMalloc((void**)&yVel, count * sizeof(float));
cudaMalloc((void**)&mass, count * sizeof(float));
// 1 thread is enough
initializeBodies << <1, 32 >> > (xPos, yPos, xVel, yVel, mass, count, gifW, gifH, unsigned(time(NULL)));
n = count;
}
// Allocates the device body arrays, builds the initial state on the host
// (random positions inside the canvas, random masses, a heavy anchor at the
// origin), and copies everything to the device.
// NOTE(review): `default_random_engine` is default-constructed (unseeded),
// so every run produces the identical layout; cudaMalloc/cudaMemcpy return
// codes are unchecked; requires count >= 1 (uint64_t `count - 1` wraps at 0).
void randomBodies(uint64_t count) // initialize on the host, then copy over to device
{
cudaMalloc((void**)&xPos, count * sizeof(float));
cudaMalloc((void**)&yPos, count * sizeof(float));
cudaMalloc((void**)&xVel, count * sizeof(float));
cudaMalloc((void**)&yVel, count * sizeof(float));
cudaMalloc((void**)&mass, count * sizeof(float));
default_random_engine generator;
// Integer pixel coordinates centered on the canvas; stored as float.
std::uniform_int_distribution<int> xpos(-gifW / 2, gifW / 2);
std::uniform_int_distribution<int> ypos(-gifH / 2, gifH / 2);
std::uniform_real_distribution<float> massgen(10000.0, 20000.0);
float *xPos_h = new float[count];
float *yPos_h = new float[count];
float *xVel_h = new float[count];
float *yVel_h = new float[count];
float *mass_h = new float[count];
for (uint64_t i = 0; i < count - 1; ++i)
{
xPos_h[i] = xpos(generator);
yPos_h[i] = ypos(generator);
mass_h[i] = massgen(generator);
xVel_h[i] = yVel_h[i] = 0.0;
}
// Central heavy body, initially at rest.
xPos_h[count - 1] = 0.0;
yPos_h[count - 1] = 0.0;
mass_h[count - 1] = 100000.0;
xVel_h[count - 1] = yVel_h[count - 1] = 0.0;
n = count;
cudaMemcpy(xPos, xPos_h, count * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(yPos, yPos_h, count * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(xVel, xVel_h, count * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(yVel, yVel_h, count * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(mass, mass_h, count * sizeof(float), cudaMemcpyHostToDevice);
delete[] xPos_h;
delete[] yPos_h;
delete[] xVel_h;
delete[] yVel_h;
delete[] mass_h;
}
// Runs `iters` steps of an n-body simulation with `bodies` bodies, then
// stores wall-clock time and an estimated GFLOP/s rate in module state and
// frees the device arrays.  The flop estimate matches the per-line counts
// annotated in updateKernel.
void simulate(int bodies, int iters)
{
setGifProps(1024, 1024, 1);
randomBodies(bodies);
high_resolution_clock::time_point start = high_resolution_clock::now();
for (int i = 0; i < iters; ++i)
optimizedUpdate();
float finish = duration_cast<duration<float>>(high_resolution_clock::now() - start).count();
// (19*(n-1) + 14)*n*k total floating point operations == (19n-5)*n*k
uint64_t appxFlops = (19 * getCount() - 5)*getCount()*iters;
timeInSeconds = finish;
gflops = 1e-9 * appxFlops / finish;
cudaFree(xPos);
cudaFree(yPos);
cudaFree(xVel);
cudaFree(yVel);
cudaFree(mass);
}
// Interactive driver: reads body and iteration counts from stdin, runs the
// simulation, and prints tab-separated stats (count, steps, seconds, GFLOP/s).
// NOTE(review): system("PAUSE") is Windows-only.
int main(int argc, char **argv)
{
cout << "Number of bodies: ";
int b;
cin >> b;
cout << "Number of iterations: ";
int k;
cin >> k;
cout << "CUDA" << /*" threads\t"*/"\t";
simulate(b, k);
cout << getCount() << /*" bodies\t"*/"\t" << getCurrentTime() << /*" iterations\t"*/"\t" << timeInSeconds << /*" seconds\t"*/"\t" << gflops /*<< " GFlops/s." */<< endl;
//cout << getCount() << " bodies\n" << getCurrentTime() << " iterations\n" << timeInSeconds << " seconds\n" << gflops << " GFlops/s." << endl;
system("PAUSE");
return 0;
}
1f2e2b4b110bb36107cc2f862e29d4c09b161df9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<math.h>
#include"LevelSet/lsTools.h"
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define PI 3.14159265359
// Fifth-order WENO reconstruction from five consecutive one-sided
// differences (beta1..beta5).  The three third-order candidate stencils are
// blended with nonlinear weights derived from Jiang-Shu smoothness
// indicators, so smooth regions get near-ideal weights (0.1, 0.6, 0.3) while
// stencils crossing a kink are suppressed.
__device__ double Phi_x_WENO(
double beta1,
double beta2,
double beta3,
double beta4,
double beta5
)
{
    const double eps = 1.0e-6;   // regularizer: keeps weights finite on flat data

    // Smoothness indicators for the three candidate stencils.
    double curv1 = beta1 - 2.0*beta2 + beta3;
    double curv2 = beta2 - 2.0*beta3 + beta4;
    double curv3 = beta3 - 2.0*beta4 + beta5;
    double slope1 = beta1 - 4.0*beta2 + 3.0*beta3;
    double slope2 = beta2 - beta4;
    double slope3 = 3.0*beta3 - 4.0*beta4 + beta5;
    double is1 = (13.0/12.0)*curv1*curv1 + (0.25)*slope1*slope1;
    double is2 = (13.0/12.0)*curv2*curv2 + (0.25)*slope2*slope2;
    double is3 = (13.0/12.0)*curv3*curv3 + (0.25)*slope3*slope3;

    // Un-normalized nonlinear weights (ideal weights 0.1 / 0.6 / 0.3).
    double a1 = 0.1 / ((is1 + eps)*(is1 + eps));
    double a2 = 0.6 / ((is2 + eps)*(is2 + eps));
    double a3 = 0.3 / ((is3 + eps)*(is3 + eps));
    double w1 = a1 / (a1 + a2 + a3);
    double w2 = a2 / (a1 + a2 + a3);
    double w3 = a3 / (a1 + a2 + a3);

    // Convex combination of the three third-order reconstructions.
    return ((w1*(2.0*beta1 - 7.0*beta2 + 11.0*beta3)
           + w2*(-1.0*beta2 + 5.0*beta3 + 2.0*beta4)
           + w3*(2.0*beta3 + 5.0*beta4 - beta5))*(1.0/6.0));
}
// First-order backward (downwind) differences of phiS along each axis,
// written to d_Phi packed as three Nx*Ny*Nz slabs (x, y, z).  At a domain
// face (index 0 on an axis) the difference is replaced by the forward
// difference of the first interior pair.
// NOTE(review): the result is MULTIPLIED by deltaX/Y/Z -- presumably the
// caller passes reciprocal spacings (1/dx); confirm at the call sites.
// Assumes the launch grid exactly covers Nx x Ny x Nz (no bounds guard).
__global__ void Dev1thO_Downwind(
double* const d_Phi,
const double* const phiS,
const double deltaX,
const double deltaY,
const double deltaZ,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int Nz
)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x,
idy = blockIdx.y*blockDim.y + threadIdx.y,
idz = blockIdx.z*blockDim.z + threadIdx.z;
//Offsets sample (id_ip) EQ (i+1,j,k)
unsigned int id = Nx*Ny*idz + Nx*idy + idx,
id_im = Nx*Ny*idz + Nx*idy + idx - 1,
id_jm = Nx*Ny*idz + Nx*(idy - 1) + idx,
id_km = Nx*Ny*(idz - 1) + Nx*idy + idx;
unsigned int ix = id,
iy = id,
iz = id;
//Dealing with boundaries
// On each zero face: use the forward neighbour pair instead of the
// (nonexistent) backward neighbour.
if(idx==0){id_im = id; ix = Nx*Ny*idz + Nx*idy + 1;}
if(idy==0){id_jm = id; iy = Nx*Ny*idz + Nx*1 + idx;}
if(idz==0){id_km = id; iz = Nx*Ny*1 + Nx*idy + idx;}
const unsigned int Offset = Nx*Ny*Nz;
d_Phi[ id] = deltaX*(phiS[ix] - phiS[id_im]);
d_Phi[1*Offset + id] = deltaY*(phiS[iy] - phiS[id_jm]);
d_Phi[2*Offset + id] = deltaZ*(phiS[iz] - phiS[id_km]);
return;
}
// Fifth-order WENO "plus" (biased) derivative reconstruction, one thread per
// grid node.  d_Phi packs first-order one-sided differences as three
// Nx*Ny*Nz slabs (x, y, z); phi_xyz receives the WENO reconstruction per
// axis in the same layout.  Out-of-range stencil neighbours are replaced by
// mirrored/clamped interior indices.  Assumes the launch grid exactly covers
// Nx x Ny x Nz (no bounds guard) and each dimension is at least 3.
__global__ void PhiDevPlusParameter(
double* const phi_xyz,
const double* const d_Phi,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
// Flattened indices of the node and its +-1 / +-2 neighbours on each axis.
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny;
//Dealing with boundaries: mirror the first out-of-range neighbour, clamp
//the second, matching the x/y/z pattern below.
if(idx == 0 ){id_im1 = id; id_im2 = id_ip1;}
if(idx == 1 ){id_im2 = id_im1;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1;}
if(idx == Nx -2){id_ip2 = id_ip1;}
if(idy == 0 ){id_jm1 = id; id_jm2 = id_jp1;}
if(idy == 1 ){id_jm2 = id_jm1;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1;}
if(idy == Ny -2){id_jp2 = id_jp1;}
if(idz == 0 ){id_km1 = id; id_km2 = id_kp1;}
if(idz == 1 ){id_km2 = id_km1;}
// BUGFIX: the far-z mirror previously assigned id_kp2 = id_jm1 (a y-axis
// neighbour) instead of id_km1, corrupting the z stencil on the idz==Nz-1
// face.  The x and y analogues above use the matching-axis minus neighbour.
if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;}
if(idz == Nz -2){id_kp2 = id_kp1;}
double beta1, beta2, beta3, beta4, beta5;
//Axis X
beta1 = d_Phi[id_im2];
beta2 = d_Phi[id_im1];
beta3 = d_Phi[id];
beta4 = d_Phi[id_ip1];
beta5 = d_Phi[id_ip2];
phi_xyz[id] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y
beta1 = d_Phi[id_jm2 + 1*Offset];
beta2 = d_Phi[id_jm1 + 1*Offset];
beta3 = d_Phi[id + 1*Offset];
beta4 = d_Phi[id_jp1 + 1*Offset];
beta5 = d_Phi[id_jp2 + 1*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z
beta1 = d_Phi[id_km2 + 2*Offset];
beta2 = d_Phi[id_km1 + 2*Offset];
beta3 = d_Phi[id + 2*Offset];
beta4 = d_Phi[id_kp1 + 2*Offset];
beta5 = d_Phi[id_kp2 + 2*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
// Fifth-order WENO "minus" (opposite-bias) derivative reconstruction, one
// thread per grid node.  Same packed layout as PhiDevPlusParameter, but the
// five-point stencil is taken from the +3..-1 neighbours (reversed bias).
// Assumes the launch grid exactly covers Nx x Ny x Nz (no bounds guard) and
// each dimension is at least 4 (the +3 neighbour must exist off-boundary).
__global__ void PhiDevMinusParameter(
double* const phi_xyz,
const double* const d_Phi,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
// Flattened indices of the node and its -2..+3 neighbours on each axis.
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny,
id_ip3 = (idx + 3) + idy*Nx + idz*Nx*Ny,
id_jp3 = idx + (idy + 3)*Nx + idz*Nx*Ny,
id_kp3 = idx + idy*Nx + (idz + 3)*Nx*Ny;
//Dealing with boundaries
// Out-of-range plus neighbours mirror back into the interior; the minus
// neighbours only need clamping at index 0 (the -2 neighbour is used only
// through the mirrored +2/+3 assignments on the far face).
if(idx == 0 ){id_im1 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1; id_ip3 = id_im2;}
if(idx == Nx -2){id_ip2 = id_ip1; id_ip3 = id;}
if(idx == Nx -3){id_ip3 = id_ip2;}
if(idy == 0 ){id_jm1 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1; id_jp3 = id_jm2;}
if(idy == Ny -2){id_jp2 = id_jp1; id_jp3 = id;}
if(idy == Ny -3){id_jp3 = id_jp2;}
if(idz == 0 ){id_km1 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;id_kp3 = id_km2;}
if(idz == Nz -2){id_kp2 = id_kp1; id_kp3 = id;}
if(idz == Nz -3){id_kp3 = id_kp2;}
double beta1, beta2, beta3, beta4, beta5;
//Axis X
// Note the reversed stencil order (+3 down to -1) relative to the plus
// variant: this realizes the opposite upwind bias with the same WENO core.
beta1 = d_Phi[id_ip3];
beta2 = d_Phi[id_ip2];
beta3 = d_Phi[id_ip1];
beta4 = d_Phi[id ];
beta5 = d_Phi[id_im1];
phi_xyz[id ] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y
beta1 = d_Phi[id_jp3 + 1*Offset];
beta2 = d_Phi[id_jp2 + 1*Offset];
beta3 = d_Phi[id_jp1 + 1*Offset];
beta4 = d_Phi[id + 1*Offset];
beta5 = d_Phi[id_jm1 + 1*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z
beta1 = d_Phi[id_kp3 + 2*Offset];
beta2 = d_Phi[id_kp2 + 2*Offset];
beta3 = d_Phi[id_kp1 + 2*Offset];
beta4 = d_Phi[id + 2*Offset];
beta5 = d_Phi[id_km1 + 2*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
// Right-hand side of the level-set reinitialization PDE:
//   rs = S(phi0) * (|grad phi| - 1)
// using a Godunov-type upwind Hamiltonian built from the WENO plus/minus
// one-sided derivatives (d_phiP / d_phiM, packed as three Nx*Ny*Nz slabs).
// `ta` is the raw sign of the initial field phiS0; the branchless
// 0.5*(ta+1) / 0.5*|ta-1| factors select the positive- or negative-phase
// combination of squared one-sided derivatives per axis.
// `so` is the smoothed sign S(phi0) with regularization deltaXYZ.
// Assumes the launch grid exactly covers Nx x Ny x Nz (no bounds guard).
__global__ void reini_RS_WENO(
double* const rs,
const double* const phiS,
const double deltaXYZ,
const double* const d_phiP,
const double* const d_phiM,
const double* const phiS0,
unsigned int Nx,
unsigned int Ny,
unsigned int Nz
)
{
unsigned int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
unsigned int id = idx + Nx*idy + Nx*Ny*idz;
double so, rs_x, rs_y, rs_z, ta, grad_mod;
double phiMax, phiMin;
// ta in {-1, 0, +1}: sign of the initial level-set value at this node.
ta = (double)(phiS0[id] > 0.0) - (double)(phiS0[id] < 0.0);
//Getting gradient axis X
phiMax = MAX(d_phiP[id ], 0.0)*MAX(d_phiP[id ], 0.0);
phiMin = MIN(d_phiM[id ], 0.0)*MIN(d_phiM[id ], 0.0);
rs_x = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id ], 0.0)*MAX(d_phiM[id ], 0.0);
phiMin = MIN(d_phiP[id ], 0.0)*MIN(d_phiP[id ], 0.0);
rs_x += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
//Getting gradient axis Y
phiMax = MAX(d_phiP[id + 1*Offset], 0.0)
*MAX(d_phiP[id + 1*Offset], 0.0);
phiMin = MIN(d_phiM[id + 1*Offset], 0.0)
*MIN(d_phiM[id + 1*Offset], 0.0);
rs_y = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id + 1*Offset], 0.0)
*MAX(d_phiM[id + 1*Offset], 0.0);
phiMin = MIN(d_phiP[id + 1*Offset], 0.0)
*MIN(d_phiP[id + 1*Offset], 0.0);
rs_y += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
//Getting gradient axis Z
phiMax = MAX(d_phiP[id + 2*Offset], 0.0)
*MAX(d_phiP[id + 2*Offset], 0.0);
phiMin = MIN(d_phiM[id + 2*Offset], 0.0)
*MIN(d_phiM[id + 2*Offset], 0.0);
rs_z = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
phiMax = MAX(d_phiM[id + 2*Offset], 0.0)
*MAX(d_phiM[id + 2*Offset], 0.0);
phiMin = MIN(d_phiP[id + 2*Offset], 0.0)
*MIN(d_phiP[id + 2*Offset], 0.0);
rs_z += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
// Upwinded gradient magnitude and smoothed sign function.
grad_mod = sqrt(rs_x + rs_y + rs_z);
so = phiS0[id]
/ sqrt(phiS0[id]*phiS0[id] + deltaXYZ*deltaXYZ );
rs[id] = 1.0*so*(grad_mod - 1.0);
return;
}
// Advection RHS: rs = u . grad(phi) with per-axis upwinding.
// rsign = sign(u_axis); the 0.5*(rsign+1) factor selects the d_phiP_d
// derivative for positive velocity, 0.5*|rsign-1| selects d_phiM_d for
// negative velocity (both half-weighted when the velocity is exactly 0).
// velocity, d_phiP_d and d_phiM_d hold X, Y, Z components in consecutive
// Nx*Ny*Nz slabs. One thread per node; the launch grid must cover
// (Nx, Ny, Nz) exactly -- there is no bounds guard.
// Fix: the grad_x/grad_y/grad_z intermediates of the original were dead
// code (computed, never read) and have been removed.
__global__ void advect_RS_WENO(
    double* const rs, //RHS
    const double* const velocity,
    const double* const d_phiP_d,
    const double* const d_phiM_d,
    unsigned int Nx,
    unsigned int Ny,
    unsigned int Nz
    )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz,
                 Offset = Nx*Ny*Nz;
    double rs_x, rs_y, rs_z;
    double rsign;
    // Axis X
    rsign = (double)(velocity[id] > 0.0)
          - (double)(velocity[id] < 0.0);
    rs_x = 0.5*(rsign + 1.0)*velocity[id]*d_phiP_d[id]
         + 0.5*fabs(rsign - 1.0)*velocity[id]*d_phiM_d[id];
    // Axis Y
    rsign = (double)(velocity[id + 1*Offset] > 0.0)
          - (double)(velocity[id + 1*Offset] < 0.0);
    rs_y = 0.5*(rsign + 1.0)*velocity[id + 1*Offset]
          *d_phiP_d[id + 1*Offset]
         + 0.5*fabs(rsign - 1.0)*velocity[id + 1*Offset]
          *d_phiM_d[id + 1*Offset];
    // Axis Z
    rsign = (double)(velocity[id + 2*Offset] > 0.0)
          - (double)(velocity[id + 2*Offset] < 0.0);
    rs_z = 0.5*(rsign + 1.0)*velocity[id + 2*Offset]
          *d_phiP_d[id + 2*Offset]
         + 0.5*fabs(rsign - 1.0)*velocity[id + 2*Offset]
          *d_phiM_d[id + 2*Offset];
    rs[id] = rs_x + rs_y + rs_z;
    return;
}
// Enright (deformation-field) test velocity. The cos(PI*t/T) modulation
// makes the flow time-periodic: it reverses at t = period/2 so the exact
// solution returns to its initial state at t = period.
// vel stores the X, Y, Z components in consecutive Nx*Ny*Nz slabs.
__global__
void enrightVelocityProfile(
    double *vel, //Velocity Array
    double *xMesh, //Mesh values
    double *yMesh,
    double *zMesh,
    const int Nx, //Mesh dimensions
    const int Ny,
    const int Nz,
    const double time, //current time
    const double period
    )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz,
                 offset = Nx*Ny*Nz;
    // Hoist the per-node coordinates and the shared time factor; the
    // multiplication order is unchanged, so results are bit-identical.
    const double x = xMesh[id];
    const double y = yMesh[id];
    const double z = zMesh[id];
    const double cosT = cos(PI*time/period);
    vel[id           ] =  2.0*sin(PI*x)*sin(PI*x)*sin(2.0*PI*y)*sin(2.0*PI*z)*cosT;
    vel[id + 1*offset] = -sin(PI*y)*sin(PI*y)*sin(2.0*PI*x)*sin(2.0*PI*z)*cosT;
    vel[id + 2*offset] = -sin(PI*z)*sin(PI*z)*sin(2.0*PI*y)*sin(2.0*PI*x)*cosT;
}
// Fill the coordinate arrays of a uniform Cartesian mesh:
//   coordinate = (index - 5) * spacing
// The fixed shift of 5 places index 5 at the origin -- presumably the
// ghost-cell offset used elsewhere in this file; confirm with the host
// setup before changing it.
__global__
void meshRegularStructured(
    double *xMesh, //Mesh values
    double *yMesh,
    double *zMesh,
    double deltaX,
    double deltaY,
    double deltaZ,
    const int Nx, //Mesh dimensions
    const int Ny,
    const int Nz
    )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz;
    // The unsigned index is promoted to double before the subtraction,
    // so small indices do not wrap around.
    xMesh[id] = ((double)idx - 5.0)*deltaX;
    yMesh[id] = ((double)idy - 5.0)*deltaY;
    zMesh[id] = ((double)idz - 5.0)*deltaZ;
}
// Mirror ghost-cell fill along Z: copies the first/last ncells interior
// planes into the ghost planes outside them, scaled by `direction`
// (pass -1.0 to mirror with a sign flip for odd-symmetric fields).
// Threads span the (x, y) plane of the ghost-padded grid; each thread
// handles all ncells planes on both Z boundaries.
__global__
void cuGhostCellsMirror3dZ(
    double *ghostArray,
    const int ncells,
    const int Nx,
    const int Ny,
    const int Nz,
    double direction
    )
{
    int NxG = Nx + 2*ncells,
        NyG = Ny + 2*ncells;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y;
    const unsigned int base  = idx + NxG*idy; // offset within one Z-plane
    const unsigned int plane = NxG*NyG;       // stride between Z-planes
    // Low-Z boundary: interior plane (ncells+g) -> ghost plane (ncells-g-1).
    for(unsigned int g = 0; g < ncells; g++){
        ghostArray[base + plane*(ncells - g - 1)]
            = ghostArray[base + plane*(ncells + g)]*direction;
    }
    // High-Z boundary: interior (Nz-g-1+ncells) -> ghost (Nz+g+ncells).
    for(unsigned int g = 0; g < ncells; g++){
        ghostArray[base + plane*(Nz + g + ncells)]
            = ghostArray[base + plane*(Nz - g - 1 + ncells)]*direction;
    }
}
// Mirror ghost-cell fill along Y: copies the first/last ncells interior
// rows into the ghost rows beyond them, scaled by `direction` (pass -1.0
// to mirror with a sign flip for odd-symmetric fields).
// Threads span the (x, z) plane of the ghost-padded grid; each thread
// copies all ncells rows on both Y boundaries.
// Fix: the unused local NzG of the original has been removed.
__global__
void cuGhostCellsMirror3dY(
    double *ghostArray,
    const int ncells,
    const int Nx,
    const int Ny,
    const int Nz,
    double direction
    )
{
    int NxG = Nx + 2*ncells,
        NyG = Ny + 2*ncells;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idz = blockDim.y*blockIdx.y + threadIdx.y;
    // Low-Y boundary: interior row (ncells+idy) -> ghost row (ncells-idy-1).
    for(unsigned int idy = 0; idy < ncells; idy++){
        unsigned int id  = idx + NxG*(ncells + idy) + NxG*NyG*idz;
        unsigned int idG = idx + NxG*(ncells - idy - 1) + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
    // High-Y boundary: interior (Ny-idy-1+ncells) -> ghost (Ny+idy+ncells).
    for(unsigned int idy = 0; idy < ncells; idy++){
        unsigned int id  = idx + NxG*(Ny - idy - 1 + ncells) + NxG*NyG*idz;
        unsigned int idG = idx + NxG*(Ny + idy + ncells) + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
}
// Mirror ghost-cell fill along X: copies the first/last ncells interior
// columns into the ghost columns beyond them, scaled by `direction`
// (pass -1.0 to mirror with a sign flip for odd-symmetric fields).
// Threads span the (y, z) plane of the ghost-padded grid; each thread
// copies all ncells columns on both X boundaries.
// Fix: the unused local NzG of the original has been removed (Nz stays
// in the signature for interface compatibility).
__global__
void cuGhostCellsMirror3dX(
    double *ghostArray,
    const int ncells,
    const int Nx,
    const int Ny,
    const int Nz,
    double direction
    )
{
    int NxG = Nx + 2*ncells,
        NyG = Ny + 2*ncells;
    unsigned int idy = blockDim.x*blockIdx.x + threadIdx.x,
                 idz = blockDim.y*blockIdx.y + threadIdx.y;
    // Low-X boundary: interior column (ncells+idx) -> ghost (ncells-idx-1).
    for(unsigned int idx = 0; idx < ncells; idx++){
        unsigned int id  = (ncells + idx) + NxG*idy + NxG*NyG*idz;
        unsigned int idG = (ncells - idx - 1) + NxG*idy + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
    // High-X boundary: interior (Nx-idx-1+ncells) -> ghost (Nx+idx+ncells).
    for(unsigned int idx = 0; idx < ncells; idx++){
        unsigned int id  = (Nx - idx - 1 + ncells) + NxG*idy + NxG*NyG*idz;
        unsigned int idG = (Nx + idx + ncells) + NxG*idy + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
}
// Plus-biased WENO5 reconstruction of the three directional derivatives,
// scaled by the metric factors d_jbn (curvilinear/Jacobian variant of
// PhiDevPlusParameter). For each axis the five-point stencil (i-2 .. i+2)
// of the one-sided differences d_Phi is multiplied by the axis metric and
// fed to Phi_x_WENO.
// Layout: d_Phi and phi_xyz hold X, Y, Z components in consecutive
// Nx*Ny*Nz slabs; d_jbn is read at slab offsets 0, 4 and 8 -- presumably
// the diagonal entries of a per-axis metric block; confirm against the
// host code that fills d_jbn.
// Near domain faces the stencil indices are folded back onto interior
// nodes instead of reading out of bounds. One thread per node; the launch
// grid must cover (Nx, Ny, Nz) exactly (no bounds guard).
__global__ void PhiDevPlusParameterJB(
    double* const phi_xyz,
    const double* const d_Phi,
    const double* const d_jbn,
    unsigned const int Nx,
    unsigned const int Ny,
    unsigned const int Nz
    )
{
    unsigned const int Offset = Nx*Ny*Nz;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id     = idx + idy*Nx + idz*Nx*Ny,
                 id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
                 id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
                 id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
                 id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
                 id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
                 id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
                 id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
                 id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
                 id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
                 id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
                 id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
                 id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny;
    //Dealing with boundaries
    if(idx == 0    ){id_im1 = id; id_im2 = id_ip1;}
    if(idx == 1    ){id_im2 = id_im1;}
    if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1;}
    if(idx == Nx -2){id_ip2 = id_ip1;}
    if(idy == 0    ){id_jm1 = id; id_jm2 = id_jp1;}
    if(idy == 1    ){id_jm2 = id_jm1;}
    if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1;}
    if(idy == Ny -2){id_jp2 = id_jp1;}
    if(idz == 0    ){id_km1 = id; id_km2 = id_kp1;}
    if(idz == 1    ){id_km2 = id_km1;}
    //BUGFIX: the Z high face previously folded onto id_jm1 (a Y-axis
    //neighbour, copy-paste slip from the Y branch); it must use id_km1,
    //matching the X (id_im1) and Y (id_jm1) branches.
    if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;}
    if(idz == Nz -2){id_kp2 = id_kp1;}
    double beta1, beta2, beta3, beta4, beta5;
    //Axis X (metric slab 0)
    beta1 = d_Phi[id_im2]*d_jbn[id];
    beta2 = d_Phi[id_im1]*d_jbn[id];
    beta3 = d_Phi[id    ]*d_jbn[id];
    beta4 = d_Phi[id_ip1]*d_jbn[id];
    beta5 = d_Phi[id_ip2]*d_jbn[id];
    phi_xyz[id] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    //Axis Y (metric slab 4)
    beta1 = d_Phi[id_jm2 + 1*Offset]*d_jbn[id + 4*Offset];
    beta2 = d_Phi[id_jm1 + 1*Offset]*d_jbn[id + 4*Offset];
    beta3 = d_Phi[id     + 1*Offset]*d_jbn[id + 4*Offset];
    beta4 = d_Phi[id_jp1 + 1*Offset]*d_jbn[id + 4*Offset];
    beta5 = d_Phi[id_jp2 + 1*Offset]*d_jbn[id + 4*Offset];
    phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    //Axis Z (metric slab 8)
    beta1 = d_Phi[id_km2 + 2*Offset]*d_jbn[id + 8*Offset];
    beta2 = d_Phi[id_km1 + 2*Offset]*d_jbn[id + 8*Offset];
    beta3 = d_Phi[id     + 2*Offset]*d_jbn[id + 8*Offset];
    beta4 = d_Phi[id_kp1 + 2*Offset]*d_jbn[id + 8*Offset];
    beta5 = d_Phi[id_kp2 + 2*Offset]*d_jbn[id + 8*Offset];
    phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    return;
}
//Minus-biased WENO5 reconstruction of the three directional derivatives,
//scaled by the metric factors d_jbn (curvilinear/Jacobian variant of
//PhiDevMinusParameter). The five-point stencil runs from i+3 down to i-1,
//the mirror image of the plus-biased kernel.
//Layout: d_Phi and phi_xyz hold X, Y, Z components in consecutive
//Nx*Ny*Nz slabs; d_jbn is read at slab offsets 0, 4 and 8 -- presumably
//diagonal metric entries; confirm against the host code filling d_jbn.
//One thread per node; the launch grid must cover (Nx, Ny, Nz) exactly
//(no bounds guard).
__global__ void PhiDevMinusParameterJB(
double* const phi_xyz,
const double* const d_Phi,
const double* const d_jbn,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
//Neighbour indices; *m = minus (previous node), *p = plus (next node).
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny,
id_ip3 = (idx + 3) + idy*Nx + idz*Nx*Ny,
id_jp3 = idx + (idy + 3)*Nx + idz*Nx*Ny,
id_kp3 = idx + idy*Nx + (idz + 3)*Nx*Ny;
//Dealing with boundaries
if(idx == 0 ){id_im1 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1; id_ip3 = id_im2;}
if(idx == Nx -2){id_ip2 = id_ip1; id_ip3 = id;}
if(idx == Nx -3){id_ip3 = id_ip2;}
if(idy == 0 ){id_jm1 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1; id_jp3 = id_jm2;}
if(idy == Ny -2){id_jp2 = id_jp1; id_jp3 = id;}
if(idy == Ny -3){id_jp3 = id_jp2;}
if(idz == 0 ){id_km1 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;id_kp3 = id_km2;}
if(idz == Nz -2){id_kp2 = id_kp1; id_kp3 = id;}
if(idz == Nz -3){id_kp3 = id_kp2;}
//NOTE(review): the *m2 indices are not clamped at the low face; they are
//only read via the high-face substitutions above, which is in bounds as
//long as each dimension is >= 4 -- confirm minimum grid sizes upstream.
double beta1, beta2, beta3, beta4, beta5;
//Axis X (stencil i+3 .. i-1, metric slab 0)
beta1 = d_Phi[id_ip3]*d_jbn[id];
beta2 = d_Phi[id_ip2]*d_jbn[id];
beta3 = d_Phi[id_ip1]*d_jbn[id];
beta4 = d_Phi[id ]*d_jbn[id];
beta5 = d_Phi[id_im1]*d_jbn[id];
phi_xyz[id ] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y (metric slab 4)
beta1 = d_Phi[id_jp3 + 1*Offset]*d_jbn[id + 4*Offset];
beta2 = d_Phi[id_jp2 + 1*Offset]*d_jbn[id + 4*Offset];
beta3 = d_Phi[id_jp1 + 1*Offset]*d_jbn[id + 4*Offset];
beta4 = d_Phi[id + 1*Offset]*d_jbn[id + 4*Offset];
beta5 = d_Phi[id_jm1 + 1*Offset]*d_jbn[id + 4*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z (metric slab 8)
beta1 = d_Phi[id_kp3 + 2*Offset]*d_jbn[id + 8*Offset];
beta2 = d_Phi[id_kp2 + 2*Offset]*d_jbn[id + 8*Offset];
beta3 = d_Phi[id_kp1 + 2*Offset]*d_jbn[id + 8*Offset];
beta4 = d_Phi[id + 2*Offset]*d_jbn[id + 8*Offset];
beta5 = d_Phi[id_km1 + 2*Offset]*d_jbn[id + 8*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
// Copy the interior field valueArray (Nx*Ny*Nz) into the interior region
// of the ghost-padded array (each dimension padded by gcells on both
// sides). One thread per interior node; ghost cells are left untouched.
__global__
void cuSwapToGhost(
    double *ghostArray,
    double *valueArray,
    const int gcells,
    const int Nx,
    const int Ny,
    const int Nz
    )
{
    const int NxG = Nx + 2*gcells;
    const int NyG = Ny + 2*gcells;
    unsigned int ix = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y*blockIdx.y + threadIdx.y;
    unsigned int iz = blockDim.z*blockIdx.z + threadIdx.z;
    // Flat index in the unpadded array ...
    const unsigned int src = ix + Nx*(iy + Ny*iz);
    // ... and the matching index shifted into the padded interior.
    const unsigned int dst = (ix + gcells)
        + NxG*((iy + gcells) + NyG*(iz + gcells));
    ghostArray[dst] = valueArray[src];
}
// Copy the interior region of the ghost-padded array back into the
// unpadded field valueArray (inverse of cuSwapToGhost).
// One thread per interior node.
__global__
void cuSwapFromGhost(
    double *valueArray,
    double *ghostArray,
    const int gcells,
    const int Nx,
    const int Ny,
    const int Nz
    )
{
    const int NxG = Nx + 2*gcells;
    const int NyG = Ny + 2*gcells;
    unsigned int ix = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y*blockIdx.y + threadIdx.y;
    unsigned int iz = blockDim.z*blockIdx.z + threadIdx.z;
    // Flat index in the unpadded array ...
    const unsigned int dst = ix + Nx*(iy + Ny*iz);
    // ... and the matching index inside the padded interior.
    const unsigned int src = (ix + gcells)
        + NxG*((iy + gcells) + NyG*(iz + gcells));
    valueArray[dst] = ghostArray[src];
}
// Reinitialization RHS (Godunov scheme) -- variant of reini_RS_WENO with
// a spatially varying spacing deltaXYZ[id] (curvilinear/Jacobian mesh):
//   rs = S(phi0) * (|grad phi| - 1),  S = phi0 / sqrt(phi0^2 + dx(id)^2).
// d_phiP / d_phiM are the two one-sided WENO derivative fields; X, Y, Z
// components in consecutive Nx*Ny*Nz slabs. ta = sign(phi0) selects the
// upwind branch per axis.
// NOTE(review): phiS is never read; kept only to preserve the signature.
// One thread per node; launch grid must cover (Nx, Ny, Nz) exactly.
__global__ void reini_RS_WENOJB(
    double* const rs,
    const double* const phiS,
    const double* const deltaXYZ,
    const double* const d_phiP,
    const double* const d_phiM,
    const double* const phiS0,
    unsigned int Nx,
    unsigned int Ny,
    unsigned int Nz
    )
{
    unsigned int Offset = Nx*Ny*Nz;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz;
    double so, rs_x, rs_y, rs_z, ta, grad_mod;
    double phiMax, phiMin;
    ta = (double)(phiS0[id] > 0.0) - (double)(phiS0[id] < 0.0);
    // Axis X: squared Godunov upwind derivative.
    phiMax = MAX(d_phiP[id], 0.0)*MAX(d_phiP[id], 0.0);
    phiMin = MIN(d_phiM[id], 0.0)*MIN(d_phiM[id], 0.0);
    rs_x = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id], 0.0)*MAX(d_phiM[id], 0.0);
    phiMin = MIN(d_phiP[id], 0.0)*MIN(d_phiP[id], 0.0);
    // fabs, not abs: a bare abs can bind to the integer overload.
    rs_x += 0.5*fabs(ta - 1.0)*MAX(phiMax, phiMin);
    // Axis Y
    phiMax = MAX(d_phiP[id + 1*Offset], 0.0)*MAX(d_phiP[id + 1*Offset], 0.0);
    phiMin = MIN(d_phiM[id + 1*Offset], 0.0)*MIN(d_phiM[id + 1*Offset], 0.0);
    rs_y = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id + 1*Offset], 0.0)*MAX(d_phiM[id + 1*Offset], 0.0);
    phiMin = MIN(d_phiP[id + 1*Offset], 0.0)*MIN(d_phiP[id + 1*Offset], 0.0);
    rs_y += 0.5*fabs(ta - 1.0)*MAX(phiMax, phiMin);
    // Axis Z
    phiMax = MAX(d_phiP[id + 2*Offset], 0.0)*MAX(d_phiP[id + 2*Offset], 0.0);
    phiMin = MIN(d_phiM[id + 2*Offset], 0.0)*MIN(d_phiM[id + 2*Offset], 0.0);
    rs_z = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id + 2*Offset], 0.0)*MAX(d_phiM[id + 2*Offset], 0.0);
    phiMin = MIN(d_phiP[id + 2*Offset], 0.0)*MIN(d_phiP[id + 2*Offset], 0.0);
    rs_z += 0.5*fabs(ta - 1.0)*MAX(phiMax, phiMin);
    grad_mod = sqrt(rs_x + rs_y + rs_z);
    // Smoothed sign with the local grid spacing.
    so = phiS0[id]
       / sqrt(phiS0[id]*phiS0[id] + deltaXYZ[id]*deltaXYZ[id] );
    rs[id] = 1.0*so*(grad_mod - 1.0);
    return;
}
#include<cuda.h>
#include<math.h>
#include"LevelSet/lsTools.h"
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define PI 3.14159265359
//Fifth-order WENO reconstruction from a five-point stencil.
//beta1..beta5 are the five upwinded one-sided differences, already
//ordered by the caller for the plus- or minus-biased direction.
//Classic Jiang-Shu WENO5: three smoothness indicators, ideal weights
//0.1/0.6/0.3, epsilon = 1.0e-6 guarding the division, and a convex
//combination of the three third-order candidate reconstructions.
//Returns the reconstructed derivative value.
__device__ double Phi_x_WENO(
double beta1,
double beta2,
double beta3,
double beta4,
double beta5
)
{
double s_b1, s_b2, s_b3,
alpha_1, alpha_2, alpha_3,
omega_1, omega_2, omega_3, result;
//Smoothness indicators of the three candidate sub-stencils.
s_b1 = (13.0/12.0)*(beta1 - 2.0*beta2 + beta3)
*(beta1 - 2.0*beta2 + beta3)
+ (0.25)*(beta1 - 4.0*beta2 + 3.0*beta3)
*(beta1 - 4.0*beta2 + 3.0*beta3);
s_b2 = (13.0/12.0)*(beta2 - 2.0*beta3 + beta4)
*(beta2 - 2.0*beta3 + beta4)
+ (0.25)*(beta2 - beta4)*(beta2 - beta4);
s_b3 = (13.0/12.0)*(beta3 - 2.0*beta4 + beta5)
*(beta3 - 2.0*beta4 + beta5)
+ (0.25)*(3.0*beta3 - 4.0*beta4 + beta5)
*(3.0*beta3 - 4.0*beta4 + beta5);
//Un-normalized nonlinear weights: ideal weight / (indicator + eps)^2.
alpha_1 = 0.1 /((s_b1 + 1.0e-6)*(s_b1 + 1.0e-6));
alpha_2 = 0.6 /((s_b2 + 1.0e-6)*(s_b2 + 1.0e-6));
alpha_3 = 0.3 /((s_b3 + 1.0e-6)*(s_b3 + 1.0e-6));
//Normalization to a convex combination.
omega_1 = alpha_1 / (alpha_1 + alpha_2 + alpha_3);
omega_2 = alpha_2 / (alpha_1 + alpha_2 + alpha_3);
omega_3 = alpha_3 / (alpha_1 + alpha_2 + alpha_3);
//Weighted sum of the three third-order candidate stencils.
result = ((omega_1*(2.0*beta1 - 7.0*beta2 + 11.0*beta3)
+ omega_2*(-1.0*beta2 + 5.0*beta3 + 2.0*beta4)
+ omega_3*(2.0*beta3 + 5.0*beta4 - beta5))*(1.0/6.0));
return result;
}
//First-order one-sided (backward) difference of phiS along each axis,
//written into the X, Y, Z slabs of d_Phi (slab size Nx*Ny*Nz).
//At the low face of each axis the stencil is flipped to a forward
//difference by moving the "current" sample one node inward (ix/iy/iz)
//and collapsing the minus-neighbour onto id.
//NOTE(review): the difference is MULTIPLIED by deltaX/Y/Z, so these are
//presumably the reciprocal grid spacings (1/dx, 1/dy, 1/dz) -- confirm
//against the callers.
//One thread per node; the launch grid must cover (Nx, Ny, Nz) exactly
//(no bounds guard).
__global__ void Dev1thO_Downwind(
double* const d_Phi,
const double* const phiS,
const double deltaX,
const double deltaY,
const double deltaZ,
const unsigned int Nx,
const unsigned int Ny,
const unsigned int Nz
)
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x,
idy = blockIdx.y*blockDim.y + threadIdx.y,
idz = blockIdx.z*blockDim.z + threadIdx.z;
//Offsets sample (id_ip) EQ (i+1,j,k)
unsigned int id = Nx*Ny*idz + Nx*idy + idx,
id_im = Nx*Ny*idz + Nx*idy + idx - 1,
id_jm = Nx*Ny*idz + Nx*(idy - 1) + idx,
id_km = Nx*Ny*(idz - 1) + Nx*idy + idx;
//ix/iy/iz: index of the "current" sample, moved inward at the low face.
unsigned int ix = id,
iy = id,
iz = id;
//Dealing with boundaries
if(idx==0){id_im = id; ix = Nx*Ny*idz + Nx*idy + 1;}
if(idy==0){id_jm = id; iy = Nx*Ny*idz + Nx*1 + idx;}
if(idz==0){id_km = id; iz = Nx*Ny*1 + Nx*idy + idx;}
const unsigned int Offset = Nx*Ny*Nz;
d_Phi[ id] = deltaX*(phiS[ix] - phiS[id_im]);
d_Phi[1*Offset + id] = deltaY*(phiS[iy] - phiS[id_jm]);
d_Phi[2*Offset + id] = deltaZ*(phiS[iz] - phiS[id_km]);
return;
}
// Plus-biased WENO5 reconstruction of the three directional derivatives.
// For each axis the five-point stencil (i-2 .. i+2) of the one-sided
// differences d_Phi is fed to Phi_x_WENO.
// d_Phi and phi_xyz hold X, Y, Z components in consecutive Nx*Ny*Nz
// slabs. Near domain faces the stencil indices are folded back onto
// interior nodes instead of reading out of bounds. One thread per node;
// the launch grid must cover (Nx, Ny, Nz) exactly (no bounds guard).
__global__ void PhiDevPlusParameter(
    double* const phi_xyz,
    const double* const d_Phi,
    unsigned const int Nx,
    unsigned const int Ny,
    unsigned const int Nz
    )
{
    unsigned const int Offset = Nx*Ny*Nz;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id     = idx + idy*Nx + idz*Nx*Ny,
                 id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
                 id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
                 id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
                 id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
                 id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
                 id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
                 id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
                 id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
                 id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
                 id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
                 id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
                 id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny;
    //Dealing with boundaries
    if(idx == 0    ){id_im1 = id; id_im2 = id_ip1;}
    if(idx == 1    ){id_im2 = id_im1;}
    if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1;}
    if(idx == Nx -2){id_ip2 = id_ip1;}
    if(idy == 0    ){id_jm1 = id; id_jm2 = id_jp1;}
    if(idy == 1    ){id_jm2 = id_jm1;}
    if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1;}
    if(idy == Ny -2){id_jp2 = id_jp1;}
    if(idz == 0    ){id_km1 = id; id_km2 = id_kp1;}
    if(idz == 1    ){id_km2 = id_km1;}
    //BUGFIX: the Z high face previously folded onto id_jm1 (a Y-axis
    //neighbour, copy-paste slip from the Y branch); it must use id_km1,
    //matching the X (id_im1) and Y (id_jm1) branches.
    if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;}
    if(idz == Nz -2){id_kp2 = id_kp1;}
    double beta1, beta2, beta3, beta4, beta5;
    //Axis X
    beta1 = d_Phi[id_im2];
    beta2 = d_Phi[id_im1];
    beta3 = d_Phi[id];
    beta4 = d_Phi[id_ip1];
    beta5 = d_Phi[id_ip2];
    phi_xyz[id] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    //Axis Y
    beta1 = d_Phi[id_jm2 + 1*Offset];
    beta2 = d_Phi[id_jm1 + 1*Offset];
    beta3 = d_Phi[id     + 1*Offset];
    beta4 = d_Phi[id_jp1 + 1*Offset];
    beta5 = d_Phi[id_jp2 + 1*Offset];
    phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    //Axis Z
    beta1 = d_Phi[id_km2 + 2*Offset];
    beta2 = d_Phi[id_km1 + 2*Offset];
    beta3 = d_Phi[id     + 2*Offset];
    beta4 = d_Phi[id_kp1 + 2*Offset];
    beta5 = d_Phi[id_kp2 + 2*Offset];
    phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    return;
}
//Minus-biased WENO5 reconstruction of the three directional derivatives.
//The five-point stencil runs from i+3 down to i-1, the mirror image of
//the plus-biased kernel PhiDevPlusParameter.
//d_Phi and phi_xyz hold X, Y, Z components in consecutive Nx*Ny*Nz
//slabs. Near domain faces the stencil indices are folded back onto
//interior nodes. One thread per node; the launch grid must cover
//(Nx, Ny, Nz) exactly (no bounds guard).
__global__ void PhiDevMinusParameter(
double* const phi_xyz,
const double* const d_Phi,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
//Neighbour indices; *m = minus (previous node), *p = plus (next node).
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny,
id_ip3 = (idx + 3) + idy*Nx + idz*Nx*Ny,
id_jp3 = idx + (idy + 3)*Nx + idz*Nx*Ny,
id_kp3 = idx + idy*Nx + (idz + 3)*Nx*Ny;
//Dealing with boundaries
if(idx == 0 ){id_im1 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1; id_ip3 = id_im2;}
if(idx == Nx -2){id_ip2 = id_ip1; id_ip3 = id;}
if(idx == Nx -3){id_ip3 = id_ip2;}
if(idy == 0 ){id_jm1 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1; id_jp3 = id_jm2;}
if(idy == Ny -2){id_jp2 = id_jp1; id_jp3 = id;}
if(idy == Ny -3){id_jp3 = id_jp2;}
if(idz == 0 ){id_km1 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;id_kp3 = id_km2;}
if(idz == Nz -2){id_kp2 = id_kp1; id_kp3 = id;}
if(idz == Nz -3){id_kp3 = id_kp2;}
//NOTE(review): the *m2 indices are not clamped at the low face; they are
//only read via the high-face substitutions above, which is in bounds as
//long as each dimension is >= 4 -- confirm minimum grid sizes upstream.
double beta1, beta2, beta3, beta4, beta5;
//Axis X (stencil i+3 .. i-1)
beta1 = d_Phi[id_ip3];
beta2 = d_Phi[id_ip2];
beta3 = d_Phi[id_ip1];
beta4 = d_Phi[id ];
beta5 = d_Phi[id_im1];
phi_xyz[id ] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y
beta1 = d_Phi[id_jp3 + 1*Offset];
beta2 = d_Phi[id_jp2 + 1*Offset];
beta3 = d_Phi[id_jp1 + 1*Offset];
beta4 = d_Phi[id + 1*Offset];
beta5 = d_Phi[id_jm1 + 1*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z
beta1 = d_Phi[id_kp3 + 2*Offset];
beta2 = d_Phi[id_kp2 + 2*Offset];
beta3 = d_Phi[id_kp1 + 2*Offset];
beta4 = d_Phi[id + 2*Offset];
beta5 = d_Phi[id_km1 + 2*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
// Reinitialization RHS (Godunov scheme, uniform spacing deltaXYZ):
//   rs = S(phi0) * (|grad phi| - 1)
// with the smoothed sign S = phi0 / sqrt(phi0^2 + deltaXYZ^2).
// d_phiP / d_phiM are the two one-sided WENO derivative fields; their
// X, Y, Z components sit in consecutive Nx*Ny*Nz slabs.
// ta = sign(phi0) selects the upwind branch per axis: the 0.5*(ta+1)
// factor is active for phi0 > 0, the 0.5*|ta-1| factor for phi0 < 0
// (both half-weighted when phi0 == 0).
// NOTE(review): phiS is never read; it is kept only to preserve the call
// signature. One thread per node; the launch grid must cover (Nx, Ny, Nz)
// exactly -- there is no bounds guard.
__global__ void reini_RS_WENO(
    double* const rs,
    const double* const phiS,
    const double deltaXYZ,
    const double* const d_phiP,
    const double* const d_phiM,
    const double* const phiS0,
    unsigned int Nx,
    unsigned int Ny,
    unsigned int Nz
    )
{
    unsigned int Offset = Nx*Ny*Nz;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz;
    double so, rs_x, rs_y, rs_z, ta, grad_mod;
    double phiMax, phiMin;
    ta = (double)(phiS0[id] > 0.0) - (double)(phiS0[id] < 0.0);
    // Axis X: squared Godunov upwind derivative.
    phiMax = MAX(d_phiP[id], 0.0)*MAX(d_phiP[id], 0.0);
    phiMin = MIN(d_phiM[id], 0.0)*MIN(d_phiM[id], 0.0);
    rs_x = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id], 0.0)*MAX(d_phiM[id], 0.0);
    phiMin = MIN(d_phiP[id], 0.0)*MIN(d_phiP[id], 0.0);
    // fabs, not abs: a bare abs can bind to the integer overload.
    rs_x += 0.5*fabs(ta - 1.0)*MAX(phiMax, phiMin);
    // Axis Y
    phiMax = MAX(d_phiP[id + 1*Offset], 0.0)*MAX(d_phiP[id + 1*Offset], 0.0);
    phiMin = MIN(d_phiM[id + 1*Offset], 0.0)*MIN(d_phiM[id + 1*Offset], 0.0);
    rs_y = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id + 1*Offset], 0.0)*MAX(d_phiM[id + 1*Offset], 0.0);
    phiMin = MIN(d_phiP[id + 1*Offset], 0.0)*MIN(d_phiP[id + 1*Offset], 0.0);
    rs_y += 0.5*fabs(ta - 1.0)*MAX(phiMax, phiMin);
    // Axis Z
    phiMax = MAX(d_phiP[id + 2*Offset], 0.0)*MAX(d_phiP[id + 2*Offset], 0.0);
    phiMin = MIN(d_phiM[id + 2*Offset], 0.0)*MIN(d_phiM[id + 2*Offset], 0.0);
    rs_z = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id + 2*Offset], 0.0)*MAX(d_phiM[id + 2*Offset], 0.0);
    phiMin = MIN(d_phiP[id + 2*Offset], 0.0)*MIN(d_phiP[id + 2*Offset], 0.0);
    rs_z += 0.5*fabs(ta - 1.0)*MAX(phiMax, phiMin);
    grad_mod = sqrt(rs_x + rs_y + rs_z);
    // Smoothed sign avoids the discontinuity of sign() at the interface.
    so = phiS0[id]
       / sqrt(phiS0[id]*phiS0[id] + deltaXYZ*deltaXYZ );
    rs[id] = 1.0*so*(grad_mod - 1.0);
    return;
}
// Advection RHS: rs = u . grad(phi) with per-axis upwinding.
// rsign = sign(u_axis); the 0.5*(rsign+1) factor selects the d_phiP_d
// derivative for positive velocity, 0.5*|rsign-1| selects d_phiM_d for
// negative velocity (both half-weighted when the velocity is exactly 0).
// velocity, d_phiP_d and d_phiM_d hold X, Y, Z components in consecutive
// Nx*Ny*Nz slabs. One thread per node; the launch grid must cover
// (Nx, Ny, Nz) exactly -- there is no bounds guard.
// Fix: the grad_x/grad_y/grad_z intermediates of the original were dead
// code (computed, never read) and have been removed.
__global__ void advect_RS_WENO(
    double* const rs, //RHS
    const double* const velocity,
    const double* const d_phiP_d,
    const double* const d_phiM_d,
    unsigned int Nx,
    unsigned int Ny,
    unsigned int Nz
    )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz,
                 Offset = Nx*Ny*Nz;
    double rs_x, rs_y, rs_z;
    double rsign;
    // Axis X
    rsign = (double)(velocity[id] > 0.0)
          - (double)(velocity[id] < 0.0);
    rs_x = 0.5*(rsign + 1.0)*velocity[id]*d_phiP_d[id]
         + 0.5*fabs(rsign - 1.0)*velocity[id]*d_phiM_d[id];
    // Axis Y
    rsign = (double)(velocity[id + 1*Offset] > 0.0)
          - (double)(velocity[id + 1*Offset] < 0.0);
    rs_y = 0.5*(rsign + 1.0)*velocity[id + 1*Offset]
          *d_phiP_d[id + 1*Offset]
         + 0.5*fabs(rsign - 1.0)*velocity[id + 1*Offset]
          *d_phiM_d[id + 1*Offset];
    // Axis Z
    rsign = (double)(velocity[id + 2*Offset] > 0.0)
          - (double)(velocity[id + 2*Offset] < 0.0);
    rs_z = 0.5*(rsign + 1.0)*velocity[id + 2*Offset]
          *d_phiP_d[id + 2*Offset]
         + 0.5*fabs(rsign - 1.0)*velocity[id + 2*Offset]
          *d_phiM_d[id + 2*Offset];
    rs[id] = rs_x + rs_y + rs_z;
    return;
}
// Enright (deformation-field) test velocity. The cos(PI*t/T) modulation
// makes the flow time-periodic: it reverses at t = period/2 so the exact
// solution returns to its initial state at t = period.
// vel stores the X, Y, Z components in consecutive Nx*Ny*Nz slabs.
__global__
void enrightVelocityProfile(
    double *vel, //Velocity Array
    double *xMesh, //Mesh values
    double *yMesh,
    double *zMesh,
    const int Nx, //Mesh dimensions
    const int Ny,
    const int Nz,
    const double time, //current time
    const double period
    )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz,
                 offset = Nx*Ny*Nz;
    // Hoist the per-node coordinates and the shared time factor; the
    // multiplication order is unchanged, so results are bit-identical.
    const double x = xMesh[id];
    const double y = yMesh[id];
    const double z = zMesh[id];
    const double cosT = cos(PI*time/period);
    vel[id           ] =  2.0*sin(PI*x)*sin(PI*x)*sin(2.0*PI*y)*sin(2.0*PI*z)*cosT;
    vel[id + 1*offset] = -sin(PI*y)*sin(PI*y)*sin(2.0*PI*x)*sin(2.0*PI*z)*cosT;
    vel[id + 2*offset] = -sin(PI*z)*sin(PI*z)*sin(2.0*PI*y)*sin(2.0*PI*x)*cosT;
}
// Fill the coordinate arrays of a uniform Cartesian mesh:
//   coordinate = (index - 5) * spacing
// The fixed shift of 5 places index 5 at the origin -- presumably the
// ghost-cell offset used elsewhere in this file; confirm with the host
// setup before changing it.
__global__
void meshRegularStructured(
    double *xMesh, //Mesh values
    double *yMesh,
    double *zMesh,
    double deltaX,
    double deltaY,
    double deltaZ,
    const int Nx, //Mesh dimensions
    const int Ny,
    const int Nz
    )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz;
    // The unsigned index is promoted to double before the subtraction,
    // so small indices do not wrap around.
    xMesh[id] = ((double)idx - 5.0)*deltaX;
    yMesh[id] = ((double)idy - 5.0)*deltaY;
    zMesh[id] = ((double)idz - 5.0)*deltaZ;
}
// Mirror ghost-cell fill along Z: copies the first/last ncells interior
// planes into the ghost planes outside them, scaled by `direction`
// (pass -1.0 to mirror with a sign flip for odd-symmetric fields).
// Threads span the (x, y) plane of the ghost-padded grid; each thread
// handles all ncells planes on both Z boundaries.
__global__
void cuGhostCellsMirror3dZ(
    double *ghostArray,
    const int ncells,
    const int Nx,
    const int Ny,
    const int Nz,
    double direction
    )
{
    int NxG = Nx + 2*ncells,
        NyG = Ny + 2*ncells;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y;
    const unsigned int base  = idx + NxG*idy; // offset within one Z-plane
    const unsigned int plane = NxG*NyG;       // stride between Z-planes
    // Low-Z boundary: interior plane (ncells+g) -> ghost plane (ncells-g-1).
    for(unsigned int g = 0; g < ncells; g++){
        ghostArray[base + plane*(ncells - g - 1)]
            = ghostArray[base + plane*(ncells + g)]*direction;
    }
    // High-Z boundary: interior (Nz-g-1+ncells) -> ghost (Nz+g+ncells).
    for(unsigned int g = 0; g < ncells; g++){
        ghostArray[base + plane*(Nz + g + ncells)]
            = ghostArray[base + plane*(Nz - g - 1 + ncells)]*direction;
    }
}
// Mirror ghost-cell fill along Y: copies the first/last ncells interior
// rows into the ghost rows beyond them, scaled by `direction` (pass -1.0
// to mirror with a sign flip for odd-symmetric fields).
// Threads span the (x, z) plane of the ghost-padded grid; each thread
// copies all ncells rows on both Y boundaries.
// Fix: the unused local NzG of the original has been removed.
__global__
void cuGhostCellsMirror3dY(
    double *ghostArray,
    const int ncells,
    const int Nx,
    const int Ny,
    const int Nz,
    double direction
    )
{
    int NxG = Nx + 2*ncells,
        NyG = Ny + 2*ncells;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idz = blockDim.y*blockIdx.y + threadIdx.y;
    // Low-Y boundary: interior row (ncells+idy) -> ghost row (ncells-idy-1).
    for(unsigned int idy = 0; idy < ncells; idy++){
        unsigned int id  = idx + NxG*(ncells + idy) + NxG*NyG*idz;
        unsigned int idG = idx + NxG*(ncells - idy - 1) + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
    // High-Y boundary: interior (Ny-idy-1+ncells) -> ghost (Ny+idy+ncells).
    for(unsigned int idy = 0; idy < ncells; idy++){
        unsigned int id  = idx + NxG*(Ny - idy - 1 + ncells) + NxG*NyG*idz;
        unsigned int idG = idx + NxG*(Ny + idy + ncells) + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
}
// Mirror ghost-cell fill along X: copies the first/last ncells interior
// columns into the ghost columns beyond them, scaled by `direction`
// (pass -1.0 to mirror with a sign flip for odd-symmetric fields).
// Threads span the (y, z) plane of the ghost-padded grid; each thread
// copies all ncells columns on both X boundaries.
// Fix: the unused local NzG of the original has been removed (Nz stays
// in the signature for interface compatibility).
__global__
void cuGhostCellsMirror3dX(
    double *ghostArray,
    const int ncells,
    const int Nx,
    const int Ny,
    const int Nz,
    double direction
    )
{
    int NxG = Nx + 2*ncells,
        NyG = Ny + 2*ncells;
    unsigned int idy = blockDim.x*blockIdx.x + threadIdx.x,
                 idz = blockDim.y*blockIdx.y + threadIdx.y;
    // Low-X boundary: interior column (ncells+idx) -> ghost (ncells-idx-1).
    for(unsigned int idx = 0; idx < ncells; idx++){
        unsigned int id  = (ncells + idx) + NxG*idy + NxG*NyG*idz;
        unsigned int idG = (ncells - idx - 1) + NxG*idy + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
    // High-X boundary: interior (Nx-idx-1+ncells) -> ghost (Nx+idx+ncells).
    for(unsigned int idx = 0; idx < ncells; idx++){
        unsigned int id  = (Nx - idx - 1 + ncells) + NxG*idy + NxG*NyG*idz;
        unsigned int idG = (Nx + idx + ncells) + NxG*idy + NxG*NyG*idz;
        ghostArray[idG] = ghostArray[id]*direction;
    }
}
// Plus-biased WENO5 reconstruction of the three directional derivatives,
// scaled by the metric factors d_jbn (curvilinear/Jacobian variant of
// PhiDevPlusParameter). For each axis the five-point stencil (i-2 .. i+2)
// of the one-sided differences d_Phi is multiplied by the axis metric and
// fed to Phi_x_WENO.
// Layout: d_Phi and phi_xyz hold X, Y, Z components in consecutive
// Nx*Ny*Nz slabs; d_jbn is read at slab offsets 0, 4 and 8 -- presumably
// the diagonal entries of a per-axis metric block; confirm against the
// host code that fills d_jbn.
// Near domain faces the stencil indices are folded back onto interior
// nodes instead of reading out of bounds. One thread per node; the launch
// grid must cover (Nx, Ny, Nz) exactly (no bounds guard).
__global__ void PhiDevPlusParameterJB(
    double* const phi_xyz,
    const double* const d_Phi,
    const double* const d_jbn,
    unsigned const int Nx,
    unsigned const int Ny,
    unsigned const int Nz
    )
{
    unsigned const int Offset = Nx*Ny*Nz;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id     = idx + idy*Nx + idz*Nx*Ny,
                 id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
                 id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
                 id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
                 id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
                 id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
                 id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
                 id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
                 id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
                 id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
                 id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
                 id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
                 id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny;
    //Dealing with boundaries
    if(idx == 0    ){id_im1 = id; id_im2 = id_ip1;}
    if(idx == 1    ){id_im2 = id_im1;}
    if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1;}
    if(idx == Nx -2){id_ip2 = id_ip1;}
    if(idy == 0    ){id_jm1 = id; id_jm2 = id_jp1;}
    if(idy == 1    ){id_jm2 = id_jm1;}
    if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1;}
    if(idy == Ny -2){id_jp2 = id_jp1;}
    if(idz == 0    ){id_km1 = id; id_km2 = id_kp1;}
    if(idz == 1    ){id_km2 = id_km1;}
    //BUGFIX: the Z high face previously folded onto id_jm1 (a Y-axis
    //neighbour, copy-paste slip from the Y branch); it must use id_km1,
    //matching the X (id_im1) and Y (id_jm1) branches.
    if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;}
    if(idz == Nz -2){id_kp2 = id_kp1;}
    double beta1, beta2, beta3, beta4, beta5;
    //Axis X (metric slab 0)
    beta1 = d_Phi[id_im2]*d_jbn[id];
    beta2 = d_Phi[id_im1]*d_jbn[id];
    beta3 = d_Phi[id    ]*d_jbn[id];
    beta4 = d_Phi[id_ip1]*d_jbn[id];
    beta5 = d_Phi[id_ip2]*d_jbn[id];
    phi_xyz[id] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    //Axis Y (metric slab 4)
    beta1 = d_Phi[id_jm2 + 1*Offset]*d_jbn[id + 4*Offset];
    beta2 = d_Phi[id_jm1 + 1*Offset]*d_jbn[id + 4*Offset];
    beta3 = d_Phi[id     + 1*Offset]*d_jbn[id + 4*Offset];
    beta4 = d_Phi[id_jp1 + 1*Offset]*d_jbn[id + 4*Offset];
    beta5 = d_Phi[id_jp2 + 1*Offset]*d_jbn[id + 4*Offset];
    phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    //Axis Z (metric slab 8)
    beta1 = d_Phi[id_km2 + 2*Offset]*d_jbn[id + 8*Offset];
    beta2 = d_Phi[id_km1 + 2*Offset]*d_jbn[id + 8*Offset];
    beta3 = d_Phi[id     + 2*Offset]*d_jbn[id + 8*Offset];
    beta4 = d_Phi[id_kp1 + 2*Offset]*d_jbn[id + 8*Offset];
    beta5 = d_Phi[id_kp2 + 2*Offset]*d_jbn[id + 8*Offset];
    phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
    return;
}
//Minus-biased WENO5 reconstruction of the three directional derivatives,
//scaled by the metric factors d_jbn (curvilinear/Jacobian variant of
//PhiDevMinusParameter). The five-point stencil runs from i+3 down to i-1,
//the mirror image of the plus-biased kernel.
//Layout: d_Phi and phi_xyz hold X, Y, Z components in consecutive
//Nx*Ny*Nz slabs; d_jbn is read at slab offsets 0, 4 and 8 -- presumably
//diagonal metric entries; confirm against the host code filling d_jbn.
//One thread per node; the launch grid must cover (Nx, Ny, Nz) exactly
//(no bounds guard).
__global__ void PhiDevMinusParameterJB(
double* const phi_xyz,
const double* const d_Phi,
const double* const d_jbn,
unsigned const int Nx,
unsigned const int Ny,
unsigned const int Nz
)
{
unsigned const int Offset = Nx*Ny*Nz;
unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
idy = blockDim.y*blockIdx.y + threadIdx.y,
idz = blockDim.z*blockIdx.z + threadIdx.z;
//Neighbour indices; *m = minus (previous node), *p = plus (next node).
unsigned int id = idx + idy*Nx + idz*Nx*Ny,
id_im1 = (idx - 1) + idy*Nx + idz*Nx*Ny,
id_im2 = (idx - 2) + idy*Nx + idz*Nx*Ny,
id_ip1 = (idx + 1) + idy*Nx + idz*Nx*Ny,
id_jm1 = idx + (idy - 1)*Nx + idz*Nx*Ny,
id_jm2 = idx + (idy - 2)*Nx + idz*Nx*Ny,
id_jp1 = idx + (idy + 1)*Nx + idz*Nx*Ny,
id_km1 = idx + idy*Nx + (idz - 1)*Nx*Ny,
id_km2 = idx + idy*Nx + (idz - 2)*Nx*Ny,
id_kp1 = idx + idy*Nx + (idz + 1)*Nx*Ny,
id_ip2 = (idx + 2) + idy*Nx + idz*Nx*Ny,
id_jp2 = idx + (idy + 2)*Nx + idz*Nx*Ny,
id_kp2 = idx + idy*Nx + (idz + 2)*Nx*Ny,
id_ip3 = (idx + 3) + idy*Nx + idz*Nx*Ny,
id_jp3 = idx + (idy + 3)*Nx + idz*Nx*Ny,
id_kp3 = idx + idy*Nx + (idz + 3)*Nx*Ny;
//Dealing with boundaries
if(idx == 0 ){id_im1 = id;}
if(idx == Nx -1){id_ip1 = id; id_ip2 = id_im1; id_ip3 = id_im2;}
if(idx == Nx -2){id_ip2 = id_ip1; id_ip3 = id;}
if(idx == Nx -3){id_ip3 = id_ip2;}
if(idy == 0 ){id_jm1 = id;}
if(idy == Ny -1){id_jp1 = id; id_jp2 = id_jm1; id_jp3 = id_jm2;}
if(idy == Ny -2){id_jp2 = id_jp1; id_jp3 = id;}
if(idy == Ny -3){id_jp3 = id_jp2;}
if(idz == 0 ){id_km1 = id;}
if(idz == Nz -1){id_kp1 = id; id_kp2 = id_km1;id_kp3 = id_km2;}
if(idz == Nz -2){id_kp2 = id_kp1; id_kp3 = id;}
if(idz == Nz -3){id_kp3 = id_kp2;}
//NOTE(review): the *m2 indices are not clamped at the low face; they are
//only read via the high-face substitutions above, which is in bounds as
//long as each dimension is >= 4 -- confirm minimum grid sizes upstream.
double beta1, beta2, beta3, beta4, beta5;
//Axis X (stencil i+3 .. i-1, metric slab 0)
beta1 = d_Phi[id_ip3]*d_jbn[id];
beta2 = d_Phi[id_ip2]*d_jbn[id];
beta3 = d_Phi[id_ip1]*d_jbn[id];
beta4 = d_Phi[id ]*d_jbn[id];
beta5 = d_Phi[id_im1]*d_jbn[id];
phi_xyz[id ] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Y (metric slab 4)
beta1 = d_Phi[id_jp3 + 1*Offset]*d_jbn[id + 4*Offset];
beta2 = d_Phi[id_jp2 + 1*Offset]*d_jbn[id + 4*Offset];
beta3 = d_Phi[id_jp1 + 1*Offset]*d_jbn[id + 4*Offset];
beta4 = d_Phi[id + 1*Offset]*d_jbn[id + 4*Offset];
beta5 = d_Phi[id_jm1 + 1*Offset]*d_jbn[id + 4*Offset];
phi_xyz[id + 1*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
//Axis Z (metric slab 8)
beta1 = d_Phi[id_kp3 + 2*Offset]*d_jbn[id + 8*Offset];
beta2 = d_Phi[id_kp2 + 2*Offset]*d_jbn[id + 8*Offset];
beta3 = d_Phi[id_kp1 + 2*Offset]*d_jbn[id + 8*Offset];
beta4 = d_Phi[id + 2*Offset]*d_jbn[id + 8*Offset];
beta5 = d_Phi[id_km1 + 2*Offset]*d_jbn[id + 8*Offset];
phi_xyz[id + 2*Offset] = Phi_x_WENO(beta1, beta2, beta3, beta4, beta5);
return;
}
// Copies a dense (Nx x Ny x Nz) field into the interior of a ghost-padded
// ((Nx+2*gcells) x (Ny+2*gcells) x (Nz+2*gcells)) array: each interior cell is
// shifted by `gcells` along every axis.
//
// Expects a 3-D launch with at least Nx x Ny x Nz threads. Excess threads in a
// partially-filled block now exit early; the original kernel had no bounds
// guard and would write past the end of both arrays whenever the grid did not
// divide the volume exactly.
__global__
void cuSwapToGhost(
    double *ghostArray,
    double *valueArray,
    const int gcells,
    const int Nx,
    const int Ny,
    const int Nz
)
{
    int NxG = Nx + 2*gcells;
    int NyG = Ny + 2*gcells;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    // Bounds guard: grids rarely divide the data evenly.
    if (idx >= (unsigned int)Nx || idy >= (unsigned int)Ny
        || idz >= (unsigned int)Nz)
        return;
    unsigned int id  = idx + idy*Nx + idz*Nx*Ny;
    unsigned int idG = idx + gcells + (idy + gcells)*NxG
                     + (idz + gcells)*NxG*NyG;
    ghostArray[idG] = valueArray[id];
}
// Inverse of cuSwapToGhost: extracts the interior of a ghost-padded
// ((Nx+2*gcells) x (Ny+2*gcells) x (Nz+2*gcells)) array into a dense
// (Nx x Ny x Nz) array.
//
// Expects a 3-D launch with at least Nx x Ny x Nz threads. Excess threads now
// exit early; the original kernel had no bounds guard and wrote out of range
// when the launch grid overshot the volume.
__global__
void cuSwapFromGhost(
    double *valueArray,
    double *ghostArray,
    const int gcells,
    const int Nx,
    const int Ny,
    const int Nz
)
{
    int NxG = Nx + 2*gcells;
    int NyG = Ny + 2*gcells;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    // Bounds guard: grids rarely divide the data evenly.
    if (idx >= (unsigned int)Nx || idy >= (unsigned int)Ny
        || idz >= (unsigned int)Nz)
        return;
    unsigned int id  = idx + idy*Nx + idz*Nx*Ny;
    unsigned int idG = idx + gcells + (idy + gcells)*NxG
                     + (idz + gcells)*NxG*NyG;
    valueArray[id] = ghostArray[idG];
}
// Level-set reinitialization right-hand side: rs = sign(phiS0) * (|grad phi| - 1),
// where |grad phi| is built by upwind selection between the one-sided
// derivative fields d_phiM (backward) and d_phiP (forward). The 0.5*(ta+1) and
// 0.5*|ta-1| factors select the branch matching the sign of phiS0 per cell.
// The sign function is smeared with the local smoothing length deltaXYZ.
//
// Layout: x/y/z derivative components are stacked in slabs of Offset = Nx*Ny*Nz.
// NOTE(review): the `phiS` argument is accepted but never read here — confirm
// whether it can be dropped from the call sites.
//
// Expects a 3-D launch with at least Nx x Ny x Nz threads; excess threads now
// exit early (the original kernel had no bounds guard and wrote out of range
// for non-exact grids).
__global__ void reini_RS_WENOJB(
    double* const rs,
    const double* const phiS,
    const double* const deltaXYZ,
    const double* const d_phiP,
    const double* const d_phiM,
    const double* const phiS0,
    unsigned int Nx,
    unsigned int Ny,
    unsigned int Nz
)
{
    unsigned int Offset = Nx*Ny*Nz;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x,
                 idy = blockDim.y*blockIdx.y + threadIdx.y,
                 idz = blockDim.z*blockIdx.z + threadIdx.z;
    // Bounds guard: grids rarely divide the data evenly.
    if (idx >= Nx || idy >= Ny || idz >= Nz)
        return;
    unsigned int id = idx + Nx*idy + Nx*Ny*idz;
    double so, rs_x, rs_y, rs_z, ta, grad_mod;
    double phiMax, phiMin;
    // ta = sign(phiS0[id]) in {-1, 0, +1}.
    ta = (double)(phiS0[id] > 0.0) - (double)(phiS0[id] < 0.0);
    // Squared upwind derivative, axis X.
    phiMax = MAX(d_phiP[id ], 0.0)*MAX(d_phiP[id ], 0.0);
    phiMin = MIN(d_phiM[id ], 0.0)*MIN(d_phiM[id ], 0.0);
    rs_x = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id ], 0.0)*MAX(d_phiM[id ], 0.0);
    phiMin = MIN(d_phiP[id ], 0.0)*MIN(d_phiP[id ], 0.0);
    rs_x += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
    // Squared upwind derivative, axis Y (slab 1 of d_phi*).
    phiMax = MAX(d_phiP[id + 1*Offset], 0.0)
            *MAX(d_phiP[id + 1*Offset], 0.0);
    phiMin = MIN(d_phiM[id + 1*Offset], 0.0)
            *MIN(d_phiM[id + 1*Offset], 0.0);
    rs_y = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id + 1*Offset], 0.0)
            *MAX(d_phiM[id + 1*Offset], 0.0);
    phiMin = MIN(d_phiP[id + 1*Offset], 0.0)
            *MIN(d_phiP[id + 1*Offset], 0.0);
    rs_y += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
    // Squared upwind derivative, axis Z (slab 2 of d_phi*).
    phiMax = MAX(d_phiP[id + 2*Offset], 0.0)
            *MAX(d_phiP[id + 2*Offset], 0.0);
    phiMin = MIN(d_phiM[id + 2*Offset], 0.0)
            *MIN(d_phiM[id + 2*Offset], 0.0);
    rs_z = 0.5*(ta + 1.0)*MAX(phiMax, phiMin);
    phiMax = MAX(d_phiM[id + 2*Offset], 0.0)
            *MAX(d_phiM[id + 2*Offset], 0.0);
    phiMin = MIN(d_phiP[id + 2*Offset], 0.0)
            *MIN(d_phiP[id + 2*Offset], 0.0);
    rs_z += 0.5*abs(ta - 1.0)*MAX(phiMax, phiMin);
    grad_mod = sqrt(rs_x + rs_y + rs_z);
    // Smeared sign function: phi0 / sqrt(phi0^2 + delta^2).
    so = phiS0[id]
       / sqrt(phiS0[id]*phiS0[id] + deltaXYZ[id]*deltaXYZ[id] );
    rs[id] = 1.0*so*(grad_mod - 1.0);
    return;
}
|
6e6df111d3f8a6423a22a85f1c5c4a2436a24f83.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Example showing the use of CUFFT for fast 1D-convolution using FFT.
* This sample is the same as simpleCUFFT, except that it uses a callback
* function to perform the pointwise multiply and scale, on input to the
* inverse transform.
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
// This is the callback routine prototype
static __device__ hipfftComplex ComplexPointwiseMulAndScale(void * a, size_t index, void * cb_info, void *sharedmem);
typedef struct _cb_params{
Complex *filter;
float scale;
} cb_params;
// CUFFT load callback for the inverse transform: for element `index` of the
// transformed signal `a`, multiply by the matching transformed filter tap and
// apply the 1/N scale, both read from the cb_params block passed via
// `cb_info`. `sharedmem` is unused (required by the callback signature).
static __device__ hipfftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem)
{
cb_params * my_params = (cb_params *)cb_info;
return (hipfftComplex)ComplexScale(ComplexMul(((Complex *)a)[index],
(my_params->filter)[index]),
my_params->scale);
}
// Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT
__device__ cufftCallbackLoadC myOwnCallbackPtr = ComplexPointwiseMulAndScale;
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: delegates straight to runTest.
// (The SDK sample's original SM2.0 capability check is intentionally disabled.)
int main(int argc, char **argv)
{
    return runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUFFT callbacks
////////////////////////////////////////////////////////////////////////////////
// Runs the CUFFT-with-load-callback convolution demo on the device chosen by
// argv[1] (default 0): forward-transforms a random signal and filter, performs
// the pointwise multiply + 1/N scale inside the inverse transform via a load
// callback, and cross-checks the result against a host convolution.
// Returns EXIT_SUCCESS/EXIT_FAILURE from the comparison, or EXIT_WAIVED when
// the CUFFT callback feature is not licensed.
//
// Fix vs. original: cufftXtSetCallback was registered twice (once unchecked
// after the license test, once wrapped in checkCudaErrors); it is now
// registered exactly once and still error-checked.
int runTest(int argc, char **argv)
{
    printf("[simpleCUFFT_callback] is starting...\n");

    int devID = 0;
    if (argc == 2) {
        devID = atoi(argv[1]);
    }
    printf("select device : %d\n", devID);
    hipSetDevice(devID);

    hipError_t error;
    hipDeviceProp_t deviceProp;
    error = hipGetDeviceProperties(&deviceProp, devID);
    if (error != hipSuccess)
    {
        printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n",
               hipGetErrorString(error), error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
               devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    // Host signal: random real samples.
    Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
    {
        h_signal[i].x = rand() / (float)RAND_MAX;
        h_signal[i].y = 0;
    }

    // Host filter kernel: random real taps.
    Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
    for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
    {
        h_filter_kernel[i].x = rand() / (float)RAND_MAX;
        h_filter_kernel[i].y = 0;
    }

    // Zero-pad the signal and wrap the filter kernel for FFT convolution.
    Complex *h_padded_signal;
    Complex *h_padded_filter_kernel;
    int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
                           h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
    int mem_size = sizeof(Complex) * new_size;

    // Device copies of the padded signal and filter.
    Complex *d_signal;
    checkCudaErrors(hipMalloc((void **)&d_signal, mem_size));
    checkCudaErrors(hipMemcpy(d_signal, h_padded_signal, mem_size,
                              hipMemcpyHostToDevice));
    Complex *d_filter_kernel;
    checkCudaErrors(hipMalloc((void **)&d_filter_kernel, mem_size));
    checkCudaErrors(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
                              hipMemcpyHostToDevice));

    // One plan for the forward transforms, one for the inverse transform that
    // carries the pointwise-multiply load callback.
    hipfftHandle plan, cb_plan;
    size_t work_size;
    checkCudaErrors(hipfftCreate(&plan));
    checkCudaErrors(hipfftCreate(&cb_plan));
    checkCudaErrors(hipfftMakePlan1d(plan, new_size, HIPFFT_C2C, 1, &work_size));
    checkCudaErrors(hipfftMakePlan1d(cb_plan, new_size, HIPFFT_C2C, 1, &work_size));

    // Callback parameters: device address of the transformed filter plus the
    // 1/N scale the inverse transform needs.
    cb_params h_params;
    h_params.filter = d_filter_kernel;
    h_params.scale = 1.0f / new_size;
    cb_params *d_params;
    checkCudaErrors(hipMalloc((void **)&d_params, sizeof(cb_params)));
    checkCudaErrors(hipMemcpy(d_params, &h_params, sizeof(cb_params),
                              hipMemcpyHostToDevice));

    // The host needs its own copy of the device-side callback pointer.
    cufftCallbackLoadC hostCopyOfCallbackPtr;
    checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr,
                                        myOwnCallbackPtr,
                                        sizeof(hostCopyOfCallbackPtr)));

    // Associate the load callback with the inverse plan (exactly once).
    hipfftResult status = cufftXtSetCallback(cb_plan,
                                             (void **)&hostCopyOfCallbackPtr,
                                             CUFFT_CB_LD_COMPLEX,
                                             (void **)&d_params);
    if (status == HIPFFT_LICENSE_ERROR)
    {
        printf("This sample requires a valid license file.\n");
        printf("The file was either not found, out of date, or otherwise invalid.\n");
        return EXIT_WAIVED;
    }
    checkCudaErrors(status);

    // Forward-transform signal and kernel.
    printf("Transforming signal hipfftExecC2C\n");
    checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD));
    checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD));

    // Inverse transform; the callback does the pointwise multiply on the way in.
    printf("Transforming signal back hipfftExecC2C\n");
    checkCudaErrors(hipfftExecC2C(cb_plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD));

    // Fetch the device result (reuses the padded-signal host buffer).
    Complex *h_convolved_signal = h_padded_signal;
    checkCudaErrors(hipMemcpy(h_convolved_signal, d_signal, mem_size,
                              hipMemcpyDeviceToHost));

    // Reference convolution on the host.
    Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    Convolve(h_signal, SIGNAL_SIZE,
             h_filter_kernel, FILTER_KERNEL_SIZE,
             h_convolved_signal_ref);

    // Compare device result against the host reference with an L2 tolerance.
    bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);

    // Destroy CUFFT contexts and release all host/device memory.
    checkCudaErrors(hipfftDestroy(plan));
    checkCudaErrors(hipfftDestroy(cb_plan));
    free(h_signal);
    free(h_filter_kernel);
    free(h_padded_signal);
    free(h_padded_filter_kernel);
    free(h_convolved_signal_ref);
    checkCudaErrors(hipFree(d_signal));
    checkCudaErrors(hipFree(d_filter_kernel));
    checkCudaErrors(hipFree(d_params));

    return bTestResult ? EXIT_SUCCESS : EXIT_FAILURE;
}
// Zero-pads `signal` to signal_size + maxRadius samples and lays the filter
// kernel out in wrap-around order (taps [minRadius..end) at the front, zeros
// in the middle, taps [0..minRadius) at the tail), as FFT convolution expects.
// Both output buffers are malloc'd; ownership passes to the caller.
// Returns the common padded length.
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
            const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;
    const int new_size  = signal_size + maxRadius;

    // Padded signal: original samples followed by zeros.
    Complex *sig_buf = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(sig_buf, signal, signal_size * sizeof(Complex));
    memset(sig_buf + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
    *padded_signal = sig_buf;

    // Padded filter in wrap-around order.
    Complex *filt_buf = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(filt_buf, filter_kernel + minRadius, maxRadius * sizeof(Complex));
    memset(filt_buf + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
    memcpy(filt_buf + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
    *padded_filter_kernel = filt_buf;

    return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
// Host reference convolution. For each output sample i it accumulates
// signal[k] * filter_kernel[minRadius - j] over the window j in
// (-maxRadius, minRadius], clipping k = i + j to the valid signal range.
void Convolve(const Complex *signal, int signal_size,
              const Complex *filter_kernel, int filter_kernel_size,
              Complex *filtered_signal)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;

    for (int i = 0; i < signal_size; ++i)
    {
        // Accumulate locally, store once at the end.
        Complex acc;
        acc.x = 0;
        acc.y = 0;
        for (int j = 1 - maxRadius; j <= minRadius; ++j)
        {
            const int k = i + j;
            if (k >= 0 && k < signal_size)
            {
                acc = ComplexAdd(acc, ComplexMul(signal[k], filter_kernel[minRadius - j]));
            }
        }
        filtered_signal[i] = acc;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
// Component-wise complex addition: (a.x + b.x) + i(a.y + b.y).
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
    Complex sum = {a.x + b.x, a.y + b.y};
    return sum;
}
// Complex scale
// Scales a complex value by the real factor s.
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
    Complex scaled = {s * a.x, s * a.y};
    return scaled;
}
// Complex multiplication
// Complex multiplication: (ax·bx − ay·by) + i(ax·by + ay·bx).
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    Complex prod = {a.x * b.x - a.y * b.y,
                    a.x * b.y + a.y * b.x};
    return prod;
}
| 6e6df111d3f8a6423a22a85f1c5c4a2436a24f83.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Example showing the use of CUFFT for fast 1D-convolution using FFT.
* This sample is the same as simpleCUFFT, except that it uses a callback
* function to perform the pointwise multiply and scale, on input to the
* inverse transform.
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
// This is the callback routine prototype
static __device__ cufftComplex ComplexPointwiseMulAndScale(void * a, size_t index, void * cb_info, void *sharedmem);
typedef struct _cb_params{
Complex *filter;
float scale;
} cb_params;
// CUFFT load callback for the inverse transform: for element `index` of the
// transformed signal `a`, multiply by the matching transformed filter tap and
// apply the 1/N scale, both read from the cb_params block passed via
// `cb_info`. `sharedmem` is unused (required by the callback signature).
static __device__ cufftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem)
{
cb_params * my_params = (cb_params *)cb_info;
return (cufftComplex)ComplexScale(ComplexMul(((Complex *)a)[index],
(my_params->filter)[index]),
my_params->scale);
}
// Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT
__device__ cufftCallbackLoadC myOwnCallbackPtr = ComplexPointwiseMulAndScale;
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Program entry point: delegates straight to runTest.
// (The SDK sample's original SM2.0 capability check is intentionally disabled.)
int main(int argc, char **argv)
{
    return runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUFFT callbacks
////////////////////////////////////////////////////////////////////////////////
// Runs the CUFFT-with-load-callback convolution demo on the device chosen by
// argv[1] (default 0): forward-transforms a random signal and filter, performs
// the pointwise multiply + 1/N scale inside the inverse transform via a load
// callback, and cross-checks the result against a host convolution.
// Returns EXIT_SUCCESS/EXIT_FAILURE from the comparison, or EXIT_WAIVED when
// the CUFFT callback feature is not licensed.
//
// Fix vs. original: cufftXtSetCallback was registered twice (once unchecked
// after the license test, once wrapped in checkCudaErrors); it is now
// registered exactly once and still error-checked.
int runTest(int argc, char **argv)
{
    printf("[simpleCUFFT_callback] is starting...\n");

    int devID = 0;
    if (argc == 2) {
        devID = atoi(argv[1]);
    }
    printf("select device : %d\n", devID);
    cudaSetDevice(devID);

    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDeviceProperties(&deviceProp, devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n",
               cudaGetErrorString(error), error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
               devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    // Host signal: random real samples.
    Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
    {
        h_signal[i].x = rand() / (float)RAND_MAX;
        h_signal[i].y = 0;
    }

    // Host filter kernel: random real taps.
    Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
    for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
    {
        h_filter_kernel[i].x = rand() / (float)RAND_MAX;
        h_filter_kernel[i].y = 0;
    }

    // Zero-pad the signal and wrap the filter kernel for FFT convolution.
    Complex *h_padded_signal;
    Complex *h_padded_filter_kernel;
    int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
                           h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
    int mem_size = sizeof(Complex) * new_size;

    // Device copies of the padded signal and filter.
    Complex *d_signal;
    checkCudaErrors(cudaMalloc((void **)&d_signal, mem_size));
    checkCudaErrors(cudaMemcpy(d_signal, h_padded_signal, mem_size,
                               cudaMemcpyHostToDevice));
    Complex *d_filter_kernel;
    checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size));
    checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
                               cudaMemcpyHostToDevice));

    // One plan for the forward transforms, one for the inverse transform that
    // carries the pointwise-multiply load callback.
    cufftHandle plan, cb_plan;
    size_t work_size;
    checkCudaErrors(cufftCreate(&plan));
    checkCudaErrors(cufftCreate(&cb_plan));
    checkCudaErrors(cufftMakePlan1d(plan, new_size, CUFFT_C2C, 1, &work_size));
    checkCudaErrors(cufftMakePlan1d(cb_plan, new_size, CUFFT_C2C, 1, &work_size));

    // Callback parameters: device address of the transformed filter plus the
    // 1/N scale the inverse transform needs.
    cb_params h_params;
    h_params.filter = d_filter_kernel;
    h_params.scale = 1.0f / new_size;
    cb_params *d_params;
    checkCudaErrors(cudaMalloc((void **)&d_params, sizeof(cb_params)));
    checkCudaErrors(cudaMemcpy(d_params, &h_params, sizeof(cb_params),
                               cudaMemcpyHostToDevice));

    // The host needs its own copy of the device-side callback pointer.
    cufftCallbackLoadC hostCopyOfCallbackPtr;
    checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr,
                                         myOwnCallbackPtr,
                                         sizeof(hostCopyOfCallbackPtr)));

    // Associate the load callback with the inverse plan (exactly once).
    cufftResult status = cufftXtSetCallback(cb_plan,
                                            (void **)&hostCopyOfCallbackPtr,
                                            CUFFT_CB_LD_COMPLEX,
                                            (void **)&d_params);
    if (status == CUFFT_LICENSE_ERROR)
    {
        printf("This sample requires a valid license file.\n");
        printf("The file was either not found, out of date, or otherwise invalid.\n");
        return EXIT_WAIVED;
    }
    checkCudaErrors(status);

    // Forward-transform signal and kernel.
    printf("Transforming signal cufftExecC2C\n");
    checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD));
    checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD));

    // Inverse transform; the callback does the pointwise multiply on the way in.
    printf("Transforming signal back cufftExecC2C\n");
    checkCudaErrors(cufftExecC2C(cb_plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE));

    // Fetch the device result (reuses the padded-signal host buffer).
    Complex *h_convolved_signal = h_padded_signal;
    checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size,
                               cudaMemcpyDeviceToHost));

    // Reference convolution on the host.
    Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
    Convolve(h_signal, SIGNAL_SIZE,
             h_filter_kernel, FILTER_KERNEL_SIZE,
             h_convolved_signal_ref);

    // Compare device result against the host reference with an L2 tolerance.
    bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);

    // Destroy CUFFT contexts and release all host/device memory.
    checkCudaErrors(cufftDestroy(plan));
    checkCudaErrors(cufftDestroy(cb_plan));
    free(h_signal);
    free(h_filter_kernel);
    free(h_padded_signal);
    free(h_padded_filter_kernel);
    free(h_convolved_signal_ref);
    checkCudaErrors(cudaFree(d_signal));
    checkCudaErrors(cudaFree(d_filter_kernel));
    checkCudaErrors(cudaFree(d_params));

    return bTestResult ? EXIT_SUCCESS : EXIT_FAILURE;
}
// Zero-pads `signal` to signal_size + maxRadius samples and lays the filter
// kernel out in wrap-around order (taps [minRadius..end) at the front, zeros
// in the middle, taps [0..minRadius) at the tail), as FFT convolution expects.
// Both output buffers are malloc'd; ownership passes to the caller.
// Returns the common padded length.
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
            const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;
    const int new_size  = signal_size + maxRadius;

    // Padded signal: original samples followed by zeros.
    Complex *sig_buf = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(sig_buf, signal, signal_size * sizeof(Complex));
    memset(sig_buf + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
    *padded_signal = sig_buf;

    // Padded filter in wrap-around order.
    Complex *filt_buf = (Complex *)malloc(sizeof(Complex) * new_size);
    memcpy(filt_buf, filter_kernel + minRadius, maxRadius * sizeof(Complex));
    memset(filt_buf + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
    memcpy(filt_buf + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
    *padded_filter_kernel = filt_buf;

    return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
// Host reference convolution. For each output sample i it accumulates
// signal[k] * filter_kernel[minRadius - j] over the window j in
// (-maxRadius, minRadius], clipping k = i + j to the valid signal range.
void Convolve(const Complex *signal, int signal_size,
              const Complex *filter_kernel, int filter_kernel_size,
              Complex *filtered_signal)
{
    const int minRadius = filter_kernel_size / 2;
    const int maxRadius = filter_kernel_size - minRadius;

    for (int i = 0; i < signal_size; ++i)
    {
        // Accumulate locally, store once at the end.
        Complex acc;
        acc.x = 0;
        acc.y = 0;
        for (int j = 1 - maxRadius; j <= minRadius; ++j)
        {
            const int k = i + j;
            if (k >= 0 && k < signal_size)
            {
                acc = ComplexAdd(acc, ComplexMul(signal[k], filter_kernel[minRadius - j]));
            }
        }
        filtered_signal[i] = acc;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
// Component-wise complex addition: (a.x + b.x) + i(a.y + b.y).
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
    Complex sum = {a.x + b.x, a.y + b.y};
    return sum;
}
// Complex scale
// Scales a complex value by the real factor s.
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
    Complex scaled = {s * a.x, s * a.y};
    return scaled;
}
// Complex multiplication
// Complex multiplication: (ax·bx − ay·by) + i(ax·by + ay·bx).
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
    Complex prod = {a.x * b.x - a.y * b.y,
                    a.x * b.y + a.y * b.x};
    return prod;
}
|
c4742cdc63760bb3031be222877976b2715272e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <cstdlib>
#include "waveio.h"
#include "wave.cuh"
#include <sys/time.h>
// Doubles the wave's sample buffer by appending a copy of the existing samples.
// file_length is advanced by 2 * data_length — presumably a byte count, two
// bytes per appended short sample (TODO confirm against waveio.h).
void wave_duplicate(wave_t* wave) {
    int i;
    // Grow through a temporary so a failed realloc neither leaks the old
    // buffer nor leaves wave->data NULL (the original assigned unchecked).
    short *grown = (short*) realloc(wave->data, sizeof(short) * 2 * wave->data_length);
    if (grown == NULL) {
        return;  // out of memory: leave the wave unchanged
    }
    wave->data = grown;
    for (i = 0; i < wave->data_length; i++) {
        wave->data[i + wave->data_length] = wave->data[i];
    }
    wave->file_length += 2 * wave->data_length;
    wave->data_length *= 2;
}
// Returns the size in MiB of an array of `array_size` shorts.
double file_size(int array_size) {
    const double kMiB = 1024.0 * 1024.0;
    return ((double) (sizeof(short) * array_size)) / kMiB;
}
// Benchmark driver: for i = 0..7, loads the input wave (argv[1]), doubles its
// sample buffer i times via wave_duplicate (so iteration i processes 2^i x the
// data), runs the mean filter on the GPU, and prints the elapsed time per size.
// NOTE(review): `sound` is re-read every iteration and never freed — one wave
// leaks per iteration; confirm whether waveio.h provides a free routine.
// NOTE(review): no HIP error checking on any allocation, copy, or launch.
int main(int argc, const char *argv[]) {
for (int i = 0; i < 8; i++) {
unsigned long long micros;
struct timeval t1, t2;
wave_t *sound;
/* loads the wave from file */
sound = wave_read(argv[1]);
printf("Cuda:\n");
// Double the sample buffer i times.
for (int j = 0; j < i; j++)
wave_duplicate(sound);
short *d_data, *d_buffer;
size_t size = sizeof(short) * sound->data_length;
hipMalloc((void**)&d_data, size);
hipMalloc((void**)&d_buffer, size);
// Timed region covers H2D copy, kernel, and D2H copy.
gettimeofday(&t1, NULL);
hipMemcpy(d_data, sound->data, size, hipMemcpyHostToDevice);
hipMemcpy(d_buffer, d_data, size, hipMemcpyDeviceToDevice);
int nBlocks = 32768;
int blockSize = 512;
/* edit here to change filter */
hipLaunchKernelGGL(( wave_filter_mean) , dim3(nBlocks), dim3(blockSize) , 0, 0, d_data, sound->data_length, d_buffer);
// wave_filter_median <<< nBlocks, blockSize >>>(d_data, sound->data_length, d_buffer);
// wave_filter_gaussian <<< nBlocks, blockSize >>>(d_data, sound->data_length, d_buffer);
//hipLaunchKernelGGL(( wave_filter_mean) , dim3(nBlocks), dim3(blockSize) , 0, 0, d_data, sound->data_length, d_buffer);
hipMemcpy(sound->data, d_data, size, hipMemcpyDeviceToHost);
// NOTE(review): this 1-byte device-to-device copy looks like a fencing hack
// before reading the clock — confirm; hipDeviceSynchronize() would be explicit.
hipMemcpy(d_buffer, d_data, 1, hipMemcpyDeviceToDevice);
gettimeofday(&t2, NULL);
micros = (t2.tv_sec * 1000000 + t2.tv_usec) - (t1.tv_sec * 1000000 + t1.tv_usec);
printf(" %d: %.3f\n", i, micros / 1000.0); // milliseconds
//printf(" %d: %.3f\n", i, file_size(sound->data_length));
hipFree(d_data);
hipFree(d_buffer);
}
// wave_write(argv[2], sound);
return 0;
}
| c4742cdc63760bb3031be222877976b2715272e3.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <cstdlib>
#include "waveio.h"
#include "wave.cuh"
#include <sys/time.h>
// Doubles the wave's sample buffer by appending a copy of the existing samples.
// file_length is advanced by 2 * data_length — presumably a byte count, two
// bytes per appended short sample (TODO confirm against waveio.h).
void wave_duplicate(wave_t* wave) {
    int i;
    // Grow through a temporary so a failed realloc neither leaks the old
    // buffer nor leaves wave->data NULL (the original assigned unchecked).
    short *grown = (short*) realloc(wave->data, sizeof(short) * 2 * wave->data_length);
    if (grown == NULL) {
        return;  // out of memory: leave the wave unchanged
    }
    wave->data = grown;
    for (i = 0; i < wave->data_length; i++) {
        wave->data[i + wave->data_length] = wave->data[i];
    }
    wave->file_length += 2 * wave->data_length;
    wave->data_length *= 2;
}
// Returns the size in MiB of an array of `array_size` shorts.
double file_size(int array_size) {
    const double kMiB = 1024.0 * 1024.0;
    return ((double) (sizeof(short) * array_size)) / kMiB;
}
// Benchmark driver: for i = 0..7, loads the input wave (argv[1]), doubles its
// sample buffer i times via wave_duplicate (so iteration i processes 2^i x the
// data), runs the mean filter on the GPU, and prints the elapsed time per size.
// NOTE(review): `sound` is re-read every iteration and never freed — one wave
// leaks per iteration; confirm whether waveio.h provides a free routine.
// NOTE(review): no CUDA error checking on any allocation, copy, or launch.
int main(int argc, const char *argv[]) {
for (int i = 0; i < 8; i++) {
unsigned long long micros;
struct timeval t1, t2;
wave_t *sound;
/* loads the wave from file */
sound = wave_read(argv[1]);
printf("Cuda:\n");
// Double the sample buffer i times.
for (int j = 0; j < i; j++)
wave_duplicate(sound);
short *d_data, *d_buffer;
size_t size = sizeof(short) * sound->data_length;
cudaMalloc((void**)&d_data, size);
cudaMalloc((void**)&d_buffer, size);
// Timed region covers H2D copy, kernel, and D2H copy.
gettimeofday(&t1, NULL);
cudaMemcpy(d_data, sound->data, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_buffer, d_data, size, cudaMemcpyDeviceToDevice);
int nBlocks = 32768;
int blockSize = 512;
/* edit here to change filter */
wave_filter_mean <<< nBlocks, blockSize >>>(d_data, sound->data_length, d_buffer);
// wave_filter_median <<< nBlocks, blockSize >>>(d_data, sound->data_length, d_buffer);
// wave_filter_gaussian <<< nBlocks, blockSize >>>(d_data, sound->data_length, d_buffer);
// wave_filter_mean <<< nBlocks, blockSize >>>(d_data, sound->data_length, d_buffer);
cudaMemcpy(sound->data, d_data, size, cudaMemcpyDeviceToHost);
// NOTE(review): this 1-byte device-to-device copy looks like a fencing hack
// before reading the clock — confirm; cudaDeviceSynchronize() would be explicit.
cudaMemcpy(d_buffer, d_data, 1, cudaMemcpyDeviceToDevice);
gettimeofday(&t2, NULL);
micros = (t2.tv_sec * 1000000 + t2.tv_usec) - (t1.tv_sec * 1000000 + t1.tv_usec);
printf(" %d: %.3f\n", i, micros / 1000.0); // milliseconds
//printf(" %d: %.3f\n", i, file_size(sound->data_length));
cudaFree(d_data);
cudaFree(d_buffer);
}
// wave_write(argv[2], sound);
return 0;
}
|
18ec98d3069c8d5232f8c9bd24fc3afeb15c08df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
// Contrastive-loss forward pass. Per pair i (features bottom[0], bottom[1],
// similarity label bottom[2]):
//   similar (label != 0):    loss_i = ||a_i - b_i||^2
//   dissimilar, legacy:      loss_i = max(margin - ||a_i - b_i||^2, 0)
//   dissimilar, non-legacy:  loss_i = max(margin - ||a_i - b_i||, 0)^2
// Final loss = sum_i loss_i / (2 * num). The squared distances are computed on
// the device (CUDA or GreenTea/OpenCL path) into dist_sq_; the per-pair loss
// accumulation runs on the host.
template<typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// diff_ = a - b; diff_sq_ = diff_^2; dist_sq_ = row sums of diff_sq_.
caffe_gpu_sub(count, bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(count, diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2), diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(CblasNoTrans, bottom[0]->num(), bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(), Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
// Same three steps on the OpenCL (GreenTea) backend.
greentea_gpu_sub<Dtype>(this->device_->id(), count,
(cl_mem) (bottom[0]->gpu_data()), 0,
(cl_mem) (bottom[1]->gpu_data()), 0,
(cl_mem) (diff_.mutable_gpu_data()), 0);
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) (diff_.mutable_gpu_data()),
0, // a_i-b_i
Dtype(2), (cl_mem) (diff_sq_.mutable_gpu_data()),
0); // (a_i-b_i)^2
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans,
bottom[0]->num(), bottom[0]->channels(),
Dtype(1.0), (cl_mem) (diff_sq_.gpu_data()),
0, // (a_i-b_i)^2
(cl_mem) (summer_vec_.gpu_data()), 0, Dtype(0.0),
(cl_mem) (dist_sq_.mutable_gpu_data()), 0);
#endif // USE_GREENTEA
}
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version = this->layer_param_.contrastive_loss_param()
.legacy_version();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
if (legacy_version) {
// NOTE(review): unqualified ::max (upstream Caffe uses std::max) —
// likely a hipify artifact; confirm a global-namespace max is in scope.
loss += ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
} else {
Dtype dist = ::max(margin - (Dtype) sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
loss += dist * dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
#ifdef USE_ROCM
// Per-element contrastive-loss gradient kernel. For element i (pair n = i /
// channels, similarity label y[n], cached elementwise difference diff and
// squared distance dist_sq):
//   similar pair:     grad = alpha * diff[i]
//   dissimilar pair:  grad = beta while the margin is violated (mdist > 0),
//                     else 0; legacy uses beta = -alpha, non-legacy scales by
//                     (margin - dist) / (dist + 1e-4) * diff[i] (the 1e-4
//                     guards against division by zero at dist == 0).
// alpha carries the top-level loss weight, the 1/num factor, and the +/- sign
// for the two bottom blobs (set by the caller).
template<typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version,
const Dtype alpha, const Dtype* y,
const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
bottom_diff[i] = 0;
}
}
}
}
#endif // USE_ROCM
template<typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version = this->layer_param_.contrastive_loss_param()
.legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0]
/ static_cast<Dtype>(bottom[0]->num());
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
CLLBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
viennacl::ocl::kernel &oclk_cll = program.get_kernel(
CL_KERNEL_SELECT("cll_backward"));
viennacl::ocl::enqueue(
oclk_cll(
count, channels, margin, legacy_version ? 1 : 0, alpha,
WrapHandle((cl_mem) (bottom[2]->gpu_data()), &ctx),
WrapHandle((cl_mem) (diff_.gpu_data()), &ctx),
WrapHandle((cl_mem) (dist_sq_.gpu_data()), &ctx),
WrapHandle((cl_mem) (bottom[i]->mutable_gpu_diff()), &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
| 18ec98d3069c8d5232f8c9bd24fc3afeb15c08df.cu | #include <algorithm>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_sub(count, bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(count, diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2), diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(CblasNoTrans, bottom[0]->num(), bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(), Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
greentea_gpu_sub<Dtype>(this->device_->id(), count,
(cl_mem) (bottom[0]->gpu_data()), 0,
(cl_mem) (bottom[1]->gpu_data()), 0,
(cl_mem) (diff_.mutable_gpu_data()), 0);
greentea_gpu_powx<Dtype>(this->device_->id(), count,
(cl_mem) (diff_.mutable_gpu_data()),
0, // a_i-b_i
Dtype(2), (cl_mem) (diff_sq_.mutable_gpu_data()),
0); // (a_i-b_i)^2
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans,
bottom[0]->num(), bottom[0]->channels(),
Dtype(1.0), (cl_mem) (diff_sq_.gpu_data()),
0, // (a_i-b_i)^2
(cl_mem) (summer_vec_.gpu_data()), 0, Dtype(0.0),
(cl_mem) (dist_sq_.mutable_gpu_data()), 0);
#endif // USE_GREENTEA
}
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version = this->layer_param_.contrastive_loss_param()
.legacy_version();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
if (legacy_version) {
loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
} else {
Dtype dist = std::max(margin - (Dtype) sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
loss += dist * dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version,
const Dtype alpha, const Dtype* y,
const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
bottom_diff[i] = 0;
}
}
}
}
#endif // USE_CUDA
template<typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version = this->layer_param_.contrastive_loss_param()
.legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0]
/ static_cast<Dtype>(bottom[0]->num());
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
CLLBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram(
this->device_->id());
viennacl::ocl::kernel &oclk_cll = program.get_kernel(
CL_KERNEL_SELECT("cll_backward"));
viennacl::ocl::enqueue(
oclk_cll(
count, channels, margin, legacy_version ? 1 : 0, alpha,
WrapHandle((cl_mem) (bottom[2]->gpu_data()), &ctx),
WrapHandle((cl_mem) (diff_.gpu_data()), &ctx),
WrapHandle((cl_mem) (dist_sq_.gpu_data()), &ctx),
WrapHandle((cl_mem) (bottom[i]->mutable_gpu_diff()), &ctx)),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
|
1b513308b8f38df7aa42b4c4ac9d828f70a68684.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "modules/detectron/group_spatial_softmax_op.h"
namespace caffe2 {
namespace {
__global__ void GroupSpatialSoftmaxKernel(const int num, const int A, const int W,
const int H, const float* Xdata, float* Pdata, const int num_classes) {
// Loop through labels (N x A x H x W)
CUDA_1D_KERNEL_LOOP(index, num * A * H * W) {
int D = num_classes * A;
int x = index % W;
int y = (index / W) % H;
int a = (index / (W * H)) % A;
int i = index / W / H / A;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
__global__ void SumProbsKernel(const int N, const int A, const int W,
const int H, const float* Ydata, const float* dYdata,
float* sum_probs_data, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * W * H) {
int D = num_classes * A;
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
sum_probs_data[i] = 0.0;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = n * (H * W * D) + c * (H * W) + y * W + x;
sum_probs_data[i] += (Ydata[idx] * dYdata[idx]);
}
}
}
__global__ void SubSumKernel(
const int N, const int A, const int W, const int H,
const float* sum_probs_data, float* dXdata, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * (A * num_classes) * W * H) {
int D = num_classes * A;
int x = i % W;
int y = (i / W) % H;
int a = ((i / (W * H)) % D) / num_classes;
int n = i / W / H / D;
int idx = n * (H * W * A) + a * (H * W) + y * W + x;
dXdata[i] = (dXdata[i] - sum_probs_data[idx]);
}
}
} // namespace
template <>
bool GroupSpatialSoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
auto* P = Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax
DCHECK_EQ(X.ndim(), 4);
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
hipLaunchKernelGGL(( GroupSpatialSoftmaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, A, W, H, Xdata, Pdata, num_classes_);
return true;
}
template<>
bool GroupSpatialSoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0); // Probabilities from softmax
auto& dY = Input(1);
DCHECK_EQ(Y.ndim(), 4);
int N = Y.dim32(0);
int D = Y.dim32(1);
int H = Y.dim32(2);
int W = Y.dim32(3);
int A = D / num_classes_;
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
if (sum_probs_.size() != N * A * H * W) {
ReinitializeTensor(&sum_probs_, {N * A * H * W}, at::dtype<float>().device(CUDA));
}
const float* Ydata = Y.data<float>();
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
float* sum_probs_data = sum_probs_.mutable_data<float>();
math::Set<float, CUDAContext>(
sum_probs_.size(), 0.0f, sum_probs_data, &context_);
// Complete math:
// J_ij = h_i (delta_ij - h_j)
// d x_i = sum_j d h_ij = sum_j J_ij * dy_j
// = sum_j h_i (delta_ij - h_j) * dy_j
// = h_i dy_i - (sum_j h_i h_j dy_j)
// = h_i dy_i - h_i sum_j h_j dy_j
// Step 0: dx = dy
context_.Copy<float, CUDAContext, CUDAContext>(Y.size(), dYdata, dXdata);
// Step 1: s = Sum(dY[j] * Y[j])
hipLaunchKernelGGL(( SumProbsKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N, A, W, H, Ydata, dYdata, sum_probs_data, num_classes_);
// Step 2: dX[i] = dX[i] - s
hipLaunchKernelGGL(( SubSumKernel), dim3(CAFFE_GET_BLOCKS(Y.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
N, A, W, H, sum_probs_.data<float>(), dXdata, num_classes_);
// Step 3: dX[i] = Y[i] * dX[i]
math::Mul<float, CUDAContext>(Y.size(), dXdata, Ydata, dXdata, &context_);
return true;
}
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmax,
GroupSpatialSoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmaxGradient,
GroupSpatialSoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
| 1b513308b8f38df7aa42b4c4ac9d828f70a68684.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "modules/detectron/group_spatial_softmax_op.h"
namespace caffe2 {
namespace {
__global__ void GroupSpatialSoftmaxKernel(const int num, const int A, const int W,
const int H, const float* Xdata, float* Pdata, const int num_classes) {
// Loop through labels (N x A x H x W)
CUDA_1D_KERNEL_LOOP(index, num * A * H * W) {
int D = num_classes * A;
int x = index % W;
int y = (index / W) % H;
int a = (index / (W * H)) % A;
int i = index / W / H / A;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
__global__ void SumProbsKernel(const int N, const int A, const int W,
const int H, const float* Ydata, const float* dYdata,
float* sum_probs_data, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * A * W * H) {
int D = num_classes * A;
int x = i % W;
int y = (i / W) % H;
int a = (i / (W * H)) % A;
int n = i / (W * H * A);
sum_probs_data[i] = 0.0;
for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) {
int idx = n * (H * W * D) + c * (H * W) + y * W + x;
sum_probs_data[i] += (Ydata[idx] * dYdata[idx]);
}
}
}
__global__ void SubSumKernel(
const int N, const int A, const int W, const int H,
const float* sum_probs_data, float* dXdata, const int num_classes) {
CUDA_1D_KERNEL_LOOP(i, N * (A * num_classes) * W * H) {
int D = num_classes * A;
int x = i % W;
int y = (i / W) % H;
int a = ((i / (W * H)) % D) / num_classes;
int n = i / W / H / D;
int idx = n * (H * W * A) + a * (H * W) + y * W + x;
dXdata[i] = (dXdata[i] - sum_probs_data[idx]);
}
}
} // namespace
template <>
bool GroupSpatialSoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
int N = X.dim32(0);
int D = X.dim32(1);
int H = X.dim32(2);
int W = X.dim32(3);
int A = D / num_classes_;
auto* P = Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax
DCHECK_EQ(X.ndim(), 4);
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
GroupSpatialSoftmaxKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, A, W, H, Xdata, Pdata, num_classes_);
return true;
}
template<>
bool GroupSpatialSoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0); // Probabilities from softmax
auto& dY = Input(1);
DCHECK_EQ(Y.ndim(), 4);
int N = Y.dim32(0);
int D = Y.dim32(1);
int H = Y.dim32(2);
int W = Y.dim32(3);
int A = D / num_classes_;
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
if (sum_probs_.size() != N * A * H * W) {
ReinitializeTensor(&sum_probs_, {N * A * H * W}, at::dtype<float>().device(CUDA));
}
const float* Ydata = Y.data<float>();
const float* dYdata = dY.data<float>();
float* dXdata = dX->mutable_data<float>();
float* sum_probs_data = sum_probs_.mutable_data<float>();
math::Set<float, CUDAContext>(
sum_probs_.size(), 0.0f, sum_probs_data, &context_);
// Complete math:
// J_ij = h_i (delta_ij - h_j)
// d x_i = sum_j d h_ij = sum_j J_ij * dy_j
// = sum_j h_i (delta_ij - h_j) * dy_j
// = h_i dy_i - (sum_j h_i h_j dy_j)
// = h_i dy_i - h_i sum_j h_j dy_j
// Step 0: dx = dy
context_.Copy<float, CUDAContext, CUDAContext>(Y.size(), dYdata, dXdata);
// Step 1: s = Sum(dY[j] * Y[j])
SumProbsKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N, A, W, H, Ydata, dYdata, sum_probs_data, num_classes_);
// Step 2: dX[i] = dX[i] - s
SubSumKernel<<<CAFFE_GET_BLOCKS(Y.size()), CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
N, A, W, H, sum_probs_.data<float>(), dXdata, num_classes_);
// Step 3: dX[i] = Y[i] * dX[i]
math::Mul<float, CUDAContext>(Y.size(), dXdata, Ydata, dXdata, &context_);
return true;
}
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmax,
GroupSpatialSoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GroupSpatialSoftmaxGradient,
GroupSpatialSoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
dd2bdc7cfb19823467d55e0b9f2bdafd293f7b01.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Log_V.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
Log_V), dim3(gridBlock),dim3(threadBlock), 0, 0, a,out,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Log_V), dim3(gridBlock),dim3(threadBlock), 0, 0, a,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Log_V), dim3(gridBlock),dim3(threadBlock), 0, 0, a,out,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | dd2bdc7cfb19823467d55e0b9f2bdafd293f7b01.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Log_V.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Log_V<<<gridBlock,threadBlock>>>(a,out,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Log_V<<<gridBlock,threadBlock>>>(a,out,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Log_V<<<gridBlock,threadBlock>>>(a,out,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
707051f32abf6341a44ea10a84bb84c7d2534191.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/allclose_kernel.h"
namespace phi {
template <typename T>
__global__ void AllcloseCUDAKernel(const T* in_data,
const T* other_data,
const double rtol,
const double atol,
bool equal_nan,
int num,
bool* out_data) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
bool val;
for (int i = idx; i < num; i += blockDim.x * gridDim.x) {
const T a = in_data[i], b = other_data[i];
if (isnan(a) || isnan(b)) {
val = equal_nan && isnan(a) == isnan(b);
} else {
T left = (a > b ? a - b : b - a);
T right = atol + (b > 0 ? rtol * b : (-rtol) * b);
T diff = (left > right ? left - right : right - left);
val = a == b || left <= right || diff <= 1e-15;
}
if (!val) *out_data = false;
}
}
template <typename T, typename Context>
void AllCloseKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const Scalar& rtol,
const Scalar& atol,
bool equal_nan,
DenseTensor* out) {
double rtol_v, atol_v;
if (rtol.dtype() == DataType::FLOAT64) {
rtol_v = rtol.to<double>();
} else if (rtol.dtype() == DataType::FLOAT32) {
rtol_v = rtol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Rtol) type must be double or float, but get %s.",
rtol.dtype()));
}
if (atol.dtype() == DataType::FLOAT64) {
atol_v = atol.to<double>();
} else if (atol.dtype() == DataType::FLOAT32) {
atol_v = atol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Atol) type must be double or float, but get %s.",
atol.dtype()));
}
VLOG(3) << "rtol and atol is : " << rtol_v << " " << atol_v;
const T* in_data = x.data<T>();
const T* other_data = y.data<T>();
bool* out_data = dev_ctx.template Alloc<bool>(out);
int num = x.numel();
int block = 1024;
int grid = (block - 1 + num) / block;
grid = (grid > block) ? block : grid;
#ifdef PADDLE_WITH_HIP
hipMemset(out_data, true, sizeof(bool));
#else
hipMemset(out_data, true, sizeof(bool));
#endif
hipLaunchKernelGGL(( AllcloseCUDAKernel<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
in_data, other_data, rtol_v, atol_v, equal_nan, num, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(
allclose, GPU, ALL_LAYOUT, phi::AllCloseKernel, float, double) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
}
| 707051f32abf6341a44ea10a84bb84c7d2534191.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/allclose_kernel.h"
namespace phi {
template <typename T>
__global__ void AllcloseCUDAKernel(const T* in_data,
const T* other_data,
const double rtol,
const double atol,
bool equal_nan,
int num,
bool* out_data) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
bool val;
for (int i = idx; i < num; i += blockDim.x * gridDim.x) {
const T a = in_data[i], b = other_data[i];
if (isnan(a) || isnan(b)) {
val = equal_nan && isnan(a) == isnan(b);
} else {
T left = (a > b ? a - b : b - a);
T right = atol + (b > 0 ? rtol * b : (-rtol) * b);
T diff = (left > right ? left - right : right - left);
val = a == b || left <= right || diff <= 1e-15;
}
if (!val) *out_data = false;
}
}
template <typename T, typename Context>
void AllCloseKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const Scalar& rtol,
const Scalar& atol,
bool equal_nan,
DenseTensor* out) {
double rtol_v, atol_v;
if (rtol.dtype() == DataType::FLOAT64) {
rtol_v = rtol.to<double>();
} else if (rtol.dtype() == DataType::FLOAT32) {
rtol_v = rtol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Rtol) type must be double or float, but get %s.",
rtol.dtype()));
}
if (atol.dtype() == DataType::FLOAT64) {
atol_v = atol.to<double>();
} else if (atol.dtype() == DataType::FLOAT32) {
atol_v = atol.to<float>();
} else {
PADDLE_THROW(phi::errors::InvalidArgument(
"Input (Atol) type must be double or float, but get %s.",
atol.dtype()));
}
VLOG(3) << "rtol and atol is : " << rtol_v << " " << atol_v;
const T* in_data = x.data<T>();
const T* other_data = y.data<T>();
bool* out_data = dev_ctx.template Alloc<bool>(out);
int num = x.numel();
int block = 1024;
int grid = (block - 1 + num) / block;
grid = (grid > block) ? block : grid;
#ifdef PADDLE_WITH_HIP
hipMemset(out_data, true, sizeof(bool));
#else
cudaMemset(out_data, true, sizeof(bool));
#endif
AllcloseCUDAKernel<T><<<grid, block, 0, dev_ctx.stream()>>>(
in_data, other_data, rtol_v, atol_v, equal_nan, num, out_data);
}
} // namespace phi
PD_REGISTER_KERNEL(
allclose, GPU, ALL_LAYOUT, phi::AllCloseKernel, float, double) {
kernel->OutputAt(0).SetDataType(phi::DataType::BOOL);
}
|
cf8d617d9932171b9224d8f44d1471e28d90be2b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include "definitions.h"
#include "cudaFunctions.h"
/*
find the best mutant, the best one will return by pointer
return True if succeed, otherwise - False
*/
__host__ BOOL computeOnGPU(Mutant* bestMutant, char* seq1, char* seq2, int lenSeq1, int lenSeq2,
double weights[], BOOL isMax, int startOffset,int endOffset)
{
int numOfOffsetsToCalc = endOffset - startOffset + 1;
if(numOfOffsetsToCalc == 0) // if is it equal to 0, no need to do somthing
return True;
// error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// determine the size of memory to allocate
int sizeMemSeq1 = lenSeq1 * sizeof(char);
int sizeMemSeq2 = lenSeq2 * sizeof(char);
int sizeWeights = 4 * sizeof(double);
int sizeResults = numOfOffsetsToCalc * sizeof(CudaResult);
// allocate on GPU
char* gpuSeq1;
char* gpuSeq2;
double* gpuWeights;
CudaResult* gpuResults;
err = hipMalloc((void**) &gpuSeq1, sizeMemSeq1);
if (err != hipSuccess)
{
fprintf(stderr, "failed to allocate device memory - %s\n", hipGetErrorString(err));
return False;
}
err = hipMalloc((void**) &gpuSeq2, sizeMemSeq2);
if (err != hipSuccess)
{
hipFree(gpuSeq1);
fprintf(stderr, "failed to allocate device memory - %s\n", hipGetErrorString(err));
return False;
}
err = hipMalloc((void**) &gpuWeights, sizeWeights);
if (err != hipSuccess)
{
cudaFreeAll(gpuSeq1, gpuSeq2, NULL);
fprintf(stderr, "failed to allocate device memory - %s\n", hipGetErrorString(err));
return False;
}
err = hipMalloc((void**) &gpuResults, sizeResults);
if (err != hipSuccess)
{
cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, NULL);
fprintf(stderr, "failed to allocate device memory - %s\n", hipGetErrorString(err));
return False;
}
// copy data from host to the GPU memory
if(hipMemcpy(gpuSeq1, seq1, sizeMemSeq1, hipMemcpyHostToDevice) != hipSuccess
|| hipMemcpy(gpuSeq2, seq2, sizeMemSeq2, hipMemcpyHostToDevice) != hipSuccess
|| hipMemcpy(gpuWeights, weights, sizeWeights, hipMemcpyHostToDevice) != hipSuccess)
{
cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
fprintf(stderr, "failed to copy data from host to device \n");
return False;
}
// launch the Kernel
int threadsPerBlock = calcNumThreadsPerBlock(numOfOffsetsToCalc);
int blocksPerGrid = (numOfOffsetsToCalc + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( findBestMutant), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, gpuResults, gpuSeq1, gpuSeq2, lenSeq1, lenSeq2, gpuWeights,
isMax, startOffset, endOffset);
err = hipGetLastError();
if (err != hipSuccess)
{
cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
fprintf(stderr, "failed to launch the kernel - %s\n", hipGetErrorString(err));
return False;
}
CudaResult* cpuResults = (CudaResult*) malloc(sizeResults);
if(!cpuResults)
{
cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
fprintf(stderr, "malloc failed\n");
return False;
}
err = hipMemcpy(cpuResults, gpuResults,sizeResults, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, cpuResults, NULL);
fprintf(stderr, "failed to copy result from device to host - %s\n", hipGetErrorString(err));
return False;
}
for(int i = 0; i < numOfOffsetsToCalc; i ++)
{
// check if better result found
if ((isMax && cpuResults[i].score > bestMutant->score) || // looking for highest score
(!isMax && cpuResults[i].score < bestMutant->score)) // looking for lowest score
{
bestMutant->score = cpuResults[i].score;
bestMutant->offset = cpuResults[i].mutantOffset;
// check if substitute char is needed
if(cpuResults[i].charToPut != '-')
substituteChar(bestMutant, cpuResults, i, seq2, lenSeq2);
}
}
cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
return True;
}
__host__ int calcNumThreadsPerBlock(int numOfOffsetsToCalc)
{
// calculate threads needed and find power of 2 that fits the job
int numThreadsNeeded = 1;
while (numThreadsNeeded < numOfOffsetsToCalc)
numThreadsNeeded *= 2;
int threadsInBlock = 1;
int i = threadsInBlock * 2;
while(i < 1024 + 1)
{
if(numThreadsNeeded % numOfOffsetsToCalc > numThreadsNeeded % i)
threadsInBlock = i;
i *= 2;
}
return threadsInBlock;
}
/*
substitute a character based on given results
this functuion invoked only after we checked if we need to substitute at all
*/
__host__ void substituteChar(Mutant* mutant, CudaResult* result, int i, char* seq, int lenSeq)
{
memcpy(mutant->mutantSeq, seq, lenSeq * sizeof(char));
mutant->mutantSeq[lenSeq+1] = '\0';
mutant->mutantSeq[result[i].charOffset] = result[i].charToPut;
}
/*
Kernel: each thread evaluates one alignment offset in [startOffset, endOffset]
and records the best single-character mutation found for that offset into
result[tId]. result must have room for (endOffset - startOffset + 1) entries.
*/
__global__ void findBestMutant(CudaResult* result, char* seq1, char* seq2, int lenSeq1, int lenSeq2,
                            double weights[], BOOL isMax, int startOffset,int endOffset)
{
    int tId = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = tId + startOffset;
    // guard: the grid may hold more threads than offsets to evaluate
    if(offset < startOffset || offset > endOffset)
        return;
    Mutant* foundedMutant = findMutantBestScoreGivenOffset(seq1, seq2, lenSeq1, lenSeq2, offset, weights, isMax);
    if(!foundedMutant)
        return;
    // initialization: '-' means "no substitution was made for this offset"
    result[tId].charToPut = '-';
    result[tId].charOffset = -1;
    // find where the mutant differs from seq2 (keeps the LAST difference
    // if there are several)
    for(int i = 0; i < lenSeq2; i++)
    {
        if (foundedMutant->mutantSeq[i] != seq2[i])
        {
            result[tId].charToPut = foundedMutant->mutantSeq[i];
            result[tId].charOffset = i;
        }
    }
    result[tId].mutantOffset = offset;
    result[tId].score = foundedMutant->score;
    cudaFreeMutant(foundedMutant);
}
/*
Given an alignment offset, search for the single-character substitution in seq2
that best improves (isMax) or worsens (!isMax) the alignment score against seq1.
Returns a heap-allocated Mutant (caller releases it with cudaFreeMutant/free),
or NULL on allocation failure.
*/
__host__ __device__ Mutant* findMutantBestScoreGivenOffset(char* seq1, char* seq2, int lenSeq1, int lenSeq2,
                                        int offset, double weights[], BOOL isMax)
{
    char* resultsSeq;
    double effect = 0;          // best score delta found so far
    double checkedEffect = 0;   // score delta of the candidate being examined
    Mutant* bestMutant = (Mutant*) malloc(sizeof(Mutant));
    if (!bestMutant)
    {
        printf("malloc failed\n");
        return NULL;
    }
    // initialize the mutant as a null-terminated copy of seq2
    bestMutant->mutantSeq = (char*)malloc(lenSeq2 * sizeof(char) + 1);
    if (!bestMutant->mutantSeq)
    {
        printf("malloc failed\n");
        free(bestMutant);   // was leaked here before
        return NULL;
    }
    memcpy(bestMutant->mutantSeq, seq2, lenSeq2 * sizeof(char));
    bestMutant->mutantSeq[lenSeq2] = '\0';  // buffer holds lenSeq2 + 1 bytes
    bestMutant->length = lenSeq2;
    resultsSeq = getResultsSequence(seq1, seq2, lenSeq1, lenSeq2, offset);
    if (!resultsSeq)    // getResultsSequence returns NULL on malloc failure
    {
        free(bestMutant->mutantSeq);
        free(bestMutant);
        return NULL;
    }
    double startScore = calcScore(resultsSeq,lenSeq2,weights);
    free(resultsSeq);   // was leaked here before
    bestMutant->score = startScore;
    const char alphabet[ALPHABET_LEN] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O',
                                        'P','Q','R','S','T','U','V','W','X','Y','Z'};
    // try every position of seq2 ...
    for (int i = 0; i < lenSeq2; i++)
    {
        // ... against every letter of the alphabet
        for (int j = 0; j < ALPHABET_LEN; j++)
        {
            // substitution is only allowed when the pair is not conservative
            if (!inConservativeGroups(alphabet[j], seq2[i]))
            {
                checkedEffect = calcChangeBetweenTwoChars(seq1[i + offset], seq2[i], alphabet[j], weights);
                // keep the best delta seen so far
                // NOTE(review): an earlier substitution at another position is
                // not reverted when a better one is found, so mutantSeq can
                // differ from seq2 in more than one place while the score only
                // reflects the last delta — confirm this is intended.
                if ((isMax && checkedEffect > effect) ||    // looking for highest score
                    (!isMax && checkedEffect < effect))     // looking for lowest score
                {
                    bestMutant->mutantSeq[i] = alphabet[j]; // substitute
                    effect = checkedEffect;
                }
            }
        }
    }
    bestMutant->score = startScore + effect;
    return bestMutant;
}
// Classify the relation between an aligned character pair:
// '*' identical, ':' conservative group, '.' semi-conservative group,
// '-' gap in the second sequence, ' ' otherwise.
__host__ __device__ char getSign(char c1, char c2)
{
    char sign = ' ';
    if (c1 == c2)
        sign = '*';
    else if (inConservativeGroups(c1, c2))
        sign = ':';
    else if (inSemiConservativeGroups(c1, c2))
        sign = '.';
    else if (c2 == '-')
        sign = '-';
    return sign;
}
// Build the per-position comparison string ('*' ':' '.' '-' ' ') of seq2
// against seq1 shifted by offset. Returns a heap string of size2 characters
// plus a terminating '\0' (caller frees it), or NULL on allocation failure.
__host__ __device__ char* getResultsSequence(char* seq1, char* seq2 , int size1, int size2, int offset)
{
    char* results = (char*)malloc(size2 * sizeof(char) + 1);
    if(!results)
    {
        printf("malloc failed\n");
        return NULL;
    }
    for (int i = 0; i < size2; i++)
        results[i] = getSign(seq1[i + offset], seq2[i]);
    // terminate: the extra byte was allocated but previously left uninitialized
    results[size2] = '\0';
    return results;
}
// Score a comparison string produced by getResultsSequence:
// weights[0]*stars - weights[1]*colons - weights[2]*points - weights[3]*spaces.
__host__ __device__ double calcScore(char* results, int resultsLen, double* weights)
{
    int stars = 0;
    int colons = 0;
    int points = 0;
    int spaces = 0;
    double totalScore = 0;
    // iterate exactly resultsLen characters; the previous bound
    // (resultsLen + 1) read one byte past the valid data
    for (int i = 0; i < resultsLen; i++)
    {
        if (results[i] == '*')
            stars++;
        else if (results[i] == ':')
            colons++;
        else if (results[i] == '.')
            points++;
        else if (results[i] == ' ')
            spaces++;
    }
    totalScore = weights[0] * stars - weights[1] * colons - weights[2] * points - weights[3] * spaces;
    return totalScore;
}
/*
Calculate how much substituting `anotherChar` for `seq2Char` (both aligned
against `seq1Char`) changes the alignment score: candidate pair score minus
current pair score. A '-' substitute contributes nothing.
*/
__host__ __device__ double calcChangeBetweenTwoChars(char seq1Char, char seq2Char, char anotherChar, double weights[])
{
    if(anotherChar == '-')
        return 0;

    // score of the existing pair (seq1Char, seq2Char)
    double currentScore;
    if(seq1Char == seq2Char)                                // star
        currentScore = weights[0];
    else if(inConservativeGroups(seq2Char, seq1Char))       // colon
        currentScore = -weights[1];
    else if(inSemiConservativeGroups(seq2Char, seq1Char))   // point
        currentScore = -weights[2];
    else                                                    // space
        currentScore = -weights[3];

    // score of the candidate pair (seq1Char, anotherChar)
    double checkedScore;
    if(seq1Char == anotherChar)
        checkedScore = weights[0];
    else if(inConservativeGroups(anotherChar, seq1Char))
        checkedScore = -weights[1];
    else if(inSemiConservativeGroups(anotherChar, seq1Char))
        checkedScore = -weights[2];
    else
        checkedScore = -weights[3];

    return checkedScore - currentScore;
}
// True when c1 and c2 both appear in at least one conservative group.
__host__ __device__ BOOL inConservativeGroups(char c1, char c2)
{
    const char* groups[CONSERVATIVE_GROUPS_LEN] = {"NDEQ", "NEQK", "STA", "MILV", "QHRK", "NHQK",
                                                   "FYW", "HY", "MILF"};
    int i = 0;
    while (i < CONSERVATIVE_GROUPS_LEN)
    {
        if (isExistInSeq(groups[i], c1, c2))
            return True;
        i++;
    }
    return False;
}
// True when c1 and c2 both appear in at least one semi-conservative group.
__host__ __device__ BOOL inSemiConservativeGroups(char c1, char c2)
{
    const char* groups[SEMI_CONSERVATIVE_GROUPS_LEN] = {"SAG", "ATV", "CSA", "SGND", "STPA", "STNK",
                                                        "NEQHRK", "NDEQHK", "SNDEQK", "HFY", "FVLIM"};
    int i = 0;
    while (i < SEMI_CONSERVATIVE_GROUPS_LEN)
    {
        if (isExistInSeq(groups[i], c1, c2))
            return True;
        i++;
    }
    return False;
}
// True when both characters appear somewhere in the group string seq.
__host__ __device__ BOOL isExistInSeq(const char* seq, char c1, char c2)
{
    return (strChar(seq, c1) != NULL && strChar(seq, c2) != NULL) ? True : False;
}
// Device-safe strlen replacement: characters before the terminator.
__host__ __device__ int strLength(const char *str)
{
    const char* p = str;
    while (*p != '\0')
        p++;
    return (int)(p - str);
}
// Device-safe strcmp replacement: negative/zero/positive like standard strcmp.
__host__ __device__ int strCompare(const char* s1, const char* s2)
{
    for (; *s1 != '\0' && *s1 == *s2; s1++, s2++)
        ;
    return *(const unsigned char*)s1 - *(const unsigned char*)s2;
}
// Device-safe strchr replacement: pointer to the first occurrence of c in s,
// or NULL when absent. Searching for '\0' yields the terminator's address.
__host__ __device__ const char* strChar(const char* s, const char c)
{
    for (;; s++)
    {
        if (*s == c)
            return s;
        if (*s == '\0')
            return NULL;
    }
}
/*
Free a NULL-terminated variadic list of device pointers with hipFree.
Stops at the first failure, but always cleans up the va_list.
*/
__host__ void cudaFreeAll(void *ptr, ...)
{
    va_list args;
    va_start(args, ptr);
    while (ptr) // until ptr != NULL
    {
        hipError_t err = hipFree(ptr);
        if (err != hipSuccess)
        {
            fprintf(stderr, "failed to free device data\n");
            va_end(args);   // was missing on this early-return path (UB per C spec)
            return;
        }
        ptr = va_arg(args, void*); //get next var
    }
    va_end(args);
}
// Free a Mutant allocated in device code: sequence buffer first, then struct.
__device__ void cudaFreeMutant(Mutant* mutant)
{
    free(mutant->mutantSeq);
    free(mutant);
} | cf8d617d9932171b9224d8f44d1471e28d90be2b.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include "definitions.h"
#include "cudaFunctions.h"
/*
Find the best mutant across offsets [startOffset, endOffset] on the GPU;
the winner is returned through bestMutant (only updated when a better score
than bestMutant->score is found).
Returns True on success, False on any CUDA/allocation failure.
*/
__host__ BOOL computeOnGPU(Mutant* bestMutant, char* seq1, char* seq2, int lenSeq1, int lenSeq2,
                        double weights[], BOOL isMax, int startOffset,int endOffset)
{
    int numOfOffsetsToCalc = endOffset - startOffset + 1;
    if(numOfOffsetsToCalc == 0) // nothing to compute
        return True;
    // error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    // determine the size of memory to allocate
    int sizeMemSeq1 = lenSeq1 * sizeof(char);
    int sizeMemSeq2 = lenSeq2 * sizeof(char);
    int sizeWeights = 4 * sizeof(double);
    int sizeResults = numOfOffsetsToCalc * sizeof(CudaResult);
    // allocate on GPU
    char* gpuSeq1;
    char* gpuSeq2;
    double* gpuWeights;
    CudaResult* gpuResults;
    err = cudaMalloc((void**) &gpuSeq1, sizeMemSeq1);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "failed to allocate device memory - %s\n", cudaGetErrorString(err));
        return False;
    }
    err = cudaMalloc((void**) &gpuSeq2, sizeMemSeq2);
    if (err != cudaSuccess)
    {
        cudaFree(gpuSeq1);
        fprintf(stderr, "failed to allocate device memory - %s\n", cudaGetErrorString(err));
        return False;
    }
    err = cudaMalloc((void**) &gpuWeights, sizeWeights);
    if (err != cudaSuccess)
    {
        cudaFreeAll(gpuSeq1, gpuSeq2, NULL);
        fprintf(stderr, "failed to allocate device memory - %s\n", cudaGetErrorString(err));
        return False;
    }
    err = cudaMalloc((void**) &gpuResults, sizeResults);
    if (err != cudaSuccess)
    {
        cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, NULL);
        fprintf(stderr, "failed to allocate device memory - %s\n", cudaGetErrorString(err));
        return False;
    }
    // copy data from host to the GPU memory
    if(cudaMemcpy(gpuSeq1, seq1, sizeMemSeq1, cudaMemcpyHostToDevice) != cudaSuccess
        || cudaMemcpy(gpuSeq2, seq2, sizeMemSeq2, cudaMemcpyHostToDevice) != cudaSuccess
        || cudaMemcpy(gpuWeights, weights, sizeWeights, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
        fprintf(stderr, "failed to copy data from host to device \n");
        return False;
    }
    // launch the kernel: one thread per offset
    int threadsPerBlock = calcNumThreadsPerBlock(numOfOffsetsToCalc);
    int blocksPerGrid = (numOfOffsetsToCalc + threadsPerBlock - 1) / threadsPerBlock;
    findBestMutant<<<blocksPerGrid, threadsPerBlock>>>(gpuResults, gpuSeq1, gpuSeq2, lenSeq1, lenSeq2, gpuWeights,
                                                        isMax, startOffset, endOffset);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
        fprintf(stderr, "failed to launch the kernel - %s\n", cudaGetErrorString(err));
        return False;
    }
    CudaResult* cpuResults = (CudaResult*) malloc(sizeResults);
    if(!cpuResults)
    {
        cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
        fprintf(stderr, "malloc failed\n");
        return False;
    }
    err = cudaMemcpy(cpuResults, gpuResults,sizeResults, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        // cpuResults is HOST memory: it must be released with free(), not
        // handed to cudaFreeAll (which calls cudaFree on every pointer)
        cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
        free(cpuResults);
        fprintf(stderr, "failed to copy result from device to host - %s\n", cudaGetErrorString(err));
        return False;
    }
    for(int i = 0; i < numOfOffsetsToCalc; i ++)
    {
        // check if a better result was found
        if ((isMax && cpuResults[i].score > bestMutant->score) ||   // looking for highest score
            (!isMax && cpuResults[i].score < bestMutant->score))    // looking for lowest score
        {
            bestMutant->score = cpuResults[i].score;
            bestMutant->offset = cpuResults[i].mutantOffset;
            // check if a substitute char is needed
            if(cpuResults[i].charToPut != '-')
                substituteChar(bestMutant, cpuResults, i, seq2, lenSeq2);
        }
    }
    cudaFreeAll(gpuSeq1, gpuSeq2, gpuWeights, gpuResults, NULL);
    free(cpuResults);   // was leaked on the success path
    return True;
}
// Pick a power-of-two thread-block size (<= 1024) for the given work count.
__host__ int calcNumThreadsPerBlock(int numOfOffsetsToCalc)
{
    // smallest power of two covering the requested offset count
    int pow2 = 1;
    while (pow2 < numOfOffsetsToCalc)
        pow2 <<= 1;
    // keep the largest power-of-two candidate whose remainder against the
    // rounded count beats the remainder left by the work count itself
    int chosen = 1;
    int candidate = 2;
    while (candidate <= 1024)
    {
        if (pow2 % numOfOffsetsToCalc > pow2 % candidate)
            chosen = candidate;
        candidate <<= 1;
    }
    return chosen;
}
/*
Substitute a single character in the mutant sequence based on the given results.
Invoked only after the caller checked that a substitution is needed
(result[i].charToPut != '-').
Copies seq into mutant->mutantSeq, terminates it, then applies the substitution.
*/
__host__ void substituteChar(Mutant* mutant, CudaResult* result, int i, char* seq, int lenSeq)
{
    memcpy(mutant->mutantSeq, seq, lenSeq * sizeof(char));
    // mutantSeq is allocated with lenSeq + 1 bytes, so the terminator belongs
    // at index lenSeq; the previous index (lenSeq + 1) wrote past the buffer.
    mutant->mutantSeq[lenSeq] = '\0';
    mutant->mutantSeq[result[i].charOffset] = result[i].charToPut;
}
/*
Kernel: each thread evaluates one alignment offset in [startOffset, endOffset]
and records the best single-character mutation found for that offset into
result[tId]. result must have room for (endOffset - startOffset + 1) entries.
*/
__global__ void findBestMutant(CudaResult* result, char* seq1, char* seq2, int lenSeq1, int lenSeq2,
                            double weights[], BOOL isMax, int startOffset,int endOffset)
{
    int tId = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = tId + startOffset;
    // guard: the grid may hold more threads than offsets to evaluate
    if(offset < startOffset || offset > endOffset)
        return;
    Mutant* foundedMutant = findMutantBestScoreGivenOffset(seq1, seq2, lenSeq1, lenSeq2, offset, weights, isMax);
    if(!foundedMutant)
        return;
    // initialization: '-' means "no substitution was made for this offset"
    result[tId].charToPut = '-';
    result[tId].charOffset = -1;
    // find where the mutant differs from seq2 (keeps the LAST difference
    // if there are several)
    for(int i = 0; i < lenSeq2; i++)
    {
        if (foundedMutant->mutantSeq[i] != seq2[i])
        {
            result[tId].charToPut = foundedMutant->mutantSeq[i];
            result[tId].charOffset = i;
        }
    }
    result[tId].mutantOffset = offset;
    result[tId].score = foundedMutant->score;
    cudaFreeMutant(foundedMutant);
}
/*
Given an alignment offset, search for the single-character substitution in seq2
that best improves (isMax) or worsens (!isMax) the alignment score against seq1.
Returns a heap-allocated Mutant (caller releases it with cudaFreeMutant/free),
or NULL on allocation failure.
*/
__host__ __device__ Mutant* findMutantBestScoreGivenOffset(char* seq1, char* seq2, int lenSeq1, int lenSeq2,
                                        int offset, double weights[], BOOL isMax)
{
    char* resultsSeq;
    double effect = 0;          // best score delta found so far
    double checkedEffect = 0;   // score delta of the candidate being examined
    Mutant* bestMutant = (Mutant*) malloc(sizeof(Mutant));
    if (!bestMutant)
    {
        printf("malloc failed\n");
        return NULL;
    }
    // initialize the mutant as a null-terminated copy of seq2
    bestMutant->mutantSeq = (char*)malloc(lenSeq2 * sizeof(char) + 1);
    if (!bestMutant->mutantSeq)
    {
        printf("malloc failed\n");
        free(bestMutant);   // was leaked here before
        return NULL;
    }
    memcpy(bestMutant->mutantSeq, seq2, lenSeq2 * sizeof(char));
    bestMutant->mutantSeq[lenSeq2] = '\0';  // buffer holds lenSeq2 + 1 bytes
    bestMutant->length = lenSeq2;
    resultsSeq = getResultsSequence(seq1, seq2, lenSeq1, lenSeq2, offset);
    if (!resultsSeq)    // getResultsSequence returns NULL on malloc failure
    {
        free(bestMutant->mutantSeq);
        free(bestMutant);
        return NULL;
    }
    double startScore = calcScore(resultsSeq,lenSeq2,weights);
    free(resultsSeq);   // was leaked here before
    bestMutant->score = startScore;
    const char alphabet[ALPHABET_LEN] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O',
                                        'P','Q','R','S','T','U','V','W','X','Y','Z'};
    // try every position of seq2 ...
    for (int i = 0; i < lenSeq2; i++)
    {
        // ... against every letter of the alphabet
        for (int j = 0; j < ALPHABET_LEN; j++)
        {
            // substitution is only allowed when the pair is not conservative
            if (!inConservativeGroups(alphabet[j], seq2[i]))
            {
                checkedEffect = calcChangeBetweenTwoChars(seq1[i + offset], seq2[i], alphabet[j], weights);
                // keep the best delta seen so far
                // NOTE(review): an earlier substitution at another position is
                // not reverted when a better one is found, so mutantSeq can
                // differ from seq2 in more than one place while the score only
                // reflects the last delta — confirm this is intended.
                if ((isMax && checkedEffect > effect) ||    // looking for highest score
                    (!isMax && checkedEffect < effect))     // looking for lowest score
                {
                    bestMutant->mutantSeq[i] = alphabet[j]; // substitute
                    effect = checkedEffect;
                }
            }
        }
    }
    bestMutant->score = startScore + effect;
    return bestMutant;
}
// Comparison sign for an aligned pair: '*' identical, ':' conservative group,
// '.' semi-conservative group, '-' gap in c2, ' ' none of the above.
__host__ __device__ char getSign(char c1, char c2)
{
    if (c1 == c2)
        return '*';
    if (inConservativeGroups(c1, c2))
        return ':';
    return inSemiConservativeGroups(c1, c2) ? '.'
         : (c2 == '-')                      ? '-'
                                            : ' ';
}
// Build the per-position comparison string ('*' ':' '.' '-' ' ') of seq2
// against seq1 shifted by offset. Returns a heap string of size2 characters
// plus a terminating '\0' (caller frees it), or NULL on allocation failure.
__host__ __device__ char* getResultsSequence(char* seq1, char* seq2 , int size1, int size2, int offset)
{
    char* results = (char*)malloc(size2 * sizeof(char) + 1);
    if(!results)
    {
        printf("malloc failed\n");
        return NULL;
    }
    for (int i = 0; i < size2; i++)
        results[i] = getSign(seq1[i + offset], seq2[i]);
    // terminate: the extra byte was allocated but previously left uninitialized
    results[size2] = '\0';
    return results;
}
// Score a comparison string produced by getResultsSequence:
// weights[0]*stars - weights[1]*colons - weights[2]*points - weights[3]*spaces.
__host__ __device__ double calcScore(char* results, int resultsLen, double* weights)
{
    int stars = 0;
    int colons = 0;
    int points = 0;
    int spaces = 0;
    double totalScore = 0;
    // iterate exactly resultsLen characters; the previous bound
    // (resultsLen + 1) read one byte past the valid data
    for (int i = 0; i < resultsLen; i++)
    {
        if (results[i] == '*')
            stars++;
        else if (results[i] == ':')
            colons++;
        else if (results[i] == '.')
            points++;
        else if (results[i] == ' ')
            spaces++;
    }
    totalScore = weights[0] * stars - weights[1] * colons - weights[2] * points - weights[3] * spaces;
    return totalScore;
}
// Pairwise score of seq1 character `a` aligned with character `b`,
// following the star/colon/point/space weight convention.
static __host__ __device__ double charPairScore(char a, char b, double weights[])
{
    if (a == b)
        return weights[0];                      // star
    if (inConservativeGroups(b, a))
        return -weights[1];                     // colon
    if (inSemiConservativeGroups(b, a))
        return -weights[2];                     // point
    return -weights[3];                         // space
}

/*
Calculate how much substituting `anotherChar` for `seq2Char` (both aligned
against `seq1Char`) changes the alignment score: candidate pair score minus
current pair score. A '-' substitute contributes nothing.
*/
__host__ __device__ double calcChangeBetweenTwoChars(char seq1Char, char seq2Char, char anotherChar, double weights[])
{
    if (anotherChar == '-')
        return 0;
    return charPairScore(seq1Char, anotherChar, weights)
         - charPairScore(seq1Char, seq2Char, weights);
}
// True when c1 and c2 both appear in at least one conservative group.
__host__ __device__ BOOL inConservativeGroups(char c1, char c2)
{
    const char* ConservativeGroups[CONSERVATIVE_GROUPS_LEN] = {"NDEQ", "NEQK", "STA", "MILV", "QHRK", "NHQK",
                                                               "FYW", "HY", "MILF"};
    BOOL found = False;
    for (int i = 0; i < CONSERVATIVE_GROUPS_LEN && !found; i++)
        found = isExistInSeq(ConservativeGroups[i], c1, c2);
    return found;
}
// True when c1 and c2 both appear in at least one semi-conservative group.
__host__ __device__ BOOL inSemiConservativeGroups(char c1, char c2)
{
    const char* SemiConservativeGroups[SEMI_CONSERVATIVE_GROUPS_LEN] = {"SAG", "ATV", "CSA", "SGND", "STPA", "STNK",
                                                                        "NEQHRK", "NDEQHK", "SNDEQK", "HFY", "FVLIM"};
    BOOL found = False;
    for (int i = 0; i < SEMI_CONSERVATIVE_GROUPS_LEN && !found; i++)
        found = isExistInSeq(SemiConservativeGroups[i], c1, c2);
    return found;
}
// True when both characters appear somewhere in the group string seq.
__host__ __device__ BOOL isExistInSeq(const char* seq, char c1, char c2)
{
    if (!strChar(seq, c1))
        return False;
    if (!strChar(seq, c2))
        return False;
    return True;
}
// Device-safe strlen replacement: characters before the terminator.
__host__ __device__ int strLength(const char *str)
{
    int n = 0;
    for (const char* p = str; *p; ++p)
        n++;
    return n;
}
// Device-safe strcmp replacement: negative/zero/positive like standard strcmp.
__host__ __device__ int strCompare(const char* s1, const char* s2)
{
    while (*s1 != '\0' && *s1 == *s2)
    {
        ++s1;
        ++s2;
    }
    const unsigned char a = *(const unsigned char*)s1;
    const unsigned char b = *(const unsigned char*)s2;
    return (int)a - (int)b;
}
// Device-safe strchr replacement: pointer to the first occurrence of c in s,
// or NULL when absent. Searching for '\0' yields the terminator's address.
__host__ __device__ const char* strChar(const char* s, const char c)
{
    do
    {
        if (*s == c)
            return s;
    } while (*s++ != '\0');
    return NULL;
}
/*
Free a NULL-terminated variadic list of device pointers with cudaFree.
Stops at the first failure, but always cleans up the va_list.
*/
__host__ void cudaFreeAll(void *ptr, ...)
{
    va_list args;
    va_start(args, ptr);
    while (ptr) // until ptr != NULL
    {
        cudaError_t err = cudaFree(ptr);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "failed to free device data\n");
            va_end(args);   // was missing on this early-return path (UB per C spec)
            return;
        }
        ptr = va_arg(args, void*); //get next var
    }
    va_end(args);
}
// Free a Mutant allocated in device code: sequence buffer first, then struct.
__device__ void cudaFreeMutant(Mutant* mutant)
{
    free(mutant->mutantSeq);
    free(mutant);
} |
687b066c4d3a2d2d061865404c1b0ab5c5cc13d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SGEMM NN 128x64 kernel stub: carries the full production parameter list,
// but the body only declares the shared-memory tile area and stores one value.
// NOTE(review): `share` is read uninitialized and every input parameter is
// unused — presumably a compilation/occupancy placeholder; confirm intent.
extern "C" __global__ void sgemm_nn_128x64(
    float* param_C,
    const float* param_A,
    const float* param_B,
    float param_alpha,
    float param_beta,
    int param_flags,
    int param_lda,
    int param_ldb,
    int param_ldc,
    int param_m,
    int param_n,
    int param_k,
    int param_ldaz,
    int param_ldbz,
    int param_ldcz,
    int param_batch_loops
) {
    // 128x8 and 64x8 double-buffered tiles plus 4 scratch floats — TODO confirm
    __shared__ float share[128*8*2 + 64*8*2 + 4];
    *param_C = share[0];
}
| 687b066c4d3a2d2d061865404c1b0ab5c5cc13d7.cu |
// SGEMM NN 128x64 kernel stub: carries the full production parameter list,
// but the body only declares the shared-memory tile area and stores one value.
// NOTE(review): `share` is read uninitialized and every input parameter is
// unused — presumably a compilation/occupancy placeholder; confirm intent.
extern "C" __global__ void sgemm_nn_128x64(
    float* param_C,
    const float* param_A,
    const float* param_B,
    float param_alpha,
    float param_beta,
    int param_flags,
    int param_lda,
    int param_ldb,
    int param_ldc,
    int param_m,
    int param_n,
    int param_k,
    int param_ldaz,
    int param_ldbz,
    int param_ldcz,
    int param_batch_loops
) {
    // 128x8 and 64x8 double-buffered tiles plus 4 scratch floats — TODO confirm
    __shared__ float share[128*8*2 + 64*8*2 + 4];
    *param_C = share[0];
}
|
4d40be50e311dc62b752306f8ba7a33cd2f40491.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Kernel: for each of n particles starting at array position `ini`, record the
// particle's array position into ridp, keyed by its id relative to idini.
// Only ids inside [idini, idfin) are recorded; others are left untouched.
__global__ void KerCalcRidp(unsigned n,unsigned ini,unsigned idini,unsigned idfin,const unsigned *idp,unsigned *ridp)
{
  unsigned p=blockIdx.x*blockDim.x + threadIdx.x; //-Number of particle.
  if(p<n){
    p+=ini;
    const unsigned id=idp[p];
    if(idini<=id && id<idfin)ridp[id-idini]=p;
  }
} | 4d40be50e311dc62b752306f8ba7a33cd2f40491.cu | #include "includes.h"
// Kernel: for each of n particles starting at array position `ini`, record the
// particle's array position into ridp, keyed by its id relative to idini.
// Only ids inside [idini, idfin) are recorded; others are left untouched.
__global__ void KerCalcRidp(unsigned n,unsigned ini,unsigned idini,unsigned idfin,const unsigned *idp,unsigned *ridp)
{
  unsigned p=blockIdx.x*blockDim.x + threadIdx.x; //-Number of particle.
  if(p<n){
    p+=ini;
    const unsigned id=idp[p];
    if(idini<=id && id<idfin)ridp[id-idini]=p;
  }
} |
dbb7671ea6a2eaf2302d761f759157c56733555b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
// Check tensor dimensions for index operations, and return the slice size
// (product of all dst dimensions except `dim`).
// src can be nullptr in case of indexFill: in that case it is ignored.
// Raises (THArgCheck) when index is not a 1-D vector, dim is out of bounds,
// index length does not match src.size(dim), or slice sizes differ.
static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst,
                                          int dim,
                                          THCudaLongTensor *index,
                                          THCTensor *src)
{
  int dstDims = THCTensor_(_nDimension)(state, dst);
  int srcDims = (src == nullptr) ? dstDims : THCTensor_(_nDimension)(state, src);
  THArgCheck(THCudaLongTensor__nDimension(state, index) == 1, 4,
             "expecting vector of indices");
  THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds");
  // dst slice size = product of all dst dims except the indexed one
  ptrdiff_t dstSliceSize = 1;
  for (int d = 0; d < dstDims; d++) {
    if (d != dim) {
      dstSliceSize *= dst->size(d);
    }
  }
  if (src == nullptr) return dstSliceSize;
  THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds");
  THArgCheck(THCudaLongTensor_nElement(state, index) == src->size(dim), 4,
             "length of src.size[dim] is not equal to length of indices");
  ptrdiff_t srcSliceSize = 1;
  bool mismatch = false;
  if (dstDims != srcDims) mismatch = true;
  for (int d = 0; d < srcDims; d++) {
    if (d != dim) {
      srcSliceSize *= src->size(d);
      if (!mismatch && dst->size(d) != src->size(d)) mismatch = true;
    }
  }
  THArgCheck(dstSliceSize == srcSliceSize, 2,
             "Source/destination tensor have different slice sizes (%ld vs %ld)",
             dstSliceSize, srcSliceSize);
  if (mismatch) {
    // deprecated: equal slice numel but different shape still works; warn once
    static bool warningShown = false;
    if (!warningShown) {
      warningShown = true;
      fprintf(stderr,
              "Warning: source/destination slices have same size but different "
              "shape for an index operation. This behavior is deprecated.\n");
    }
  }
  return dstSliceSize;
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
// Returns true when index-major traversal should be used (see the block
// comment above for the full rationale).
bool THCTensor_(indexShouldBeMajor)(TensorInfo<real, unsigned int> &info,
                                    int sliceDim)
{
  // The stride between adjacent slices (e.g., between element #0 of slice #100
  // and element #0 of slice #101).
  unsigned int sliceStride = info.strides[sliceDim];
  for (int i = 0; i < info.dims; ++i) {
    // any non-trivial in-slice dimension with a smaller stride than the
    // slice stride means elements within a slice are the "fast" axis
    if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
      return true;
    }
  }
  return false;
}
// dst.index_copy_(dim, indices, src): copy slices of src into dst at the
// positions along `dim` given by `indices`. Dispatches to specialized kernels
// based on index count, dimensionality, and whether 32-bit indexing suffices.
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
  THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
  int dims = THCTensor_(_nDimension)(state, dst);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  dims = THCTensor_(_nDimension)(state, src);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
  dims = THCudaLongTensor__nDimension(state, indices);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
  // The `src` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
  // of the tensor `indices`.
  ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
  ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
  int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
  ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
  hipStream_t stream = THCState_getCurrentStream(state);
  int indContig = THCudaLongTensor_isContiguous(state, indices);
  int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM)       \
 hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>)   \
    , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream,           \
      dstInfo, srcInfo, indicesInfo,                                    \
      dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE,                           \
                    DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR)     \
 hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE,                        \
                      DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>)  \
    , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream,    \
      dstInfo, srcInfo, indicesInfo,                             \
      dstCopyDim, srcCopyDim, srcTotalSize,                      \
      (IDX_IS_MAJOR) ? sliceSize : numIndices,                   \
      dstCopyDimSize);
  dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
  // fast path: all tensors addressable with 32-bit arithmetic
  if (THCTensor_canUse32BitIndexMath(state, dst) &&
      THCTensor_canUse32BitIndexMath(state, src) &&
      THCTensor_canUse32BitIndexMath(state, indices)) {
    TensorInfo<real, unsigned int> dstInfo =
      getTensorInfo<real, THCTensor, unsigned int>(state, dst);
    int dstCopyDim = dstInfo.collapseDims(dim);
    dstInfo.reduceDim(dstCopyDim);
    TensorInfo<real, unsigned int> srcInfo =
      getTensorInfo<real, THCTensor, unsigned int>(state, src);
    int srcCopyDim = srcInfo.collapseDims(dim);
    srcInfo.reduceDim(srcCopyDim);
    TensorInfo<int64_t, unsigned int> indicesInfo =
      getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
    indicesInfo.collapseDims();
    // A reasonable choice for when to have each thread iterate over
    // indices to choose
    if (numIndices <= 16) {
      if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
        SMALL_INDEX(real, unsigned int, 1, 1, -2);
      } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
        SMALL_INDEX(real, unsigned int, 2, 2, -2);
      } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
        SMALL_INDEX(real, unsigned int, 3, 3, -2);
      } else {
        SMALL_INDEX(real, unsigned int, -1, -1, -1);
      }
    } else {
      bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim);
      if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
        LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
      } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
        if (indexIsMajor) {
          LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
        } else {
          LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
        }
      } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
        if (indexIsMajor) {
          LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
        } else {
          LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
        }
      } else {
        LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
      }
    }
  } else {
    // 64-bit fallback: generic (-1 dims) large-index kernel only
    TensorInfo<real, uint64_t> dstInfo =
      getTensorInfo<real, THCTensor, uint64_t>(state, dst);
    int dstCopyDim = dstInfo.collapseDims(dim);
    dstInfo.reduceDim(dstCopyDim);
    TensorInfo<real, uint64_t> srcInfo =
      getTensorInfo<real, THCTensor, uint64_t>(state, src);
    int srcCopyDim = srcInfo.collapseDims(dim);
    srcInfo.reduceDim(srcCopyDim);
    TensorInfo<int64_t, uint64_t> indicesInfo =
      getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
    indicesInfo.collapseDims();
    LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
  }
#undef SMALL_INDEX
#undef LARGE_INDEX
}
// dst = src.take(index): flat gather of src at the given linear indices;
// dst is resized to the shape of index.
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
  THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
  THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  THArgCheck(!(THCTensor_(_nDimension)(state, src) == 0 && THCudaLongTensor__nDimension(state, index) != 0), 2,
             "tried to take from an empty tensor");
  THCTensor_(resizeNd)(state, dst, index->dim(), THTensor_getSizePtr(index), NULL);
  // dispatchTakePut only handles non-empty tensors;
  if (index->_dim() > 0) {
    dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
  }
}
// Sort (index, src) pairs in place by index value on the current stream.
// Used by put() with accumulate=true so duplicate indices are grouped.
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
  THCThrustAllocator thrustAlloc(state);
  auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
  auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
  auto numel = THCTensor_(numel)(state, src);
  thrust::sort_by_key(
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
    index_iter, index_iter + numel,
    src_iter, ThrustLTOp<int64_t>());
}
// dst.put_(index, src, accumulate): flat scatter of src into dst at the given
// linear indices. With accumulate, values at duplicate indices are summed
// (indices are wrapped and sorted first so accumulation is well defined).
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
  THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
  ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
  ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
  THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
             3, "src should have the same number of elements as index");
  THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  if (numIndices == 0) {
    return;
  }
  if (accumulate) {
    // wrap indices so to replace negative indices
    THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
    THCudaLongTensor_resizeAs(state, sorted_index, index);
    THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize));
    // sort a clone of src by index so equal indices accumulate deterministically
    THCTensor* sorted_src = THCTensor_(newClone)(state, src);
    THCTensor_(sort_indices)(state, sorted_index, sorted_src);
    dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
    THCTensor_(free)(state, sorted_src);
    THCudaLongTensor_free(state, sorted_index);
  } else {
    dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
  }
}
// dst.index_add_(dim, indices, src): add slices of src into dst at the
// positions along `dim` given by `indices`. Same dispatch structure as
// indexCopy: kernel choice depends on index count, dimensionality, and
// whether 32-bit indexing suffices.
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
  THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
  int dims = THCTensor_(_nDimension)(state, dst);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
  dims = THCTensor_(_nDimension)(state, src);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
  dims = THCudaLongTensor__nDimension(state, indices);
  THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
  // The `src` is partitioned into two parts:
  // -the size of each slice we are indexing, which is the
  // total size of the tensor ignoring dimension `dim`;
  // -the number of indices we are choosing, which is the total size
  // of the tensor `indices`.
  ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
  ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
  int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
  ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
  hipStream_t stream = THCState_getCurrentStream(state);
  int indContig = THCudaLongTensor_isContiguous(state, indices);
  int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM)       \
 hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>)    \
    , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream,           \
      dstInfo, srcInfo, indicesInfo,                                    \
      dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE,                           \
                    DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR)     \
 hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE,                         \
                     DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>)   \
    , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream,    \
      dstInfo, srcInfo, indicesInfo,                             \
      dstAddDim, srcAddDim, srcTotalSize,                        \
      (IDX_IS_MAJOR) ? sliceSize : numIndices,                   \
      dstAddDimSize);
  dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
  dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
  dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
  // fast path: all tensors addressable with 32-bit arithmetic
  if (THCTensor_canUse32BitIndexMath(state, dst) &&
      THCTensor_canUse32BitIndexMath(state, src) &&
      THCTensor_canUse32BitIndexMath(state, indices)) {
    TensorInfo<real, unsigned int> dstInfo =
      getTensorInfo<real, THCTensor, unsigned int>(state, dst);
    int dstAddDim = dstInfo.collapseDims(dim);
    dstInfo.reduceDim(dstAddDim);
    TensorInfo<real, unsigned int> srcInfo =
      getTensorInfo<real, THCTensor, unsigned int>(state, src);
    int srcAddDim = srcInfo.collapseDims(dim);
    srcInfo.reduceDim(srcAddDim);
    TensorInfo<int64_t, unsigned int> indicesInfo =
      getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
    indicesInfo.collapseDims();
    // A reasonable choice for when to have each thread iterate over
    // indices to choose
    if (numIndices <= 16) {
      if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
        SMALL_INDEX(real, unsigned int, 1, 1, -2);
      } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
        SMALL_INDEX(real, unsigned int, 2, 2, -2);
      } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
        SMALL_INDEX(real, unsigned int, 3, 3, -2);
      } else {
        SMALL_INDEX(real, unsigned int, -1, -1, -1);
      }
    } else {
      bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim);
      if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
        LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
      } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
        if (indexIsMajor) {
          LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
        } else {
          LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
        }
      } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
        if (indexIsMajor) {
          LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
        } else {
          LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
        }
      } else {
        LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
      }
    }
  } else {
    // 64-bit fallback: generic (-1 dims) large-index kernel only
    TensorInfo<real, uint64_t> dstInfo =
      getTensorInfo<real, THCTensor, uint64_t>(state, dst);
    int dstAddDim = dstInfo.collapseDims(dim);
    dstInfo.reduceDim(dstAddDim);
    TensorInfo<real, uint64_t> srcInfo =
      getTensorInfo<real, THCTensor, uint64_t>(state, src);
    int srcAddDim = srcInfo.collapseDims(dim);
    srcInfo.reduceDim(srcAddDim);
    TensorInfo<int64_t, uint64_t> indicesInfo =
      getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
    indicesInfo.collapseDims();
    LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
  }
#undef SMALL_INDEX
#undef LARGE_INDEX
}
// Fill the slices of `dst` along dimension `dim` selected by `indices`
// with the scalar `val`.  Kernels are launched asynchronously on the
// current THC stream (HIP variant).
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
// Both tensors must stay within the dimension limit supported by TensorInfo.
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize =
THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr);
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
hipStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
// NOTE: the launch macros below reference locals (dstInfo, indicesInfo,
// dstFillDim, stream, ...) declared in the branches further down; they are
// #undef'd at the end of this function.
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize * numIndices, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstFillDimSize, val);
// 128-thread blocks; grid size capped at 8 blocks per multiprocessor.
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
// When both tensors are addressable with 32-bit index math, dispatch to
// kernels specialized on the collapsed dimensionality (1/2/3, else generic -1).
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim);
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2, true);
} else if (dstInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, -2, false);
}
} else if (dstInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, true);
}
}
} else {
// Fallback: 64-bit index math with fully generic (-1) dimensionality.
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
// Gather the slices of `src` along dimension `dim` selected by `indices`
// into `dst` (which is resized to match).  Kernels run asynchronously on
// the current THC stream (HIP variant).
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(_nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor__nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
// With no indices the result is empty: shape of `src` with size 0 in the
// first dimension.  No kernel launch is needed.
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
// Resize dst to src's shape with dimension `dim` replaced by numIndices.
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
// NOTE: the launch macros below reference locals (dstInfo, srcInfo,
// indicesInfo, dstSelectDim, srcSelectDim, ...) declared in the branches
// further down; they are #undef'd at the end of this function.
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
srcSelectDimSize);
// 128-thread blocks; grid size capped at 8 blocks per multiprocessor.
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
// When all tensors are addressable with 32-bit index math, dispatch to
// kernels specialized on the collapsed dimensionality (1/2/3, else generic -1).
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
// Fallback: 64-bit index math with fully generic (-1) dimensionality.
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#endif
| dbb7671ea6a2eaf2302d761f759157c56733555b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
// Check tensor dimensions for index operations, and return the slice size.
// src can be nullptr in case of indexFill: in that case it is ignored.
// A "slice" is the tensor with dimension `dim` removed; its size is the
// product of all other dimension sizes.
static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst,
int dim,
THCudaLongTensor *index,
THCTensor *src)
{
int dstDims = THCTensor_(_nDimension)(state, dst);
int srcDims = (src == nullptr) ? dstDims : THCTensor_(_nDimension)(state, src);
THArgCheck(THCudaLongTensor__nDimension(state, index) == 1, 4,
"expecting vector of indices");
THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds");
// Size of one destination slice: product of all sizes except dim.
ptrdiff_t dstSliceSize = 1;
for (int d = 0; d < dstDims; d++) {
if (d != dim) {
dstSliceSize *= dst->size(d);
}
}
// indexFill case: no source tensor to cross-check against.
if (src == nullptr) return dstSliceSize;
THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds");
THArgCheck(THCudaLongTensor_nElement(state, index) == src->size(dim), 4,
"length of src.size[dim] is not equal to length of indices");
// Compute the source slice size while tracking any shape mismatch
// (same element count but different per-dimension sizes).
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (int d = 0; d < srcDims; d++) {
if (d != dim) {
srcSliceSize *= src->size(d);
if (!mismatch && dst->size(d) != src->size(d)) mismatch = true;
}
}
THArgCheck(dstSliceSize == srcSliceSize, 2,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
// Same slice size but different shapes: deprecated, warn once per process.
if (mismatch) {
static bool warningShown = false;
if (!warningShown) {
warningShown = true;
fprintf(stderr,
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
}
return dstSliceSize;
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
// Returns true when some dimension other than `sliceDim` is non-singleton
// and has a smaller stride than the stride between adjacent slices, i.e.
// the data inside a slice is more contiguous than the slices themselves
// (see the discussion above for how the caller uses this to pick a kernel).
bool THCTensor_(indexShouldBeMajor)(TensorInfo<real, unsigned int> &info,
                                    int sliceDim)
{
  // Stride between element #0 of consecutive slices along `sliceDim`.
  const unsigned int strideBetweenSlices = info.strides[sliceDim];
  for (int d = 0; d < info.dims; ++d) {
    if (d == sliceDim) {
      continue;  // only compare dimensions *inside* a slice
    }
    const bool nonSingleton = info.sizes[d] > 1;
    if (nonSingleton && info.strides[d] < strideBetweenSlices) {
      return true;
    }
  }
  return false;
}
// Copy the slices of `src` into `dst` along dimension `dim`, at the
// positions given by `indices` (slice i of src goes to slice indices[i]
// of dst).  Kernels run asynchronously on the current THC stream.
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
// NOTE: the launch macros below reference locals (dstInfo, srcInfo,
// indicesInfo, dstCopyDim, srcCopyDim, ...) declared in the branches
// further down; they are #undef'd at the end of this function.
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexCopyLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstCopyDimSize);
// 128-thread blocks; grid size capped at 8 blocks per multiprocessor.
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
// When all tensors are addressable with 32-bit index math, dispatch to
// kernels specialized on the collapsed dimensionality (1/2/3, else generic -1).
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
// Fallback: 64-bit index math with fully generic (-1) dimensionality.
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
// Gather elements of `src` at the flat positions given by `index` into
// `dst`; `dst` is resized to the shape of `index`.
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index))
THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
// A non-empty index into a zero-dimensional source has nothing to read.
THArgCheck(!(THCTensor_(_nDimension)(state, src) == 0 && THCudaLongTensor__nDimension(state, index) != 0), 2,
"tried to take from an empty tensor")
// The output takes the shape of the index tensor.
THCTensor_(resizeNd)(state, dst, index->dim(), THTensor_getSizePtr(index), NULL);
// dispatchTakePut only handles non-empty tensors;
if (index->_dim() > 0) {
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
}
// Sort `index` ascending in place and permute the elements of `src`
// alongside it (also in place), using Thrust on the current THC stream.
// Used by THCTensor_(put) to group duplicate indices together.
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
// Scatter the elements of `src` into `dst` at the flat positions given by
// `index`.  When `accumulate` is non-zero, values at duplicate indices are
// combined via TensorPutAccumulateOp instead of overwriting.
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
// Nothing to scatter.
if (numIndices == 0) {
return;
}
if (accumulate) {
// wrap indices so to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize));
// Clone src so the in-place sort below does not modify the caller's
// tensor; sorting groups duplicate indices together (presumably so the
// accumulate op can combine them — see sort_indices).
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
// Accumulate the slices of `src` into `dst` along dimension `dim`, at the
// positions given by `indices` (slice i of src is added to slice
// indices[i] of dst).  Kernels run asynchronously on the current THC stream.
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
// NOTE: the launch macros below reference locals (dstInfo, srcInfo,
// indicesInfo, dstAddDim, srcAddDim, ...) declared in the branches
// further down; they are #undef'd at the end of this function.
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexAddLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstAddDimSize);
// 128-thread blocks; grid size capped at 8 blocks per multiprocessor.
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
// When all tensors are addressable with 32-bit index math, dispatch to
// kernels specialized on the collapsed dimensionality (1/2/3, else generic -1).
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
// Fallback: 64-bit index math with fully generic (-1) dimensionality.
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
// Fill the slices of `dst` along dimension `dim` selected by `indices`
// with the scalar `val`.  Kernels are launched asynchronously on the
// current THC stream.
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
// Both tensors must stay within the dimension limit supported by TensorInfo.
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize =
THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr);
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
// NOTE: the launch macros below reference locals (dstInfo, indicesInfo,
// dstFillDim, stream, ...) declared in the branches further down; they are
// #undef'd at the end of this function.
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize * numIndices, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstFillDimSize, val);
// 128-thread blocks; grid size capped at 8 blocks per multiprocessor.
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
// When both tensors are addressable with 32-bit index math, dispatch to
// kernels specialized on the collapsed dimensionality (1/2/3, else generic -1).
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim);
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2, true);
} else if (dstInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, -2, false);
}
} else if (dstInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, true);
}
}
} else {
// Fallback: 64-bit index math with fully generic (-1) dimensionality.
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(_nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor__nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexSelectLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
srcSelectDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#endif
|
8bee36ae1c4a8fd39cfab5d048698825f67c7c5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
| 8bee36ae1c4a8fd39cfab5d048698825f67c7c5f.cu | #include "cuda_runtime.h"
#include <stdint.h>
|
a9807f9f94836630aa1d28f3585c613862c1007b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/hip/Resize.cuh>
#include <c10/util/Exception.h>
#include <THH/THHGeneral.h>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
// Square-matrix overload: delegates to the (n, m) variant, where a negative
// column count is the sentinel for "use m = n".
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
  constexpr int64_t kSquare = -1;
  return at::native::eye_out_cuda(result, n, kSquare);
}
// Writes an n x m identity-like matrix into `result`: zeros everywhere and
// ones on the main diagonal. A negative m is treated as m = n (square).
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
  AT_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
  const int64_t cols = (m < 0) ? n : m;
  result.resize_({n, cols});
  result.zero_();
  // Stepping by stride(0) + stride(1) walks the main diagonal of the 2-D view.
  const int64_t diag_len = std::min<int64_t>(n, cols);
  const int64_t diag_step = result.stride(0) + result.stride(1);
  result.as_strided({diag_len}, {diag_step}).fill_(1);
  return result;
}
// Allocates an uninitialized CUDA tensor of the given size via the caching
// CUDA device allocator. `options` must specify the CUDA backend; pinned
// memory is rejected because pinning only applies to host (CPU) tensors.
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options) {
  AT_ASSERT(options.backend() == at::Backend::CUDA);
  AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged
  AT_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
  check_size_nonnegative(size);
  auto* allocator = at::cuda::getCUDADeviceAllocator();
  // Total element count; the storage below is sized in bytes.
  int64_t nelements = prod_intlist(size);
  auto dtype = options.dtype();
  auto storage_impl = c10::make_intrusive<StorageImpl>(
      dtype,
      nelements,
      allocator->allocate(nelements * dtype.itemsize()),
      allocator,
      /*resizeable=*/true);
  auto tensor = detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId(), false);
  // Default TensorImpl has size [0]; only reshape when the request differs.
  if (size.size() != 1 || size[0] != 0) {
    tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
  }
  return tensor;
}
// Builds a CUDA tensor with an explicit size/stride layout by creating an
// empty tensor first and then resizing its impl in place.
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
  Tensor result = at::native::empty_cuda({0}, options);
  at::native::resize_impl_cuda_(result.unsafeGetTensorImpl(), size, stride);
  return result;
}
// Writes a random permutation of [0, n) into `result`. Half-precision results
// are computed in float then cast; small n (< 30000) is generated on the CPU
// and copied over, since the device-side key-sort only pays off for large n.
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
  AT_CHECK(n >= 0, "n must be non-negative, got", n);
  AT_CHECK(at::scalar_tensor(n, result.options()).defined(),
      "n is too large for result tensor type: '", result.type().toString(), "'");
  result.resize_({n});
  if (result.scalar_type() == at::ScalarType::Half) {
    auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
    result.copy_(randperm_out_cuda(result_float, n, generator));
  } else {
    if (n < 30000) { // For small inputs, we offload it to CPU instead.
      auto result_cpu = at::empty({n}, result.options().device(kCPU));
      randperm_out(result_cpu, n, generator);
      result.copy_(result_cpu);
    } else {
      // Generate random values for the keys array
      AT_DISPATCH_ALL_TYPES(
          result.scalar_type(), "randperm_out_cuda", [&] {
            auto keys = at::empty(result.sizes(), result.options()).random_(generator);
            auto result_data = thrust::device_ptr<scalar_t>(result.data<scalar_t>());
            auto keys_data = thrust::device_ptr<scalar_t>(keys.data<scalar_t>());
            auto state = globalContext().getTHCState();
            THCThrustAllocator thrustAlloc(state);
            auto policy = thrust::hip::par(thrustAlloc).on(at::hip::getCurrentHIPStreamMasqueradingAsCUDA());
            // Fill result with 0..n-1, then permute it by sorting random keys.
            thrust::sequence(policy, result_data, result_data + n);
            // Use the sorted order of keys to rearrange the result array
            thrust::sort_by_key(policy, keys_data, keys_data + n, result_data);
          }
      );
    }
  }
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
// Returns the largest integer that does not exceed the selected real root of
//   m^2 + b*m + c = 0   (a == 1, and cX4 == 4*c),
// where `sign` (+1 or -1) picks the right or left root respectively. `x` is
// the original linear index, used only by the binary-search fallback below.
// Device-only: relies on the __double2ll_rd/_ru rounding intrinsics.
__device__
inline int64_t resolve_root_int(
    int64_t b, int64_t cX4, int64_t x, int32_t sign) {
  int64_t bXb_cX4 = b*b - cX4;  // the discriminant b^2 - 4c
  // potential precision loss could occur here when casting int64_t (63 bits
  // precision) to double (52 bits precision)
  double sr = ::sqrt((double)bXb_cX4);
  int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
  // have to cast double to int64_t, otherwise it would only compare up to the
  // precision of a double variable, ignoring the precision loss
  if (bXb_cX4 != (int64_t) (sr * sr)) {
    // handle precision loss by using binary search
    int64_t llsr = ::__double2ll_rd(sr);
    // Use the following math to reduce search space.
    // Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
    // let d = abs(bXb_cX4 - llsr * llsr), then we have:
    // z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
    // z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
    // Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
    // And the true value of row would also be within the range,
    // [res - sqrt(d), res + sqrt(d) + 1)
    // as the denominator would only reduce the precision penalty.
    int64_t diff =
        ::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
    // l never exceeds (could equal to) the target row index
    auto l = res > diff ? res - diff : 0;
    // r is always larger than the target row index
    auto r = res + diff + 1;
    // binary search for the correct answer
    x <<= 1; // the loop always compares with 2x, so do it once here
    while (l + 1 < r) {
      auto m = (l + r) >> 1;
      // for tril:
      // b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
      // for triu:
      // b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
      if (sign * (b + m) * m > x) {
        r = m;
      } else {
        l = m;
      }
    }
    res = l;
  }
  return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
// Maps linear index `x` (row-major order within the top trapezoid of a tril,
// whose first row holds `f` elements) to its (row, col) coordinate, using the
// closed-form quadratic root from the derivation above.
__device__
inline void get_coordinate_in_tril_trapezoid(
    int64_t f, int64_t x, int64_t & row, int64_t & col) {
  const int64_t f2 = f << 1;       // every formula below uses 2f
  const int64_t b = f2 - 1;
  const int64_t cX4 = -(x << 3);   // 4 * c = 4 * (-2x) = -8x
  row = resolve_root_int(b, cX4, x, 1);
  col = x - (((f2 + row - 1) * row) >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is <
// > 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
// Maps linear index `x` (row-major order within the bottom trapezoid of a
// triu, whose first row holds `f` elements) to its (row, col) coordinate,
// using the closed-form quadratic root from the derivation above.
__device__
inline void get_coordinate_in_triu_trapezoid(
    int64_t f, int64_t x, int64_t & row, int64_t & col) {
  const int64_t f2 = f << 1;      // every formula below uses 2f
  const int64_t b = -1 - f2;
  const int64_t cX4 = x << 3;     // 4 * c = 4 * (2x) = 8x
  row = resolve_root_int(b, cX4, x, -1);
  col = x - (((f2 - row + 1) * row) >> 1) + row;
}
} // namespace
// Fills `tensor` (laid out as [2, tril_size]: rows first, then cols) with the
// coordinates of the lower-triangular elements, one thread per element.
// Indices below `trapezoid_size` fall in the top trapezoid and use the
// closed-form root solver; the rest fall in the full-width bottom rectangle.
template <typename scalar_t>
__global__
void tril_indices_kernel(scalar_t * tensor,
                         int64_t row_offset,
                         int64_t m_first_row,
                         int64_t col,
                         int64_t trapezoid_size,
                         int64_t tril_size) {
  int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (linear_index < tril_size) {
    int64_t r, c;
    if (linear_index < trapezoid_size) {
      // the coordinate is within the top trapezoid
      get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
    } else {
      // the coordinate falls in the bottom rectangle
      auto surplus = linear_index - trapezoid_size;
      // add the height of trapezoid: m_last_row (col) - m_first_row + 1
      r = surplus / col + col - m_first_row + 1;
      c = surplus % col;
    }
    r += row_offset;
    // First half of the output holds row indices, second half column indices.
    tensor[linear_index] = r;
    tensor[linear_index + tril_size] = c;
  }
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
// Computes the (row, col) index pairs of the lower triangle of a row x col
// matrix with the given diagonal offset, returning a [2, tril_size] tensor.
// The triangle is modeled as a top trapezoid over a bottom rectangle so each
// GPU thread can recover its coordinate in closed form.
Tensor tril_indices_cuda(
    int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
  check_args(row, col, options);
  auto tril_size = get_tril_size(row, col, offset);
  auto tensor = empty_cuda({2, tril_size}, options);
  if (tril_size > 0) {
    // Number of triangle elements in the first non-empty row.
    auto m_first_row = offset > 0 ?
        std::min<int64_t>(col, 1 + offset) : // upper bounded by col
        row + offset > 0; // either 0 or 1
    auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
    auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
    int64_t rectangle_size = 0;
    if (rectangle_row_offset < row) {
      rectangle_size = (row - rectangle_row_offset) * col;
    }
    dim3 dim_block = cuda::getApplyBlock();
    dim3 dim_grid;
    // using tril_size instead of tensor.numel(), as each thread takes care of
    // two elements in the tensor.
    AT_CHECK(
        cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
        "unable to get dim grid");
    AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
      hipLaunchKernelGGL(( tril_indices_kernel),
          dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
          tensor.data<scalar_t>(),
          trapezoid_row_offset,
          m_first_row,
          col,
          tril_size - rectangle_size,
          tril_size);
    });
  }
  return tensor;
}
// Fills `tensor` (laid out as [2, triu_size]: rows first, then cols) with the
// coordinates of the upper-triangular elements, one thread per element.
// Indices below `rectangle_size` fall in the full-width top rectangle; the
// rest fall in the bottom (upside-down) trapezoid solved in closed form.
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
                         int64_t col_offset,
                         int64_t m_first_row,
                         int64_t col,
                         int64_t rectangle_size,
                         int64_t triu_size) {
  int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (linear_index < triu_size) {
    int64_t r, c;
    if (linear_index < rectangle_size) {
      // the coordinate is within the top rectangle
      r = linear_index / col;
      c = linear_index % col;
    } else {
      // the coordinate falls in the bottom trapezoid
      get_coordinate_in_triu_trapezoid(
          m_first_row, linear_index - rectangle_size, r, c);
      r += rectangle_size / col;  // shift past the rectangle's rows
    }
    c += col_offset;
    // First half of the output holds row indices, second half column indices.
    tensor[linear_index] = r;
    tensor[linear_index + triu_size] = c;
  }
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
// Computes the (row, col) index pairs of the upper triangle of a row x col
// matrix with the given diagonal offset, returning a [2, triu_size] tensor.
// The triangle is modeled as a full-width top rectangle over a bottom
// trapezoid so each GPU thread can recover its coordinate in closed form.
Tensor triu_indices_cuda(
    int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
  check_args(row, col, options);
  auto triu_size = row * col - get_tril_size(row, col, offset - 1);
  auto tensor = empty_cuda({2, triu_size}, options);
  if (triu_size > 0) {
    // # of triu elements in the first row
    auto m_first_row = offset > 0 ?
        std::max<int64_t>(col - offset, 0) : // upper bounded by col
        col;
    // size of the top rectangle
    int64_t rectangle_size = 0;
    if (offset < 0) {
      rectangle_size = std::min<int64_t>(row, -offset) * col;
    }
    dim3 dim_block = cuda::getApplyBlock();
    dim3 dim_grid;
    // using triu_size instead of tensor.numel(), as each thread takes care of
    // two elements in the tensor.
    AT_CHECK(
        cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
        "unable to get dim grid");
    AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
      hipLaunchKernelGGL(( triu_indices_kernel),
          dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
          tensor.data<scalar_t>(),
          std::max<int64_t>(0, offset),
          m_first_row,
          col,
          rectangle_size,
          triu_size);
    });
  }
  return tensor;
}
}} // namespace at::native
| a9807f9f94836630aa1d28f3585c613862c1007b.cu | #include <ATen/ATen.h>
#include <ATen/InitialTensorOptions.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/cuda/Resize.cuh>
#include <c10/util/Exception.h>
#include <THC/THCGeneral.h>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstddef>
#include <cmath>
namespace at {
namespace native {
// Square-matrix overload: delegates to the (n, m) variant, where a negative
// column count is the sentinel for "use m = n".
Tensor& eye_out_cuda(Tensor& result, int64_t n) {
  constexpr int64_t kSquare = -1;
  return at::native::eye_out_cuda(result, n, kSquare);
}
// Writes an n x m identity-like matrix into `result`: zeros everywhere and
// ones on the main diagonal. A negative m is treated as m = n (square).
Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) {
  AT_CHECK(n >= 0, "n must be greater or equal to 0, got ", n);
  const int64_t cols = (m < 0) ? n : m;
  result.resize_({n, cols});
  result.zero_();
  // Stepping by stride(0) + stride(1) walks the main diagonal of the 2-D view.
  const int64_t diag_len = std::min<int64_t>(n, cols);
  const int64_t diag_step = result.stride(0) + result.stride(1);
  result.as_strided({diag_len}, {diag_step}).fill_(1);
  return result;
}
// Allocates an uninitialized CUDA tensor of the given size via the caching
// CUDA device allocator. `options` must specify the CUDA backend; pinned
// memory is rejected because pinning only applies to host (CPU) tensors.
Tensor empty_cuda(IntArrayRef size, const TensorOptions& options) {
  AT_ASSERT(options.backend() == at::Backend::CUDA);
  AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged
  AT_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned");
  check_size_nonnegative(size);
  auto* allocator = at::cuda::getCUDADeviceAllocator();
  // Total element count; the storage below is sized in bytes.
  int64_t nelements = prod_intlist(size);
  auto dtype = options.dtype();
  auto storage_impl = c10::make_intrusive<StorageImpl>(
      dtype,
      nelements,
      allocator->allocate(nelements * dtype.itemsize()),
      allocator,
      /*resizeable=*/true);
  auto tensor = detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId(), false);
  // Default TensorImpl has size [0]; only reshape when the request differs.
  if (size.size() != 1 || size[0] != 0) {
    tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size);
  }
  return tensor;
}
// Builds a CUDA tensor with an explicit size/stride layout by creating an
// empty tensor first and then resizing its impl in place.
Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) {
  Tensor result = at::native::empty_cuda({0}, options);
  at::native::resize_impl_cuda_(result.unsafeGetTensorImpl(), size, stride);
  return result;
}
// Writes a random permutation of [0, n) into `result`. Half-precision results
// are computed in float then cast; small n (< 30000) is generated on the CPU
// and copied over, since the device-side key-sort only pays off for large n.
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
  AT_CHECK(n >= 0, "n must be non-negative, got", n);
  AT_CHECK(at::scalar_tensor(n, result.options()).defined(),
      "n is too large for result tensor type: '", result.type().toString(), "'");
  result.resize_({n});
  if (result.scalar_type() == at::ScalarType::Half) {
    auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA)));
    result.copy_(randperm_out_cuda(result_float, n, generator));
  } else {
    if (n < 30000) { // For small inputs, we offload it to CPU instead.
      auto result_cpu = at::empty({n}, result.options().device(kCPU));
      randperm_out(result_cpu, n, generator);
      result.copy_(result_cpu);
    } else {
      // Generate random values for the keys array
      AT_DISPATCH_ALL_TYPES(
          result.scalar_type(), "randperm_out_cuda", [&] {
            auto keys = at::empty(result.sizes(), result.options()).random_(generator);
            auto result_data = thrust::device_ptr<scalar_t>(result.data<scalar_t>());
            auto keys_data = thrust::device_ptr<scalar_t>(keys.data<scalar_t>());
            auto state = globalContext().getTHCState();
            THCThrustAllocator thrustAlloc(state);
            auto policy = thrust::cuda::par(thrustAlloc).on(at::cuda::getCurrentCUDAStream());
            // Fill result with 0..n-1, then permute it by sorting random keys.
            thrust::sequence(policy, result_data, result_data + n);
            // Use the sorted order of keys to rearrange the result array
            thrust::sort_by_key(policy, keys_data, keys_data + n, result_data);
          }
      );
    }
  }
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
namespace {
// To find the max integer that does not exceed the root of an int64_t variable,
// we could use a loop to test one bit at a time, which takes up to 31
// iterations. This would give the accurate result, but is relatively slow and
// is an overkill for most cases where double's precision suffice.
//
// If we directly use sqrt to calculate the root, the conversion from int64_t
// to double would lose 11 bits precision.
//
// The following solution uses sqrt directly for most cases, and would only
// special handle it if there is indeed precision loss.
// Returns the largest integer that does not exceed the selected real root of
//   m^2 + b*m + c = 0   (a == 1, and cX4 == 4*c),
// where `sign` (+1 or -1) picks the right or left root respectively. `x` is
// the original linear index, used only by the binary-search fallback below.
// Device-only: relies on the __double2ll_rd/_ru rounding intrinsics.
__device__
inline int64_t resolve_root_int(
    int64_t b, int64_t cX4, int64_t x, int32_t sign) {
  int64_t bXb_cX4 = b*b - cX4;  // the discriminant b^2 - 4c
  // potential precision loss could occur here when casting int64_t (63 bits
  // precision) to double (52 bits precision)
  double sr = ::sqrt((double)bXb_cX4);
  int64_t res = ::__double2ll_rd((-b + sign * sr)/2);
  // have to cast double to int64_t, otherwise it would only compare up to the
  // precision of a double variable, ignoring the precision loss
  if (bXb_cX4 != (int64_t) (sr * sr)) {
    // handle precision loss by using binary search
    int64_t llsr = ::__double2ll_rd(sr);
    // Use the following math to reduce search space.
    // Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss
    // let d = abs(bXb_cX4 - llsr * llsr), then we have:
    // z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d)
    // z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d)
    // Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)).
    // And the true value of row would also be within the range,
    // [res - sqrt(d), res + sqrt(d) + 1)
    // as the denominator would only reduce the precision penalty.
    int64_t diff =
        ::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr))));
    // l never exceeds (could equal to) the target row index
    auto l = res > diff ? res - diff : 0;
    // r is always larger than the target row index
    auto r = res + diff + 1;
    // binary search for the correct answer
    x <<= 1; // the loop always compares with 2x, so do it once here
    while (l + 1 < r) {
      auto m = (l + r) >> 1;
      // for tril:
      // b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2
      // for triu:
      // b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2
      if (sign * (b + m) * m > x) {
        r = m;
      } else {
        l = m;
      }
    }
    res = l;
  }
  return res;
}
// f: the number of elements in the first row of the trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the tril as a top trapezoid stacked on a bottom rectangle. Assume x
// corresponds to the coordinate (row, col) in the trapezoid, where the row and
// the col both start from 0, then we have:
//
// (f + f + row - 1) * row / 2 <= x [1]
// (f + f + row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (row + 2f - 1)row <= 2x
// row^2 + (2f-1)row - 2x <= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula of
// root:
// a = 1
// b = 2f - 1
// c = -2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the right. Intuitively, it is because:
// i) the valid solution range of row is between two roots, as it is <= 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 + (2f-1)row - 2x.
// Therefore, the valid range of row lies in between the nadir point and
// the larger root on the right.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b + sqrt(b^2 - 4c)) / 2)
// col = x - (f + f + row - 1) * row / 2
// Maps linear index `x` (row-major order within the top trapezoid of a tril,
// whose first row holds `f` elements) to its (row, col) coordinate, using the
// closed-form quadratic root from the derivation above.
__device__
inline void get_coordinate_in_tril_trapezoid(
    int64_t f, int64_t x, int64_t & row, int64_t & col) {
  const int64_t f2 = f << 1;       // every formula below uses 2f
  const int64_t b = f2 - 1;
  const int64_t cX4 = -(x << 3);   // 4 * c = 4 * (-2x) = -8x
  row = resolve_root_int(b, cX4, x, 1);
  col = x - (((f2 + row - 1) * row) >> 1);
}
// f: the number of elements in the first row of the bottom trapezoid.
// x: the index of the target coordinates ordered by row and then column.
//
// View the triu as a top rectangle stacked on a bottom trapezoid, where the
// trapezoid is upside down. Assume x corresponds to the coordinate (row, col)
// in the bottom trapezoid, where the row and the col start from 0, then we
// have:
//
// (f + f - row + 1) * row / 2 <= x [1]
// (f + f - row) * (row + 1) / 2 > x [2]
//
// Therefore, row is the maximum integer satisfying the following inequality:
//
// (-row + 2f + 1)row <= 2x
// row^2 - (2f+1)row + 2x >= 0. [3]
//
// Based on inequality [3], we have the following coefficients for the formula of
// root:
// a = 1
// b = -1 - 2f
// c = 2x
// There are two roots, and we should use the largest integer that does not
// exceed the root on the left. Intuitively, it is because:
// i) the valid solution range of row is outside of the two roots, as it is <
// > 0;
// ii) as we count in more rows, the total # of elements should always
// increase, hence so does the left-hand side row^2 - (2f+1)row + 2x.
// Therefore, the valid range of row lies to the left of the smaller root
// on the left.
// Full proof can be derived from inequality [2]. So, we calculate the result
// coordinate as:
//
// row = floor((-b - sqrt(b^2 - 4c)) / 2)
// col = x - (f + f - row + 1) * row / 2
// Maps linear index `x` (row-major order within the bottom trapezoid of a
// triu, whose first row holds `f` elements) to its (row, col) coordinate,
// using the closed-form quadratic root from the derivation above.
__device__
inline void get_coordinate_in_triu_trapezoid(
    int64_t f, int64_t x, int64_t & row, int64_t & col) {
  const int64_t f2 = f << 1;      // every formula below uses 2f
  const int64_t b = -1 - f2;
  const int64_t cX4 = x << 3;     // 4 * c = 4 * (2x) = 8x
  row = resolve_root_int(b, cX4, x, -1);
  col = x - (((f2 - row + 1) * row) >> 1) + row;
}
} // namespace
// Fills `tensor` (laid out as [2, tril_size]: rows first, then cols) with the
// coordinates of the lower-triangular elements, one thread per element.
// Indices below `trapezoid_size` fall in the top trapezoid and use the
// closed-form root solver; the rest fall in the full-width bottom rectangle.
template <typename scalar_t>
__global__
void tril_indices_kernel(scalar_t * tensor,
                         int64_t row_offset,
                         int64_t m_first_row,
                         int64_t col,
                         int64_t trapezoid_size,
                         int64_t tril_size) {
  int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (linear_index < tril_size) {
    int64_t r, c;
    if (linear_index < trapezoid_size) {
      // the coordinate is within the top trapezoid
      get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c);
    } else {
      // the coordinate falls in the bottom rectangle
      auto surplus = linear_index - trapezoid_size;
      // add the height of trapezoid: m_last_row (col) - m_first_row + 1
      r = surplus / col + col - m_first_row + 1;
      c = surplus % col;
    }
    r += row_offset;
    // First half of the output holds row indices, second half column indices.
    tensor[linear_index] = r;
    tensor[linear_index + tril_size] = c;
  }
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
// Computes the (row, col) index pairs of the lower triangle of a row x col
// matrix with the given diagonal offset, returning a [2, tril_size] tensor.
// The triangle is modeled as a top trapezoid over a bottom rectangle so each
// GPU thread can recover its coordinate in closed form.
Tensor tril_indices_cuda(
    int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
  check_args(row, col, options);
  auto tril_size = get_tril_size(row, col, offset);
  auto tensor = empty_cuda({2, tril_size}, options);
  if (tril_size > 0) {
    // Number of triangle elements in the first non-empty row.
    auto m_first_row = offset > 0 ?
        std::min<int64_t>(col, 1 + offset) : // upper bounded by col
        row + offset > 0; // either 0 or 1
    auto trapezoid_row_offset = std::max<int64_t>(0, -offset);
    auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1;
    int64_t rectangle_size = 0;
    if (rectangle_row_offset < row) {
      rectangle_size = (row - rectangle_row_offset) * col;
    }
    dim3 dim_block = cuda::getApplyBlock();
    dim3 dim_grid;
    // using tril_size instead of tensor.numel(), as each thread takes care of
    // two elements in the tensor.
    AT_CHECK(
        cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()),
        "unable to get dim grid");
    AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] {
      tril_indices_kernel<<<
          dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
          tensor.data<scalar_t>(),
          trapezoid_row_offset,
          m_first_row,
          col,
          tril_size - rectangle_size,
          tril_size);
    });
  }
  return tensor;
}
// Fills `tensor` (laid out as [2, triu_size]: rows first, then cols) with the
// coordinates of the upper-triangular elements, one thread per element.
// Indices below `rectangle_size` fall in the full-width top rectangle; the
// rest fall in the bottom (upside-down) trapezoid solved in closed form.
template <typename scalar_t>
__global__
void triu_indices_kernel(scalar_t * tensor,
                         int64_t col_offset,
                         int64_t m_first_row,
                         int64_t col,
                         int64_t rectangle_size,
                         int64_t triu_size) {
  int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
  if (linear_index < triu_size) {
    int64_t r, c;
    if (linear_index < rectangle_size) {
      // the coordinate is within the top rectangle
      r = linear_index / col;
      c = linear_index % col;
    } else {
      // the coordinate falls in the bottom trapezoid
      get_coordinate_in_triu_trapezoid(
          m_first_row, linear_index - rectangle_size, r, c);
      r += rectangle_size / col;  // shift past the rectangle's rows
    }
    c += col_offset;
    // First half of the output holds row indices, second half column indices.
    tensor[linear_index] = r;
    tensor[linear_index + triu_size] = c;
  }
}
// Some Large test cases for the fallback binary search path is disabled by
// default to speed up CI tests and to avoid OOM error. When modifying the
// implementation, please enable them in test/test_cuda.py and make sure they
// pass on your local server.
// Computes the (row, col) index pairs of the upper triangle of a row x col
// matrix with the given diagonal offset, returning a [2, triu_size] tensor.
// The triangle is modeled as a full-width top rectangle over a bottom
// trapezoid so each GPU thread can recover its coordinate in closed form.
Tensor triu_indices_cuda(
    int64_t row, int64_t col, int64_t offset, const TensorOptions& options) {
  check_args(row, col, options);
  auto triu_size = row * col - get_tril_size(row, col, offset - 1);
  auto tensor = empty_cuda({2, triu_size}, options);
  if (triu_size > 0) {
    // # of triu elements in the first row
    auto m_first_row = offset > 0 ?
        std::max<int64_t>(col - offset, 0) : // upper bounded by col
        col;
    // size of the top rectangle
    int64_t rectangle_size = 0;
    if (offset < 0) {
      rectangle_size = std::min<int64_t>(row, -offset) * col;
    }
    dim3 dim_block = cuda::getApplyBlock();
    dim3 dim_grid;
    // using triu_size instead of tensor.numel(), as each thread takes care of
    // two elements in the tensor.
    AT_CHECK(
        cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()),
        "unable to get dim grid");
    AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] {
      triu_indices_kernel<<<
          dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
          tensor.data<scalar_t>(),
          std::max<int64_t>(0, offset),
          m_first_row,
          col,
          rectangle_size,
          triu_size);
    });
  }
  return tensor;
}
}} // namespace at::native
|
00e6620b8369ca6bb37c1dc33fb36211872dcc84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SDL2/SDL.h"
#include "hiprand/hiprand.h"
//#define pos(x, y) (x + 1920*y)
const dim3 threads(128, 8, 1);
const dim3 blocks(15, 135, 1);
// Flattens a 2-D pixel coordinate into a linear index for a 1920-wide board.
__device__ uint32_t pos(uint32_t x, uint32_t y)
{
    return y * 1920u + x;
}
// Maps each random byte to a binary cell state: odd -> alive (0xff),
// even -> dead (0). One thread per pixel; no bounds check, so the launch grid
// must tile the 1920x1080 board exactly (as `blocks` x `threads` above does).
__global__ void solidify(uint8_t* arr)
{
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int idx = pos(x, y);
    arr[idx] = (arr[idx] & 1u) ? 0xff : 0;
}
// One step of Conway's Game of Life on a fixed 1920x1080 board, one thread
// per cell. Reads the previous generation from `in` and writes the next to
// `out` (presumably double-buffered — `in` and `out` should not alias;
// confirm against the caller). Off-board neighbors are treated as dead via
// the explicit edge checks below.
__global__ void conway(const uint8_t* in, uint8_t* out)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Count live neighbors among the up-to-8 surrounding cells.
    int neighbors = 0;
    if (x > 0)
    {
        if (y > 0) if (in[pos(x - 1, y - 1)] > 0)
            neighbors++;
        if (in[pos(x -1, y)] > 0)
            neighbors++;
        if (y < 1079) if (in[pos(x -1, y + 1)] > 0)
            neighbors++;
    }
    if (y > 0) if (in[pos(x, y - 1)] > 0)
        neighbors++;
    if (y < 1079) if (in[pos(x, y + 1)] > 0)
        neighbors++;
    if (x < 1919)
    {
        if (y > 0) if (in[pos(x + 1, y - 1)] > 0)
            neighbors++;
        if (in[pos(x + 1, y)] > 0)
            neighbors++;
        if (y < 1079) if (in[pos(x + 1, y + 1)] > 0)
            neighbors++;
    }
    /*
    Alternative compact neighbor loop, kept disabled for reference:
    for (int i = -1; i < 2; i++)
    if (x >= -i && x + i < 1920)
    for (int j = -1; j < 2; j++)
    if (y >= -j && y + j < 1080)
    if (in[pos(x + i, y + j)] > 0)
    neighbors++;*/
    // Life rules: 2 neighbors -> keep current state, 3 -> alive, else dead.
    if (neighbors == 2)
        out[pos(x, y)] = in[pos(x, y)];
    else if (neighbors == 3)
        out[pos(x, y)] = 0xff;
    else out[pos(x,y)] = 0;
}
// Converts the 8-bit cell grid to 32-bit pixels: live cells (0xff) render
// dark green (0x00006400), everything else dark grey (0x00141414).
__global__ void draw(const uint8_t* in, uint32_t* out)
{
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int idx = pos(x, y);
    out[idx] = (in[idx] == 0xff) ? 0x00006400u : 0x00141414u;
}
// Host driver: creates a 1920x1080 SDL window, seeds a hiprand XORWOW
// generator, randomizes and quantizes the board, then runs the
// simulate / draw / present loop until quit.
// Controls (on key release): r = reseed board, space = toggle pause,
// q = quit, c = clear board; mouse drag paints (left) / erases (right).
int main(int argc, char* argv[])
{
SDL_Init(SDL_INIT_EVERYTHING);
uint8_t* board;    // current generation, 1 byte per cell (device memory)
uint8_t* buffer;   // next generation; ping-ponged with board each step
uint32_t* colours; // 32-bit pixel buffer (device memory)
hiprandGenerator_t gen;
if (hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_XORWOW) !=HIPRAND_STATUS_SUCCESS) SDL_Log("WE FUCKED UP: %s", hipGetErrorString(hipGetLastError()));
// NOTE(review): rand() is never seeded (no srand call), so the generator
// seed -- and the initial board -- is identical every run; confirm intended.
hiprandSetPseudoRandomGeneratorSeed(gen, rand());
hipError_t err;
if (hipSetDevice(0) != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return 1;
}
SDL_Window* win = SDL_CreateWindow("conway's game of cuda", 0, 0, 1920, 1080,
SDL_WINDOW_INPUT_FOCUS | SDL_WINDOW_MOUSE_FOCUS |
0 | SDL_WINDOW_ALLOW_HIGHDPI |
SDL_WINDOW_SKIP_TASKBAR | SDL_WINDOW_SHOWN );
SDL_Surface* sur = SDL_GetWindowSurface(win);
SDL_Log("surdim : %i, surf: bpp: %i, fmt: %i, %s, h: %i, pitch: %i, pitch/4 : %f", sur->pitch * sur->h / 4, sur->format->BitsPerPixel, sur->format->format, SDL_GetPixelFormatName(sur->format->format), sur->h, sur->pitch, sur->pitch / 4.);
// Device allocations: one byte per cell for each generation, four bytes
// per pixel for the colour buffer.
err = hipMalloc(&board, 1080 * 1920); if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
err = hipMalloc(&buffer, 1080 * 1920); if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
err = hipMalloc(&colours, 4 * 1080 * 1920); if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
// hiprandGenerate fills 32-bit words, hence the /4 element count; solidify
// then reduces each byte to alive/dead.
if (hiprandGenerate(gen, reinterpret_cast<unsigned int *>(board), 1080 * 1920 / 4) !=HIPRAND_STATUS_SUCCESS) SDL_Log("RANDOM FUCKED UP");
hipLaunchKernelGGL(( solidify), dim3(blocks), dim3(threads), 0, 0, board);
err = hipDeviceSynchronize();
if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
bool running = true;
bool pause = false;
int time; // frame start tick; only used by the commented-out frame limiter
while (running)
{
time = SDL_GetTicks();
SDL_Event e;
while(SDL_PollEvent(&e)) {
switch(e.type) {
case SDL_QUIT:
running = false;
break;
case SDL_KEYUP:
switch (e.key.keysym.sym) {
case SDLK_r: // reseed and re-quantize the whole board
if (hiprandGenerate(gen, reinterpret_cast<unsigned int *>(board), 1080 * 1920 / 4) !=
HIPRAND_STATUS_SUCCESS)
SDL_Log("RANDOM FUCKED UP");
hipLaunchKernelGGL(( solidify), dim3(blocks), dim3(threads), 0, 0, board);
err = hipDeviceSynchronize();
if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
break;
case SDLK_SPACE:
pause = !pause;
break;
case SDLK_q:
running = false;
// NOTE(review): no break -- 'q' falls through and clears the board
// like 'c'. Harmless since we are quitting, but confirm intended.
case SDLK_c:
hipMemset(board, 0, 1920 * 1080);
default:
break;
}
break;
case SDL_MOUSEMOTION:
// Paint (left button) or erase (right button) two horizontally
// adjacent cells under the cursor (memset of 2 bytes).
if (e.motion.state & SDL_BUTTON_RMASK) {
err = hipMemset(board + e.motion.x + 1920 * e.motion.y, 0x00, 2);
if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
} else if (e.motion.state & SDL_BUTTON_LMASK) {
err = hipMemset(board + e.motion.x + 1920 * e.motion.y, 0xff, 2);
if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
}
break;
default:
break;
}
}
//SDL_Log("before");
if (!pause) {
// Advance one generation, then ping-pong the two cell buffers.
hipLaunchKernelGGL(( conway), dim3(blocks), dim3(threads), 0, 0, board, buffer);
err = hipDeviceSynchronize();
if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
//SDL_Log("%s", hipGetErrorString(hipGetLastError()));
uint8_t* temp = buffer;
buffer = board;
board = temp;
}
hipLaunchKernelGGL(( draw), dim3(blocks), dim3(threads), 0, 0, board, colours);
err = hipDeviceSynchronize();
if (err != hipSuccess) SDL_Log("%s", hipGetErrorString(err));
//SDL_Log("helo");
SDL_LockSurface(sur);
// NOTE(review): copies straight into sur->pixels, assuming a 32 bpp
// surface with pitch == 1920*4 -- TODO confirm (pitch is logged above).
hipMemcpy(sur->pixels, colours, 4* 1080 * 1920, hipMemcpyDeviceToHost);
SDL_UpdateWindowSurface(win);
SDL_UnlockSurface(sur);
// while (SDL_GetTicks() - time < 4);
// SDL_Delay(1);
// SDL_Log("here\n\n");
// SDL_Log("frametime: %i", SDL_GetTicks() - time);
}
hiprandDestroyGenerator(gen);
hipFree(board);
hipFree(buffer);
hipFree(colours);
SDL_DestroyWindow(win);
SDL_Quit();
return 0;
} | 00e6620b8369ca6bb37c1dc33fb36211872dcc84.cu | #include "cuda.h"
#include "SDL2/SDL.h"
#include "curand.h"
//#define pos(x, y) (x + 1920*y)
const dim3 threads(128, 8, 1);
const dim3 blocks(15, 135, 1);
// Device helper: row-major linear index of pixel (x, y) in a 1920-wide
// buffer. No bounds checking; callers must keep x < 1920 and y in range.
__device__ uint32_t pos(uint32_t x, uint32_t y)
{
return (x + 1920*y);
}
// Quantizes random bytes into Game-of-Life cells in place: odd bytes become
// alive (0xff), even bytes dead (0). Launched over exactly 1920x1080 threads
// (blocks(15,135) x threads(128,8)), so no bounds guard is needed.
__global__ void solidify(uint8_t* arr)
{
unsigned int ind = pos(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (arr[ind] % 2 == 1)
arr[ind] = 0xff;
else
arr[ind] = 0;
}
// One Game-of-Life generation: each thread counts the live cells among the
// 8 neighbors of its pixel in `in` and writes the next state to `out`
// (0xff = alive, 0 = dead). Border cells are handled by the explicit x/y
// guards; off-grid neighbors count as dead. The launch covers exactly
// 1920x1080 threads, so no global bounds check is required.
__global__ void conway(const uint8_t* in, uint8_t* out)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
int neighbors = 0;
// Left neighbor column (x-1), only when it exists.
if (x > 0)
{
if (y > 0) if (in[pos(x - 1, y - 1)] > 0)
neighbors++;
if (in[pos(x -1, y)] > 0)
neighbors++;
if (y < 1079) if (in[pos(x -1, y + 1)] > 0)
neighbors++;
}
// Same-column neighbors (above and below).
if (y > 0) if (in[pos(x, y - 1)] > 0)
neighbors++;
if (y < 1079) if (in[pos(x, y + 1)] > 0)
neighbors++;
// Right neighbor column (x+1), only when it exists.
if (x < 1919)
{
if (y > 0) if (in[pos(x + 1, y - 1)] > 0)
neighbors++;
if (in[pos(x + 1, y)] > 0)
neighbors++;
if (y < 1079) if (in[pos(x + 1, y + 1)] > 0)
neighbors++;
}
/*
for (int i = -1; i < 2; i++)
if (x >= -i && x + i < 1920)
for (int j = -1; j < 2; j++)
if (y >= -j && y + j < 1080)
if (in[pos(x + i, y + j)] > 0)
neighbors++;*/
// Standard Conway rules: 2 neighbors keeps state, 3 births, else death.
if (neighbors == 2)
out[pos(x, y)] = in[pos(x, y)];
else if (neighbors == 3)
out[pos(x, y)] = 0xff;
else out[pos(x,y)] = 0;
}
// Converts the 8-bit cell board into 32-bit pixels: live cells (0xff) map to
// 0x00006400, dead cells to 0x00141414. (Presumably dark green / dark grey;
// channel order depends on the surface format, which is only logged.)
__global__ void draw(const uint8_t* in, uint32_t* out)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x, y = blockIdx.y * blockDim.y + threadIdx.y;
if(in[pos(x, y)] == 0xff)
out[pos(x,y)] = 0x00006400;
else
out[pos(x, y)] = 0x00141414 ;
}
// Host driver: creates a 1920x1080 SDL window, seeds a curand XORWOW
// generator, randomizes and quantizes the board, then runs the
// simulate / draw / present loop until quit.
// Controls (on key release): r = reseed board, space = toggle pause,
// q = quit, c = clear board; mouse drag paints (left) / erases (right).
int main(int argc, char* argv[])
{
SDL_Init(SDL_INIT_EVERYTHING);
uint8_t* board;    // current generation, 1 byte per cell (device memory)
uint8_t* buffer;   // next generation; ping-ponged with board each step
uint32_t* colours; // 32-bit pixel buffer (device memory)
curandGenerator_t gen;
if (curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_XORWOW) !=CURAND_STATUS_SUCCESS) SDL_Log("WE FUCKED UP: %s", cudaGetErrorString(cudaGetLastError()));
// NOTE(review): rand() is never seeded (no srand call), so the generator
// seed -- and the initial board -- is identical every run; confirm intended.
curandSetPseudoRandomGeneratorSeed(gen, rand());
cudaError_t err;
if (cudaSetDevice(0) != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return 1;
}
SDL_Window* win = SDL_CreateWindow("conway's game of cuda", 0, 0, 1920, 1080,
SDL_WINDOW_INPUT_FOCUS | SDL_WINDOW_MOUSE_FOCUS |
0 | SDL_WINDOW_ALLOW_HIGHDPI |
SDL_WINDOW_SKIP_TASKBAR | SDL_WINDOW_SHOWN );
SDL_Surface* sur = SDL_GetWindowSurface(win);
SDL_Log("surdim : %i, surf: bpp: %i, fmt: %i, %s, h: %i, pitch: %i, pitch/4 : %f", sur->pitch * sur->h / 4, sur->format->BitsPerPixel, sur->format->format, SDL_GetPixelFormatName(sur->format->format), sur->h, sur->pitch, sur->pitch / 4.);
// Device allocations: one byte per cell per generation, four bytes/pixel.
err = cudaMalloc(&board, 1080 * 1920); if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
err = cudaMalloc(&buffer, 1080 * 1920); if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
err = cudaMalloc(&colours, 4 * 1080 * 1920); if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
// curandGenerate fills 32-bit words, hence the /4 element count; solidify
// then reduces each byte to alive/dead.
if (curandGenerate(gen, reinterpret_cast<unsigned int *>(board), 1080 * 1920 / 4) !=CURAND_STATUS_SUCCESS) SDL_Log("RANDOM FUCKED UP");
solidify<<<blocks, threads>>>(board);
err = cudaDeviceSynchronize();
if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
bool running = true;
bool pause = false;
int time; // frame start tick; only used by the commented-out frame limiter
while (running)
{
time = SDL_GetTicks();
SDL_Event e;
while(SDL_PollEvent(&e)) {
switch(e.type) {
case SDL_QUIT:
running = false;
break;
case SDL_KEYUP:
switch (e.key.keysym.sym) {
case SDLK_r: // reseed and re-quantize the whole board
if (curandGenerate(gen, reinterpret_cast<unsigned int *>(board), 1080 * 1920 / 4) !=
CURAND_STATUS_SUCCESS)
SDL_Log("RANDOM FUCKED UP");
solidify<<<blocks, threads>>>(board);
err = cudaDeviceSynchronize();
if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
break;
case SDLK_SPACE:
pause = !pause;
break;
case SDLK_q:
running = false;
// NOTE(review): no break -- 'q' falls through and clears the board
// like 'c'. Harmless since we are quitting, but confirm intended.
case SDLK_c:
cudaMemset(board, 0, 1920 * 1080);
default:
break;
}
break;
case SDL_MOUSEMOTION:
// Paint (left button) or erase (right button) two horizontally
// adjacent cells under the cursor (memset of 2 bytes).
if (e.motion.state & SDL_BUTTON_RMASK) {
err = cudaMemset(board + e.motion.x + 1920 * e.motion.y, 0x00, 2);
if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
} else if (e.motion.state & SDL_BUTTON_LMASK) {
err = cudaMemset(board + e.motion.x + 1920 * e.motion.y, 0xff, 2);
if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
}
break;
default:
break;
}
}
//SDL_Log("before");
if (!pause) {
// Advance one generation, then ping-pong the two cell buffers.
conway<<<blocks, threads>>>(board, buffer);
err = cudaDeviceSynchronize();
if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
//SDL_Log("%s", cudaGetErrorString(cudaGetLastError()));
uint8_t* temp = buffer;
buffer = board;
board = temp;
}
draw<<<blocks, threads>>>(board, colours);
err = cudaDeviceSynchronize();
if (err != cudaSuccess) SDL_Log("%s", cudaGetErrorString(err));
//SDL_Log("helo");
SDL_LockSurface(sur);
// NOTE(review): copies straight into sur->pixels, assuming a 32 bpp
// surface with pitch == 1920*4 -- TODO confirm (pitch is logged above).
cudaMemcpy(sur->pixels, colours, 4* 1080 * 1920, cudaMemcpyDeviceToHost);
SDL_UpdateWindowSurface(win);
SDL_UnlockSurface(sur);
// while (SDL_GetTicks() - time < 4);
// SDL_Delay(1);
// SDL_Log("here\n\n");
// SDL_Log("frametime: %i", SDL_GetTicks() - time);
}
curandDestroyGenerator(gen);
cudaFree(board);
cudaFree(buffer);
cudaFree(colours);
SDL_DestroyWindow(win);
SDL_Quit();
return 0;
} |
823b976182b660c57c1a752e49a4606b29b546fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/pipeline/data/types.h"
namespace dali {
namespace detail {
// Byte-wise device copy using a grid-stride loop, so any launch
// configuration covers all n bytes regardless of grid size.
__global__ void CopyKernel(uint8_t *dst, const uint8_t *src, int64_t n) {
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
dst[i] = src[i];
}
}
// Enqueues CopyKernel on `stream`, capping at 1024 threads x 1024 blocks;
// the kernel's grid-stride loop handles any remainder.
// NOTE(review): nbytes is narrowed to unsigned for div_ceil, which truncates
// for copies of 4 GiB or more -- confirm callers never pass such sizes.
void LaunchCopyKernel(void *dst, const void *src, int64_t nbytes, hipStream_t stream) {
unsigned block = std::min<int64_t>(nbytes, 1024);
unsigned grid = std::min<int64_t>(1024, div_ceil(static_cast<unsigned>(nbytes), block));
hipLaunchKernelGGL(( CopyKernel), dim3(grid), dim3(block), 0, stream, reinterpret_cast<uint8_t*>(dst),
reinterpret_cast<const uint8_t*>(src),
nbytes);
CUDA_CALL(hipGetLastError());
}
} // namespace detail
} // namespace dali
| 823b976182b660c57c1a752e49a4606b29b546fe.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/pipeline/data/types.h"
namespace dali {
namespace detail {
// Byte-wise device copy using a grid-stride loop, so any launch
// configuration covers all n bytes regardless of grid size.
__global__ void CopyKernel(uint8_t *dst, const uint8_t *src, int64_t n) {
for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
dst[i] = src[i];
}
}
// Enqueues CopyKernel on `stream`, capping at 1024 threads x 1024 blocks;
// the kernel's grid-stride loop handles any remainder.
// NOTE(review): nbytes is narrowed to unsigned for div_ceil, which truncates
// for copies of 4 GiB or more -- confirm callers never pass such sizes.
void LaunchCopyKernel(void *dst, const void *src, int64_t nbytes, cudaStream_t stream) {
unsigned block = std::min<int64_t>(nbytes, 1024);
unsigned grid = std::min<int64_t>(1024, div_ceil(static_cast<unsigned>(nbytes), block));
CopyKernel<<<grid, block, 0, stream>>>(reinterpret_cast<uint8_t*>(dst),
reinterpret_cast<const uint8_t*>(src),
nbytes);
CUDA_CALL(cudaGetLastError());
}
} // namespace detail
} // namespace dali
|
dc7f373806a2a98595592bdffd0d81de7ca98fc6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
#include <string.h>
#include <math.h>
#include <mpfr.h>
#include <iostream>
using namespace std;
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
FILE *fp;
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
// Builds a dense size x size symmetric test matrix whose entry (i, j) is
// 10 * exp(lamda * |i - j|): a Toeplitz band built from the coefficient
// array cof, indexed by the diagonal offset size-1-i+j.
//
// trix: caller-provided output buffer of size*size doubles (row-major).
// size: matrix dimension; must be >= 1.
//
// The coefficient scratch array is heap-allocated: the original used a
// variable-length array (double cof[2*size-1]), which is non-standard C++
// and overflows the stack for large sizes.
void
create_matrix(double *trix, int size){
  int i,j;
  double lamda = -0.01;
  double *cof = new double[2*size-1];  // diagonals: cof[size-1+d] for offset d
  double coe_i =0.0;
  for (i=0; i < size; i++)
  {
    coe_i = 10*exp(lamda*i);
    // Diagonal offset +i and -i share the same coefficient (symmetric band).
    j=size-1+i;
    cof[j]=coe_i;
    j=size-1-i;
    cof[j]=coe_i;
  }
  for (i=0; i < size; i++) {
    for (j=0; j < size; j++) {
      trix[i*size+j]=cof[size-1-i+j];
    }
  }
  delete[] cof;
}
// Aborts the process with a diagnostic (prefixed by msg) if the last
// recorded runtime error is not success. Note hipGetLastError() also
// clears the sticky error state.
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
/*-------------------------------------------------------
** Pay attention to the index. Index i give the range
** which starts from 0 to range-1. The real values of
** the index should be adjust and related with the value
**-------------------------------------------------------
*/
// Forward-elimination step 1: for pivot column t, computes the multipliers
// m[row][t] = a[row][t] / a[t][t] for rows t+1 .. Size-1. One thread per
// row; excess threads return early. No pivoting is performed, so a zero
// diagonal element produces inf/nan.
__global__ void Fan1(double *c_m, double *c_a, int Size, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
c_m[Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t] = c_a[Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t] / c_a[Size*t+t];
}
/*-------------------------------------------------------
**-------------------------------------------------------
*/
// Forward-elimination step 2: subtracts multiplier * (pivot row t) from the
// trailing submatrix of A; the yidx == 0 threads also update the RHS b.
// 2D launch: xidx indexes rows below the pivot, yidx columns from t onward.
// j1 is unused; kept for signature compatibility with callers.
__global__ void Fan2(double *c_m, double *c_a, double *c_b,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
c_a[Size*(xidx+1+t)+(yidx+t)] -= c_m[Size*(xidx+1+t)+t] * c_a[Size*t+(yidx+t)];
if(yidx == 0){
// yidx is 0 here, so c_m[...+(yidx+t)] is the same column-t multiplier.
c_b[xidx+1+t] -= c_m[Size*(xidx+1+t)+(yidx+t)] * c_b[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
/*
 * Driver: builds a Size x Size system A x = b (b = all ones, A generated by
 * create_matrix), runs Gaussian forward elimination on the GPU via the
 * Fan1/Fan2 kernels (no pivoting), back-substitutes on the CPU, and prints
 * wall-clock and kernel timings.
 *
 * Usage: gaussian -s <size>
 *
 * Fixes over the original: Size is validated (it was read uninitialized
 * when -s was absent), the new[]-allocated buffers are released with
 * delete[] instead of free() (undefined behavior), and finalVec is no
 * longer leaked.
 */
int main(int argc, char *argv[])
{
    printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
    int verbose = 0;
    int i, j, t;
    char flag;
    if (argc < 2) {
        printf("Usage: gaussian -f filename / -s size [-q]\n\n");
        printf("-q (quiet) suppresses printing the matrix and result values.\n");
        printf("-f (filename) path of input file\n");
        printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
        printf("The first line of the file contains the dimension of the matrix, n.");
        printf("The second line of the file is a newline.\n");
        printf("The next n lines contain n tab separated values for the matrix.");
        printf("The next line of the file is a newline.\n");
        printf("The next line of the file is a 1xn vector with tab separated values.\n");
        printf("The next line of the file is a newline. (optional)\n");
        printf("The final line of the file is the pre-computed solution. (optional)\n");
        printf("Example: matrix4.txt:\n");
        printf("4\n");
        printf("\n");
        printf("-0.6 -0.5 0.7 0.3\n");
        printf("-0.3 -0.9 0.3 0.7\n");
        printf("-0.4 -0.5 -0.3 -0.8\n");
        printf("0.0 -0.1 0.2 0.9\n");
        printf("\n");
        printf("-0.85 -0.68 0.24 -0.53\n");
        printf("\n");
        printf("0.7 0.0 -0.4 -0.5\n");
        exit(0);
    }
    int Size = 0;  // set by -s; validated below (was previously uninitialized)
    for(i=1;i<argc;i++) {
        if (argv[i][0]=='-') {// flag
            flag = argv[i][1];
            switch (flag) {
                case 's': // matrix size
                    i++;
                    Size = atoi(argv[i]);
                    printf("Create matrix internally in parse, size = %d \n", Size);
                    break;
            }
        }
    }
    if (Size <= 0) {
        fprintf(stderr, "Invalid or missing matrix size; use -s <size>\n");
        return 1;
    }
    double* ha = new double[Size * Size];  // matrix A (host)
    create_matrix(ha, Size);
    double* hb = new double[Size];         // right-hand side b (all ones)
    for (j = 0; j < Size; j++)
        hb[j] = 1.0;
    double* hm = new double[Size * Size];  // elimination multipliers
    double* finalVec = new double[Size];   // solution vector x
    for (i = 0; i < Size * Size; i++)
        hm[i] = (double)0.0;
    // begin timing
    struct timeval start_t;
    struct timeval end_t;
    struct timeval skt_t;
    struct timeval ske_t;
    struct timeval sht_t;
    struct timeval she_t;
    gettimeofday(&start_t, 0L);
    double *cuda_m, *cuda_a, *cuda_b;
    // allocate memory on GPU
    hipMalloc((void **) &cuda_m, Size * Size * sizeof(double));
    hipMalloc((void **) &cuda_a, Size * Size * sizeof(double));
    hipMalloc((void **) &cuda_b, Size * sizeof(double));
    // copy memory to GPU
    hipMemcpy(cuda_m, hm, Size * Size * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(cuda_a, ha, Size * Size * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(cuda_b, hb, Size * sizeof(double), hipMemcpyHostToDevice);
    // 1D launch for Fan1 (one thread per row), 2D launch for Fan2.
    int block_size, grid_size;
    block_size = MAXBLOCKSIZE;
    grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
    int blockSize2d, gridSize2d;
    blockSize2d = BLOCK_SIZE_XY;
    gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
    dim3 dimBlockXY(blockSize2d,blockSize2d);
    dim3 dimGridXY(gridSize2d,gridSize2d);
    dim3 dimBlock(block_size);
    dim3 dimGrid(grid_size);
    gettimeofday(&skt_t, 0L);
    // Forward elimination, one pivot column per iteration: Fan1 computes the
    // multipliers for column t, Fan2 updates the trailing submatrix and b.
    for (t = 0; t < (Size-1); t++) {
        hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, cuda_m,cuda_a,Size,t);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, cuda_m,cuda_a,cuda_b,Size,Size-t,t);
        hipDeviceSynchronize();
        checkCUDAError("Fan2");
    }
    gettimeofday(&ske_t, 0L);
    // copy memory back to CPU
    hipMemcpy(hm, cuda_m, Size * Size * sizeof(double), hipMemcpyDeviceToHost);
    hipMemcpy(ha, cuda_a, Size * Size * sizeof(double), hipMemcpyDeviceToHost);
    hipMemcpy(hb, cuda_b, Size * sizeof(double), hipMemcpyDeviceToHost);
    // Back substitution (CPU): solve the upper-triangular system bottom-up.
    gettimeofday(&sht_t, 0L);
    for (i = 0; i < Size; i++) {
        finalVec[Size-i-1] = hb[Size-i-1];
        for (j = 0; j < i; j++)
        {
            finalVec[Size-i-1] -= ha[Size*(Size-i-1)+(Size-j-1)] * finalVec[Size-j-1];
        }
        finalVec[Size-i-1] = finalVec[Size-i-1] / ha[Size*(Size-i-1)+(Size-i-1)];
    }
    gettimeofday(&she_t, 0L);
    gettimeofday(&end_t, 0L);
    ((std::cout<<"time: ") << ((end_t . tv_sec - start_t . tv_sec) + (end_t . tv_usec - start_t . tv_usec) * 1e-6)) << endl;
    ((std::cout<<"kernel: ") << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6 + (she_t . tv_sec - sht_t . tv_sec) + (she_t . tv_usec - sht_t . tv_usec) * 1e-6)) << endl;
    (void)verbose;  // kept for the commented-out verbose printing path
    hipFree(cuda_m);
    hipFree(cuda_a);
    hipFree(cuda_b);
    // Host buffers were allocated with new[]; the original released them with
    // free() (undefined behavior) and leaked finalVec.
    delete[] hm;
    delete[] ha;
    delete[] hb;
    delete[] finalVec;
    return 0;
}
| dc7f373806a2a98595592bdffd0d81de7ca98fc6.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include <cuda_runtime.h>
#include <string.h>
#include <math.h>
#include <mpfr.h>
#include <iostream>
using namespace std;
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
FILE *fp;
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
// Builds a dense size x size symmetric test matrix whose entry (i, j) is
// 10 * exp(lamda * |i - j|): a Toeplitz band built from the coefficient
// array cof, indexed by the diagonal offset size-1-i+j.
//
// trix: caller-provided output buffer of size*size doubles (row-major).
// size: matrix dimension; must be >= 1.
//
// The coefficient scratch array is heap-allocated: the original used a
// variable-length array (double cof[2*size-1]), which is non-standard C++
// and overflows the stack for large sizes.
void
create_matrix(double *trix, int size){
  int i,j;
  double lamda = -0.01;
  double *cof = new double[2*size-1];  // diagonals: cof[size-1+d] for offset d
  double coe_i =0.0;
  for (i=0; i < size; i++)
  {
    coe_i = 10*exp(lamda*i);
    // Diagonal offset +i and -i share the same coefficient (symmetric band).
    j=size-1+i;
    cof[j]=coe_i;
    j=size-1-i;
    cof[j]=coe_i;
  }
  for (i=0; i < size; i++) {
    for (j=0; j < size; j++) {
      trix[i*size+j]=cof[size-1-i+j];
    }
  }
  delete[] cof;
}
// Aborts the process with a diagnostic (prefixed by msg) if the last
// recorded runtime error is not success. Note cudaGetLastError() also
// clears the sticky error state.
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
/*-------------------------------------------------------
** Pay attention to the index. Index i give the range
** which starts from 0 to range-1. The real values of
** the index should be adjust and related with the value
**-------------------------------------------------------
*/
// Forward-elimination step 1: for pivot column t, computes the multipliers
// m[row][t] = a[row][t] / a[t][t] for rows t+1 .. Size-1. One thread per
// row; excess threads return early. No pivoting is performed, so a zero
// diagonal element produces inf/nan.
__global__ void Fan1(double *c_m, double *c_a, int Size, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
c_m[Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t] = c_a[Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t] / c_a[Size*t+t];
}
/*-------------------------------------------------------
**-------------------------------------------------------
*/
// Forward-elimination step 2: subtracts multiplier * (pivot row t) from the
// trailing submatrix of A; the yidx == 0 threads also update the RHS b.
// 2D launch: xidx indexes rows below the pivot, yidx columns from t onward.
// j1 is unused; kept for signature compatibility with callers.
__global__ void Fan2(double *c_m, double *c_a, double *c_b,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
c_a[Size*(xidx+1+t)+(yidx+t)] -= c_m[Size*(xidx+1+t)+t] * c_a[Size*t+(yidx+t)];
if(yidx == 0){
// yidx is 0 here, so c_m[...+(yidx+t)] is the same column-t multiplier.
c_b[xidx+1+t] -= c_m[Size*(xidx+1+t)+(yidx+t)] * c_b[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
/*
 * Driver: builds a Size x Size system A x = b (b = all ones, A generated by
 * create_matrix), runs Gaussian forward elimination on the GPU via the
 * Fan1/Fan2 kernels (no pivoting), back-substitutes on the CPU, and prints
 * wall-clock and kernel timings.
 *
 * Usage: gaussian -s <size>
 *
 * Fixes over the original: Size is validated (it was read uninitialized
 * when -s was absent), the new[]-allocated buffers are released with
 * delete[] instead of free() (undefined behavior), finalVec is no longer
 * leaked, and the deprecated cudaThreadSynchronize() is replaced by
 * cudaDeviceSynchronize().
 */
int main(int argc, char *argv[])
{
    printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
    int verbose = 0;
    int i, j, t;
    char flag;
    if (argc < 2) {
        printf("Usage: gaussian -f filename / -s size [-q]\n\n");
        printf("-q (quiet) suppresses printing the matrix and result values.\n");
        printf("-f (filename) path of input file\n");
        printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
        printf("The first line of the file contains the dimension of the matrix, n.");
        printf("The second line of the file is a newline.\n");
        printf("The next n lines contain n tab separated values for the matrix.");
        printf("The next line of the file is a newline.\n");
        printf("The next line of the file is a 1xn vector with tab separated values.\n");
        printf("The next line of the file is a newline. (optional)\n");
        printf("The final line of the file is the pre-computed solution. (optional)\n");
        printf("Example: matrix4.txt:\n");
        printf("4\n");
        printf("\n");
        printf("-0.6 -0.5 0.7 0.3\n");
        printf("-0.3 -0.9 0.3 0.7\n");
        printf("-0.4 -0.5 -0.3 -0.8\n");
        printf("0.0 -0.1 0.2 0.9\n");
        printf("\n");
        printf("-0.85 -0.68 0.24 -0.53\n");
        printf("\n");
        printf("0.7 0.0 -0.4 -0.5\n");
        exit(0);
    }
    int Size = 0;  // set by -s; validated below (was previously uninitialized)
    for(i=1;i<argc;i++) {
        if (argv[i][0]=='-') {// flag
            flag = argv[i][1];
            switch (flag) {
                case 's': // matrix size
                    i++;
                    Size = atoi(argv[i]);
                    printf("Create matrix internally in parse, size = %d \n", Size);
                    break;
            }
        }
    }
    if (Size <= 0) {
        fprintf(stderr, "Invalid or missing matrix size; use -s <size>\n");
        return 1;
    }
    double* ha = new double[Size * Size];  // matrix A (host)
    create_matrix(ha, Size);
    double* hb = new double[Size];         // right-hand side b (all ones)
    for (j = 0; j < Size; j++)
        hb[j] = 1.0;
    double* hm = new double[Size * Size];  // elimination multipliers
    double* finalVec = new double[Size];   // solution vector x
    for (i = 0; i < Size * Size; i++)
        hm[i] = (double)0.0;
    // begin timing
    struct timeval start_t;
    struct timeval end_t;
    struct timeval skt_t;
    struct timeval ske_t;
    struct timeval sht_t;
    struct timeval she_t;
    gettimeofday(&start_t, 0L);
    double *cuda_m, *cuda_a, *cuda_b;
    // allocate memory on GPU
    cudaMalloc((void **) &cuda_m, Size * Size * sizeof(double));
    cudaMalloc((void **) &cuda_a, Size * Size * sizeof(double));
    cudaMalloc((void **) &cuda_b, Size * sizeof(double));
    // copy memory to GPU
    cudaMemcpy(cuda_m, hm, Size * Size * sizeof(double),cudaMemcpyHostToDevice );
    cudaMemcpy(cuda_a, ha, Size * Size * sizeof(double),cudaMemcpyHostToDevice );
    cudaMemcpy(cuda_b, hb, Size * sizeof(double),cudaMemcpyHostToDevice );
    // 1D launch for Fan1 (one thread per row), 2D launch for Fan2.
    int block_size, grid_size;
    block_size = MAXBLOCKSIZE;
    grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
    int blockSize2d, gridSize2d;
    blockSize2d = BLOCK_SIZE_XY;
    gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
    dim3 dimBlockXY(blockSize2d,blockSize2d);
    dim3 dimGridXY(gridSize2d,gridSize2d);
    dim3 dimBlock(block_size);
    dim3 dimGrid(grid_size);
    gettimeofday(&skt_t, 0L);
    // Forward elimination, one pivot column per iteration: Fan1 computes the
    // multipliers for column t, Fan2 updates the trailing submatrix and b.
    for (t = 0; t < (Size-1); t++) {
        Fan1<<<dimGrid,dimBlock>>>(cuda_m,cuda_a,Size,t);
        cudaDeviceSynchronize();  // was cudaThreadSynchronize (deprecated)
        Fan2<<<dimGridXY,dimBlockXY>>>(cuda_m,cuda_a,cuda_b,Size,Size-t,t);
        cudaDeviceSynchronize();
        checkCUDAError("Fan2");
    }
    gettimeofday(&ske_t, 0L);
    // copy memory back to CPU
    cudaMemcpy(hm, cuda_m, Size * Size * sizeof(double),cudaMemcpyDeviceToHost );
    cudaMemcpy(ha, cuda_a, Size * Size * sizeof(double),cudaMemcpyDeviceToHost );
    cudaMemcpy(hb, cuda_b, Size * sizeof(double),cudaMemcpyDeviceToHost );
    // Back substitution (CPU): solve the upper-triangular system bottom-up.
    gettimeofday(&sht_t, 0L);
    for (i = 0; i < Size; i++) {
        finalVec[Size-i-1] = hb[Size-i-1];
        for (j = 0; j < i; j++)
        {
            finalVec[Size-i-1] -= ha[Size*(Size-i-1)+(Size-j-1)] * finalVec[Size-j-1];
        }
        finalVec[Size-i-1] = finalVec[Size-i-1] / ha[Size*(Size-i-1)+(Size-i-1)];
    }
    gettimeofday(&she_t, 0L);
    gettimeofday(&end_t, 0L);
    ((std::cout<<"time: ") << ((end_t . tv_sec - start_t . tv_sec) + (end_t . tv_usec - start_t . tv_usec) * 1e-6)) << endl;
    ((std::cout<<"kernel: ") << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6 + (she_t . tv_sec - sht_t . tv_sec) + (she_t . tv_usec - sht_t . tv_usec) * 1e-6)) << endl;
    (void)verbose;  // kept for the commented-out verbose printing path
    cudaFree(cuda_m);
    cudaFree(cuda_a);
    cudaFree(cuda_b);
    // Host buffers were allocated with new[]; the original released them with
    // free() (undefined behavior) and leaked finalVec.
    delete[] hm;
    delete[] ha;
    delete[] hb;
    delete[] finalVec;
    return 0;
}
|
a48c3deb890e87c38b6960ea6e48c1b7d07783b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "math_functions.hpp"
#include "hotspot_layer.hpp"
#define CV_PI 3.1415926535897932384626433832795
#define GAUSSIAN(x0,y0,x,y) 0.5 / gaussian_std / gaussian_std / CV_PI * exp(-0.5 * (((x0)-(x)) * ((x0)-(x)) + ((y0)-(y)) * ((y0)-(y))) / gaussian_std / gaussian_std)
namespace caffe {
__device__ __constant__ float kEps= 1e-4;
// Renders one Gaussian "hotspot" per (image, point) pair into a heatmap.
// Each CUDA_KERNEL_LOOP iteration decodes one flat output index into
// (n, h, w, p), rescales point p of image n from data_ coordinates to
// target_ coordinates (optionally undoing mean-centering), evaluates the
// GAUSSIAN macro at output pixel (w, h), and zeroes values below kEps so
// the far tail of the Gaussian stays exactly 0.
template <typename Dtype>
__global__ void HotspotFoward(const int num, const int num_point, const Dtype gaussian_std,
const int data_height, const int data_width, const bool mean_removed,
const int target_height, const int target_width,
const Dtype* point_data, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, num * target_height * target_width * num_point) {
// Decode flat index -> (n, h, w, p); w and p are interleaved innermost.
int n = index / (target_height * target_width * num_point);
int sp = index % (target_height * target_width * num_point);
int h = sp / (target_width * num_point);
int pw = sp % (target_width * num_point);
int w = pw / num_point;
int p = pw % num_point;
// Point p is stored as an (x, y) pair; normalize by the data size and
// scale to the target heatmap size.
Dtype p1 = (point_data[n * num_point * 2 + p * 2] / data_width + (mean_removed ? 0.5 : 0)) * target_width;
Dtype p2 = (point_data[n * num_point * 2 + p * 2 + 1] / data_height + (mean_removed ? 0.5 : 0)) * target_height;
Dtype temp = GAUSSIAN(p1, p2, w, h);
if (temp > kEps) {
top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = temp;
}
else {
top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = 0;
}
}
}
// GPU forward pass. bottom[0] is a (num x 2*num_point) blob of interleaved
// (x, y) point coordinates; top[0] receives num x num_point x height_ x
// width_ Gaussian heatmaps. One thread per output element, sized by
// CAFFE_GET_BLOCKS; CUDA_POST_KERNEL_CHECK surfaces launch errors.
template <typename Dtype>
void HotspotLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* point_data = bottom[0]->gpu_data();
const int num_point = bottom[0]->shape(1) / 2;  // (x, y) pairs per image
const int num = bottom[0]->num();
HotspotFoward<Dtype> << <CAFFE_GET_BLOCKS(num * num_point * height_ * width_),
CAFFE_CUDA_NUM_THREADS >> >(num, num_point, gaussian_std_,
data_height_, data_width_, mean_removed_,
height_, width_,
point_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(HotspotLayer);
} // namespace caffe
| a48c3deb890e87c38b6960ea6e48c1b7d07783b0.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "math_functions.hpp"
#include "hotspot_layer.hpp"
#define CV_PI 3.1415926535897932384626433832795
#define GAUSSIAN(x0,y0,x,y) 0.5 / gaussian_std / gaussian_std / CV_PI * exp(-0.5 * (((x0)-(x)) * ((x0)-(x)) + ((y0)-(y)) * ((y0)-(y))) / gaussian_std / gaussian_std)
namespace caffe {
__device__ __constant__ float kEps= 1e-4;
// Renders one Gaussian "hotspot" per (image, point) pair into a heatmap.
// Each CUDA_KERNEL_LOOP iteration decodes one flat output index into
// (n, h, w, p), rescales point p of image n from data_ coordinates to
// target_ coordinates (optionally undoing mean-centering), evaluates the
// GAUSSIAN macro at output pixel (w, h), and zeroes values below kEps so
// the far tail of the Gaussian stays exactly 0.
template <typename Dtype>
__global__ void HotspotFoward(const int num, const int num_point, const Dtype gaussian_std,
const int data_height, const int data_width, const bool mean_removed,
const int target_height, const int target_width,
const Dtype* point_data, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, num * target_height * target_width * num_point) {
// Decode flat index -> (n, h, w, p); w and p are interleaved innermost.
int n = index / (target_height * target_width * num_point);
int sp = index % (target_height * target_width * num_point);
int h = sp / (target_width * num_point);
int pw = sp % (target_width * num_point);
int w = pw / num_point;
int p = pw % num_point;
// Point p is stored as an (x, y) pair; normalize by the data size and
// scale to the target heatmap size.
Dtype p1 = (point_data[n * num_point * 2 + p * 2] / data_width + (mean_removed ? 0.5 : 0)) * target_width;
Dtype p2 = (point_data[n * num_point * 2 + p * 2 + 1] / data_height + (mean_removed ? 0.5 : 0)) * target_height;
Dtype temp = GAUSSIAN(p1, p2, w, h);
if (temp > kEps) {
top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = temp;
}
else {
top_data[(((n * num_point + p) * target_height + h) * target_width + w)] = 0;
}
}
}
// GPU forward pass. bottom[0] is a (num x 2*num_point) blob of interleaved
// (x, y) point coordinates; top[0] receives num x num_point x height_ x
// width_ Gaussian heatmaps. One thread per output element, sized by
// CAFFE_GET_BLOCKS; CUDA_POST_KERNEL_CHECK surfaces launch errors.
template <typename Dtype>
void HotspotLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* point_data = bottom[0]->gpu_data();
const int num_point = bottom[0]->shape(1) / 2;  // (x, y) pairs per image
const int num = bottom[0]->num();
HotspotFoward<Dtype> << <CAFFE_GET_BLOCKS(num * num_point * height_ * width_),
CAFFE_CUDA_NUM_THREADS >> >(num, num_point, gaussian_std_,
data_height_, data_width_, mean_removed_,
height_, width_,
point_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(HotspotLayer);
} // namespace caffe
|
75f70cef703f4a4e6ba0fab21e22640a956193d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
// Fills a device array with `value`, one element per thread.
// `size` defaults to -1 (no bounds check) so existing launches that pass
// only (input, value) keep compiling and behaving as before; pass the real
// element count to make the tail block safe.
template<typename T>
__global__ void __kernel_init__(T* input, T value, int size = -1)
{
  int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
  // FORMA_CEIL rounds the grid up, so without this guard the threads of the
  // last block beyond `size` would write out of bounds.
  if (size < 0 || loc < size)
    input[loc] = value;
}
// Host wrapper: launches __kernel_init__ over ceil(size / blockDim) blocks,
// forwarding `size` so the kernel can bounds-check the final partial block.
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
  dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
  dim3 init_block(FORMA_MAX_BLOCKDIM_0);
  hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value,size);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* Four chained 5-point stencil passes (weights 5/12/15/12/5, normalized by
 * 118) over an N x M row-major grid.  Each block owns a column strip of
 * FORMA_BLOCKDIM_X columns (strips overlap by 8 columns to provide halo
 * data for the four passes) and streams down the rows in FORMA_BLOCKDIM_Y
 * chunks, ping-ponging between two shared-memory tiles of
 * (FORMA_BLOCKDIM_Y+16) x FORMA_BLOCKDIM_X floats.  Only the final pass
 * writes to global memory (__var_1__).
 * NOTE(review): machine-generated by the FORMA compiler; code left
 * byte-identical.  mod() is a bitwise AND, so `rowy` must be a power of
 * two -- with the launch config in jacobi() (FORMA_BLOCKDIM_Y == 16),
 * rowy == 32, which satisfies that. */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
/* Carve the two ping-pong tiles out of the dynamic shared-memory blob. */
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
//int threadIdx_y = mod((int)threadIdx.y,2);
/* First input column handled by this block; strips overlap by 8 columns. */
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
/* Stage this chunk of input rows into tile 0.
 * NOTE(review): `&` below is a bitwise AND of two bools -- equivalent to
 * && here, but both operands are always evaluated. */
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
/* Pass 1: tile 0 -> tile 1; the valid interior shrinks by one cell on
 * each side per pass. */
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__-1),rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__+1),rowy)]);
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_19__;
}
}
__syncthreads();
/* Pass 2: tile 1 -> tile 0. */
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_32__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__-1),rowy)]);
float __temp_35__ = (__tilevar_1__[__iter_11__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__+1),rowy)]);
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
__tilevar_0__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_49__;
}
}
__syncthreads();
/* Pass 3: tile 0 -> tile 1. */
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_16__-1),rowy)]);
float __temp_61__ = (__tilevar_0__[__iter_17__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
float __temp_63__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
float __temp_65__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
float __temp_67__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_16__+1),rowy)]);
float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
float __temp_69__ = (__temp_68__ / 118);
__tilevar_1__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_69__;
}
}
__syncthreads();
/* Pass 4: tile 1 -> global output. */
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_22__-1),rowy)]);
float __temp_81__ = (__tilevar_1__[__iter_23__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
float __temp_83__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
float __temp_85__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
float __temp_87__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_22__+1),rowy)]);
float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
float __temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_89__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  // Dynamic shared memory required by __kernel___forma_kernel__0__:
  // two float tiles of (blockDim.y + 16) x blockDim.x elements each.
  const int tileElems = ((int)blockDim.y + 16) * (int)blockDim.x;
  return (int)sizeof(float) * 2 * tileElems;
}
/*Device code End */
/* Host Code Begin */
/* Host entry point: stages h_input on the GPU, runs the generated 4-pass
 * stencil kernel over the N x M grid, and copies the result into __var_0__
 * (which may itself be a host or device pointer). */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
/* Decide the copy direction: h_input may already live on the device. */
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
/* Clear the error that probing a plain host pointer leaves behind. */
hipGetLastError();
/* NOTE(review): when h_input is a device pointer the copy below is skipped
 * entirely, so `input` is never initialized from it -- presumably generated
 * code relies on callers passing host pointers; verify before reuse. */
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
/* 32x16 blocks; the grid overlaps strips by 8 columns (x-8) to supply the
 * halo cells consumed by the four stencil passes. */
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
/* Copy the result out; direction depends on where __var_0__ lives. */
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| 75f70cef703f4a4e6ba0fab21e22640a956193d0.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define mod(x,y) ( (x) & (y-1))
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
// Fill kernel: each thread writes `value` into one element of `input`.
// NOTE(review): there is no bounds check on `loc`, and initialize_array()
// rounds the grid size up with FORMA_CEIL, so the last partial block writes
// past the end of `input` whenever the array length is not a multiple of
// the block size -- confirm allocations are padded, or add a size guard.
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
// Host wrapper: fills d_input[0..size) with `value` via __kernel_init__.
// NOTE(review): FORMA_CEIL rounds the grid up but the kernel has no bounds
// check, so this over-writes past `size` elements when `size` is not a
// multiple of FORMA_MAX_BLOCKDIM_0 -- verify callers pad their buffers.
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* Four chained 5-point stencil passes (weights 5/12/15/12/5, normalized by
 * 118) over an N x M row-major grid.  Each block owns a column strip of
 * FORMA_BLOCKDIM_X columns (strips overlap by 8 columns to provide halo
 * data for the four passes) and streams down the rows in FORMA_BLOCKDIM_Y
 * chunks, ping-ponging between two shared-memory tiles of
 * (FORMA_BLOCKDIM_Y+16) x FORMA_BLOCKDIM_X floats.  Only the final pass
 * writes to global memory (__var_1__).
 * NOTE(review): machine-generated by the FORMA compiler; code left
 * byte-identical.  mod() is a bitwise AND, so `rowy` must be a power of
 * two -- with the launch config in jacobi() (FORMA_BLOCKDIM_Y == 16),
 * rowy == 32, which satisfies that. */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
/* Carve the two ping-pong tiles out of the dynamic shared-memory blob. */
float * __tilevar_0__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
float * __tilevar_1__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*((FORMA_BLOCKDIM_Y+16)*FORMA_BLOCKDIM_X);
int rowy = FORMA_BLOCKDIM_Y+16;
//int threadIdx_y = mod((int)threadIdx.y,2);
/* First input column handled by this block; strips overlap by 8 columns. */
int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__ += FORMA_BLOCKDIM_Y) {
/* Stage this chunk of input rows into tile 0.
 * NOTE(review): `&` below is a bitwise AND of two bools -- equivalent to
 * && here, but both operands are always evaluated. */
int __iter_2__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_2__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(N-1)) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_0__[__iter_3__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_2__,rowy)] = input[__iter_3__+M*__iter_2__];
}
__syncthreads();
/* Pass 1: tile 0 -> tile 1; the valid interior shrinks by one cell on
 * each side per pass. */
int __iter_4__ = FORMA_MAX((__iter_1__-1),1) + (int)(threadIdx.y) ;
if( __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(N-2)) ){
int __iter_5__ = FORMA_MAX((__iter_0__+1),1) + (int)(threadIdx.x) ;
if( __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__-1),rowy)]);
float __temp_5__ = (__tilevar_0__[__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_0__[__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = (__tilevar_0__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_4__+1),rowy)]);
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
__tilevar_1__[__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_4__,rowy)] = __temp_19__;
}
}
__syncthreads();
/* Pass 2: tile 1 -> tile 0. */
int __iter_10__ = FORMA_MAX((__iter_1__-2),1) + (int)(threadIdx.y) ;
if( __iter_10__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(N-2)) ){
int __iter_11__ = FORMA_MAX((__iter_0__+2),1) + (int)(threadIdx.x) ;
if( __iter_11__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_32__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__-1),rowy)]);
float __temp_35__ = (__tilevar_1__[__iter_11__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_1__[__iter_11__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = (__tilevar_1__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_10__+1),rowy)]);
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
__tilevar_0__[__iter_11__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_10__,rowy)] = __temp_49__;
}
}
__syncthreads();
/* Pass 3: tile 0 -> tile 1. */
int __iter_16__ = FORMA_MAX((__iter_1__-3),1) + (int)(threadIdx.y) ;
if( __iter_16__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(N-2)) ){
int __iter_17__ = FORMA_MAX((__iter_0__+3),1) + (int)(threadIdx.x) ;
if( __iter_17__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_16__-1),rowy)]);
float __temp_61__ = (__tilevar_0__[__iter_17__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
float __temp_63__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
float __temp_65__ = (__tilevar_0__[__iter_17__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)]);
float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
float __temp_67__ = (__tilevar_0__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_16__+1),rowy)]);
float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
float __temp_69__ = (__temp_68__ / 118);
__tilevar_1__[__iter_17__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_16__,rowy)] = __temp_69__;
}
}
__syncthreads();
/* Pass 4: tile 1 -> global output. */
int __iter_22__ = FORMA_MAX((__iter_1__-4),1) + (int)(threadIdx.y) ;
if( __iter_22__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(N-2)) ){
int __iter_23__ = FORMA_MAX((__iter_0__+4),1) + (int)(threadIdx.x) ;
if( __iter_23__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_22__-1),rowy)]);
float __temp_81__ = (__tilevar_1__[__iter_23__+(-1)-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
float __temp_83__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
float __temp_85__ = (__tilevar_1__[__iter_23__+1-__iter_0__+FORMA_BLOCKDIM_X*mod(__iter_22__,rowy)]);
float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
float __temp_87__ = (__tilevar_1__[__iter_23__-__iter_0__+FORMA_BLOCKDIM_X*mod((__iter_22__+1),rowy)]);
float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
float __temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_23__+(M)*(__iter_22__)] = __temp_89__;
}
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  // Dynamic shared memory required by __kernel___forma_kernel__0__:
  // two float tiles of (blockDim.y + 16) x blockDim.x elements each.
  const int tileElems = ((int)blockDim.y + 16) * (int)blockDim.x;
  return (int)sizeof(float) * 2 * tileElems;
}
/*Device code End */
/* Host Code Begin */
/* Host entry point: stages h_input on the GPU, runs the generated 4-pass
 * stencil kernel over the N x M grid, and copies the result into __var_0__
 * (which may itself be a host or device pointer). */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
/* Decide the copy direction: h_input may already live on the device. */
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
/* Clear the error that probing a plain host pointer leaves behind. */
cudaGetLastError();
/* NOTE(review): when h_input is a device pointer the copy below is skipped
 * entirely, so `input` is never initialized from it -- presumably generated
 * code relies on callers passing host pointers; verify before reuse. */
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
/* 32x16 blocks; the grid overlaps strips by 8 columns (x-8) to supply the
 * halo cells consumed by the four stencil passes. */
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 16;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
/* Copy the result out; direction depends on where __var_0__ lives. */
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
d75dc9eb1b70eab866d6a925bb8d7f7d567ae33b.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// Useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
  // Abort the process with a readable message when the most recent
  // CUDA/HIP call (or kernel launch) reported an error; no-op otherwise.
  hipError_t err = hipGetLastError();
  if (hipSuccess == err) {
    return;
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// Parameters for the boids algorithm.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
//Size of the starting area in simulation space. --> -scene_scale to scene_scale in every dimension
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// These buffers are here to hold all your boid information.
// These are allocated in Boids::initSimulation.
// We need two velocity buffers in a simulation so we can ping-pong the buffers.
// This way we can modify the data of one of the velocity buffers while reading from the other
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // Stores which index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // Stores which grid cell is this particle in?
// Needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // Stores Which part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this grid cell?
// Additional buffers needed to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_coherentVel; //rearranged form of dev_vel2 so that it is more memory coherent
glm::vec3 *dev_coherentPos; //rearranged form of dev_pos so that it is more memory coherent
// Grid parameters based on simulation parameters.
// These are computed in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
  // Integer mixing hash (Wang-style rounds of shifts/adds/xors) used to
  // seed each boid's RNG from its index; pure and deterministic.
  unsigned int h = a;
  h = (h + 0x7ed55d16) + (h << 12);
  h = (h ^ 0xc761c23c) ^ (h >> 19);
  h = (h + 0x165667b1) + (h << 5);
  h = (h + 0xd3a2646c) ^ (h << 9);
  h = (h + 0xfd7046c5) + (h << 3);
  h = (h ^ 0xb55a4f09) ^ (h >> 16);
  return h;
}
/*
* Helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index)
{
    // Deterministic pseudo-random vector with components in [-1, 1],
    // seeded from the boid index and the (integer-truncated) time so each
    // boid draws from its own reproducible stream.
    thrust::default_random_engine engine(hash((int)(index * time)));
    thrust::uniform_real_distribution<float> dist(-1, 1);
    return glm::vec3((float)dist(engine), (float)dist(engine), (float)dist(engine));
}
/**
* This is a basic CUDA kernel.
* CUDA kernel for generating boids with a random position somewhere inside the simulation space
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale)
{
    // One thread per boid: scatter boids pseudo-randomly in [-scale, scale]^3.
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= N) {
        return; // guard the grid tail
    }
    glm::vec3 r = generateRandomVec3(time, idx);
    arr[idx] = scale * r;
}
/**
* Initialize memory, update some globals
*/
/**
 * Allocate all device buffers for N boids, randomize their initial
 * positions, and derive the uniform-grid parameters used by the
 * neighbor-search paths.  Buffers are released in Boids::endSimulation.
 */
void Boids::initSimulation(int N)
{
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // ceil-div: the extra block covers
//the remainder when N is not an exact multiple of blockSize, so no boid
//is left unprocessed by the kernels launched below
// Device allocations for position and the two ping-pong velocity buffers.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
//generate random initial positions for boids (seed value 1)
kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> >(1, numObjects, dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
//Computing grid parameters: cell width is half the largest rule radius
gridCellWidth = 0.5f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; // +1 rounds up so the grid fully
// covers [-scene_scale, scene_scale] when the division truncates -- presumably; TODO confirm
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
// Shift the grid origin so cell (0,0,0) starts at the scene minimum.
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// Per-boid sort keys/values and per-cell start/end tables.
// NOTE(review): the error strings below are copy-pasted -- three of them
// name dev_particleArrayIndices even though they guard other allocations.
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
// Reshuffled copies of pos/vel for the memory-coherent search path.
hipMalloc((void**)&dev_coherentPos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_coherentPos failed!");
hipMalloc((void**)&dev_coherentVel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_coherentVel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/*
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale)
{
    // Pack one vec4 per boid into the VBO: position scaled into clip-ish
    // space (note the negated scale), with w fixed at 1.
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    float c_scale = -1.0f / s_scale;
    if (idx >= N) {
        return;
    }
    float *out = vbo + 4 * idx;
    out[0] = pos[idx].x * c_scale;
    out[1] = pos[idx].y * c_scale;
    out[2] = pos[idx].z * c_scale;
    out[3] = 1.0f;
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale)
{
    // Pack one vec4 per boid: velocity biased by +0.3 per component (so it
    // maps into a visible color range), w fixed at 1.  s_scale is unused
    // but kept for signature parity with kernCopyPositionsToVBO.
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx >= N) {
        return;
    }
    float *out = vbo + 4 * idx;
    out[0] = vel[idx].x + 0.3f;
    out[1] = vel[idx].y + 0.3f;
    out[2] = vel[idx].z + 0.3f;
    out[3] = 1.0f;
}
/*
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
/**
 * Copy current boid positions and velocities into the OpenGL VBOs
 * (vec4 per boid) so the renderer can draw them.  Blocks until both
 * copy kernels have finished.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities)
{
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* Helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity of the boid with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel)
{
    // Brute-force flocking: scan all N boids and accumulate the three
    // classic rules for boid iSelf, returning the summed velocity delta.
    glm::vec3 cohesionVel = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 separationVel = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 alignmentVel = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 centerOfMass = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 avgVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 repulsion = glm::vec3(0.0f, 0.0f, 0.0f);
    int cohesionNeighbors = 0;
    int alignmentNeighbors = 0;
    for (int i = 0; i < N; i++)
    {
        if (i == iSelf)
        {
            continue; // a boid never influences itself
        }
        float d = glm::distance(pos[i], pos[iSelf]);
        if (d < rule1Distance)
        {
            centerOfMass += pos[i];
            cohesionNeighbors++;
        }
        if (d < rule2Distance)
        {
            repulsion -= (pos[i] - pos[iSelf]);
        }
        if (d < rule3Distance)
        {
            avgVelocity += vel[i];
            alignmentNeighbors++;
        }
    }
    // Rule 1: fly toward the perceived center of mass of nearby boids.
    if (cohesionNeighbors != 0)
    {
        centerOfMass /= cohesionNeighbors;
        cohesionVel = (centerOfMass - pos[iSelf])*rule1Scale;
    }
    // Rule 2: steer away from boids that are too close.
    separationVel = repulsion*rule2Scale;
    // Rule 3: match the average velocity of nearby boids.
    if (alignmentNeighbors != 0)
    {
        avgVelocity /= alignmentNeighbors;
        alignmentVel = avgVelocity*rule3Scale;
    }
    return cohesionVel + separationVel + alignmentVel;
}
/*
* Implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
    // One thread per boid: add the brute-force flocking contribution to the
    // current velocity, clamp the speed, and store the result in vel2.
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx >= N)
    {
        return;
    }
    glm::vec3 v = vel1[idx] + computeVelocityChange(N, idx, pos, vel1);
    // Clamp the speed to maxSpeed while preserving direction.
    if (glm::length(v) > maxSpeed)
    {
        v = glm::normalize(v) * maxSpeed;
    }
    // Write to vel2, never vel1: every thread is still reading vel1 this
    // step, so updating it in place would race. The caller ping-pongs the
    // two buffers between steps.
    vel2[idx] = v;
}
/*
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel)
{
    // Euler-integrate each boid's position and wrap it so boids that leave
    // one face of the [-scene_scale, scene_scale] cube re-enter opposite.
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx >= N)
    {
        return;
    }
    glm::vec3 p = pos[idx] + vel[idx] * dt;
    for (int axis = 0; axis < 3; ++axis)
    {
        if (p[axis] < -scene_scale)
        {
            p[axis] = scene_scale;
        }
        else if (p[axis] > scene_scale)
        {
            p[axis] = -scene_scale;
        }
    }
    pos[idx] = p;
}
// Method of computing a 1D index from a 3D grid index.
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution)
{
    // Flatten 3D cell coordinates to a linear index: x varies fastest,
    // then y, then z.
    int slice = gridResolution * gridResolution;
    return z * slice + y * gridResolution + x;
}
__global__ void kernComputeIndices(int N, int gridResolution,
    glm::vec3 gridMin, float inverseCellWidth,
    glm::vec3 *pos, int *indices, int *gridIndices)
{
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx >= N)
    {
        return;
    }
    // Bin this boid: truncate its scaled offset from the grid minimum to
    // integer cell coordinates, then flatten to a linear cell index.
    glm::ivec3 cell = (pos[idx] - gridMin) * inverseCellWidth;
    gridIndices[idx] = gridIndex3Dto1D(cell.x, cell.y, cell.z, gridResolution);
    // Parallel identity array: after sorting by cell key, indices[] maps a
    // sorted slot back to the boid's row in pos/vel1/vel2.
    indices[idx] = idx;
}
// This is useful for indicating that a cell does not enclose any boids
// Called at the beginning of every step of a simulation to reset the buffer values to a default value which
// tells us if the cell holds any boids or not
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value)
{
    // Fill intBuffer[0..N) with `value`; used each simulation step to reset
    // the cell start/end tables to the "cell holds no boids" sentinel.
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= N)
    {
        return;
    }
    intBuffer[idx] = value;
}
// Identify the start and end points of each gridcell in the gridIndices array.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices)
{
//go through particleGridIndices identifying when there is a change in there value,
//which signifies a change in the gridcell we are dealing with
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N)
{
return;
}
if (index == 0) //edge case
{
gridCellStartIndices[particleGridIndices[index]] = 0;
}
else if (index == N - 1) //edge case
{
gridCellEndIndices[particleGridIndices[index]] = N - 1;
}
else if (particleGridIndices[index] != particleGridIndices[index + 1])
{
//inbetween grid cells with no boids are set to -1 --> done before when both the arrays were reset to -1
//change in gridcell
gridCellEndIndices[particleGridIndices[index]] = index;
gridCellStartIndices[particleGridIndices[index + 1]] = index + 1;
}
}
// Gather kernel: writes the position/velocity buffers reordered by the sorted
// boid index array, producing memory-coherent copies for the coherent grid path.
__global__ void kernSetCoherentPosVel(int N, int *particleArrayIndices,
    int *gridCellStartIndices, int *gridCellEndIndices,
    const glm::vec3 *pos, const glm::vec3 *vel,
    glm::vec3 *coherentPos, glm::vec3 *coherentVel)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
    {
        return;
    }
    // Gather: slot `tid` of the coherent buffers receives the boid that the
    // sorted index array says belongs there.
    int src = particleArrayIndices[tid];
    coherentPos[tid] = pos[src];
    coherentVel[tid] = vel[src];
}
// Scattered-grid velocity update: one thread per boid. Boid data stays in its
// original order and is reached indirectly through particleArrayIndices.
// Scans the clamped 3x3x3 block of grid cells around the boid's own cell.
__global__ void kernUpdateVelNeighborSearchScattered(
    int N, int gridResolution, glm::vec3 gridMin,
    float inverseCellWidth, float cellWidth,
    int *gridCellStartIndices, int *gridCellEndIndices,
    int *particleArrayIndices,
    glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= N)
    {
        return;
    }
    // Locate this boid's grid cell; neighbors can only live in the
    // surrounding 3x3x3 block of cells (clamped at the grid edges).
    glm::ivec3 boidPos = (pos[index] - gridMin) * inverseCellWidth;
    int x = boidPos.x;
    int y = boidPos.y;
    int z = boidPos.z;
    glm::vec3 v1 = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 v2 = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 v3 = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 percieved_center_of_mass = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 perceived_velocity = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 separate_vector = glm::vec3(0.0f, 0.0f, 0.0f);
    int neighborCount1 = 0;
    int neighborCount3 = 0;
    float distance = 0.0f;
    for (int i = -1; i <= 1; i++)
    {
        for (int j = -1; j <= 1; j++)
        {
            for (int k = -1; k <= 1; k++)
            {
                // Clamp the neighbor-cell coordinate to the grid bounds.
                int _x = imin(imax(x + i, 0), gridResolution - 1);
                int _y = imin(imax(y + j, 0), gridResolution - 1);
                int _z = imin(imax(z + k, 0), gridResolution - 1);
                int boidGridCellindex = gridIndex3Dto1D(_x, _y, _z, gridResolution);
                // A start index of -1 marks an empty cell (set by kernResetIntBuffer).
                if (gridCellStartIndices[boidGridCellindex] != -1)
                {
                    for (int h = gridCellStartIndices[boidGridCellindex]; h <= gridCellEndIndices[boidGridCellindex]; h++)
                    {
                        // h walks the *sorted* order; bindex is the boid's
                        // actual slot in pos/vel1.
                        int bindex = particleArrayIndices[h];
                        // BUG FIX: the self-exclusion test previously compared
                        // the sorted-array position `h` against the boid index,
                        // so a boid could include itself in the rules (and
                        // wrongly skip whichever boid happened to sit at
                        // sorted position == index).
                        if (bindex != index)
                        {
                            distance = glm::distance(pos[bindex], pos[index]);
                            // Rule 1 (cohesion): accumulate neighbor positions.
                            if (distance < rule1Distance)
                            {
                                percieved_center_of_mass += pos[bindex];
                                neighborCount1++;
                            }
                            // Rule 2 (separation): push away from close neighbors.
                            if (distance < rule2Distance)
                            {
                                separate_vector -= (pos[bindex] - pos[index]);
                            }
                            // Rule 3 (alignment): accumulate neighbor velocities.
                            if (distance < rule3Distance)
                            {
                                perceived_velocity += vel1[bindex];
                                neighborCount3++;
                            }
                        }
                    }
                }
            }
        }
    }
    // Rule 1: boids fly towards their local perceived center of mass.
    if (neighborCount1 != 0)
    {
        percieved_center_of_mass /= neighborCount1;
        v1 = (percieved_center_of_mass - pos[index])*rule1Scale;
    }
    // Rule 2: boids try to stay a distance d away from each other.
    v2 = separate_vector*rule2Scale;
    // Rule 3: boids try to match the speed of surrounding boids.
    if (neighborCount3 != 0)
    {
        perceived_velocity /= neighborCount3;
        v3 = perceived_velocity*rule3Scale;
    }
    glm::vec3 newVel = vel1[index] + v1 + v2 + v3;
    // Clamp the speed, then write into vel2: vel1 is still being read by other
    // threads, hence the host-side ping-pong of the velocity buffers.
    if (glm::length(newVel) > maxSpeed)
    {
        newVel = glm::normalize(newVel) * maxSpeed;
    }
    vel2[index] = newVel;
}
// Coherent-grid velocity update: one thread per boid. coherentPos/coherentVel
// are already reshuffled into grid-cell order, so the cell start/end spans
// index them directly (no particleArrayIndices indirection).
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *coherentPos, glm::vec3 *coherentVel, glm::vec3 *vel2)
{
// Very similar to kernUpdateVelNeighborSearchScattered, except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer directly to coherentPos and CoherentVel.
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N)
{
return;
}
//find boid position
//then use that position to determine the gridcell it belongs to
//use that information to find the 8 cells you have to check
glm::ivec3 boidPos = (coherentPos[index] - gridMin) * inverseCellWidth;
int x = boidPos.x;
int y = boidPos.y;
int z = boidPos.z;
// Per-rule velocity contributions and accumulators.
glm::vec3 v1 = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 v2 = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 v3 = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 percieved_center_of_mass = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 perceived_velocity = glm::vec3(0.0f, 0.0f, 0.0f);
glm::vec3 separate_vector = glm::vec3(0.0f, 0.0f, 0.0f);
int neighborCount1 = 0;
int neighborCount3 = 0;
float distance = 0.0f;
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// z-outer / x-inner matches the flattened grid index (x varies fastest), so
// cells are visited in increasing 1D-index order.
for (int k = -1; k <= 1; k++) //z axis
{
for (int j = -1; j <= 1; j++) //y axis
{
for (int i = -1; i <= 1; i++) //x axis
{
// Clamp the neighbor cell coordinate to the grid bounds.
int _x = x + i;
int _y = y + j;
int _z = z + k;
_x = imax(_x, 0);
_y = imax(_y, 0);
_z = imax(_z, 0);
_x = imin(_x, gridResolution - 1);
_y = imin(_y, gridResolution - 1);
_z = imin(_z, gridResolution - 1);
int boidGridCellindex = gridIndex3Dto1D(_x, _y, _z, gridResolution);
// Identify which cells may contain neighbors. This isn't always 8.
// For each cell, read the start/end indices in the boid pointer array.
if (gridCellStartIndices[boidGridCellindex] != -1)
{
//we know the grid cell is empty if its start or end indices have been set to -1
//now go through the boids in that grid cell and apply the rules
//to it if it falls within the neighbor hood distance
for (int h = gridCellStartIndices[boidGridCellindex]; h <= gridCellEndIndices[boidGridCellindex]; h++)
{
// Here `h != index` is a valid self-test: both h and index refer to
// the same (coherent) ordering, unlike the scattered variant.
if (h != index)
{
// Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
distance = glm::distance(coherentPos[h], coherentPos[index]);
if (distance < rule1Distance)
{
percieved_center_of_mass += coherentPos[h];
neighborCount1++;
}
if (distance < rule2Distance)
{
separate_vector -= (coherentPos[h] - coherentPos[index]);
}
if (distance < rule3Distance)
{
perceived_velocity += coherentVel[h];
neighborCount3++;
}
}
}
}
}
}
}
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (neighborCount1 != 0)
{
percieved_center_of_mass /= neighborCount1;
v1 = (percieved_center_of_mass - coherentPos[index])*rule1Scale;
}
// Rule 2: boids try to stay a distance d away from each other
v2 = separate_vector*rule2Scale;
// Rule 3: boids try to match the speed of surrounding boids
if (neighborCount3 != 0)
{
perceived_velocity /= neighborCount3;
v3 = perceived_velocity*rule3Scale;
}
glm::vec3 newVel = coherentVel[index] + v1 + v2 + v3;
// Clamp the speed change before putting the new speed in vel2
if (glm::length(newVel) > maxSpeed)
{
newVel = glm::normalize(newVel) * maxSpeed;
}
vel2[index] = newVel;
}
// Step the entire N-body simulation by `dt` seconds using the brute-force
// O(N^2) neighbor search.
void Boids::stepSimulationNaive(float dt)
{
    // One thread per boid; ceil-divide so a partial block covers the tail.
    dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);
    // Velocities first (reads vel1, writes vel2)...
    kernUpdateVelocityBruteForce << <blocksPerGrid, blockSize >> >(numObjects, dev_pos, dev_vel1, dev_vel2);
    checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
    // ...then integrate positions with the old velocities.
    kernUpdatePos << <blocksPerGrid, blockSize >> >(numObjects, dt, dev_pos, dev_vel1);
    checkCUDAErrorWithLine("kernUpdatePos failed!");
    // Ping-pong so vel2 becomes the current velocity buffer.
    std::swap(dev_vel1, dev_vel2);
}
// One simulation step using the scattered uniform-grid neighbor search:
// boid data stays in place and is reached through dev_particleArrayIndices.
void Boids::stepSimulationScatteredGrid(float dt)
{
    dim3 fullBlocksPerGrid_gridsize((gridCellCount + blockSize - 1) / blockSize);
    dim3 fullBlocksPerGrid_boids((numObjects + blockSize - 1) / blockSize);
    // Mark every grid cell empty (-1) before rebuilding the start/end tables.
    // FIX: error checks added after every launch for consistency with
    // stepSimulationNaive (launch failures were previously silent here).
    kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");
    kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");
    // Label each particle with its array index and its grid-cell index.
    kernComputeIndices << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount,
        gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
    checkCUDAErrorWithLine("kernComputeIndices failed!");
    // Sort boids by grid cell so each cell's members are contiguous.
    // Unstable sort is fine; the rules are order-independent per step.
    thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
    thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
    thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
    checkCUDAErrorWithLine("thrust sorting failed!");
    // Record each cell's [start, end] span within the sorted arrays.
    kernIdentifyCellStartEnd << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, dev_particleGridIndices,
        dev_gridCellStartIndices, dev_gridCellEndIndices);
    checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
    // Velocity update via grid neighbor search, then position integration.
    kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth, gridCellWidth,
        dev_gridCellStartIndices, dev_gridCellEndIndices,
        dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
    checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
    kernUpdatePos << <fullBlocksPerGrid_boids, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
    checkCUDAErrorWithLine("kernUpdatePos failed!");
    // Ping-pong the velocity buffers.
    std::swap(dev_vel1, dev_vel2);
}
// One simulation step using the coherent uniform grid: boid data is gathered
// into grid-cell order before the neighbor search, removing one indirection.
void Boids::stepSimulationCoherentGrid(float dt)
{
    dim3 fullBlocksPerGrid_gridsize((gridCellCount + blockSize - 1) / blockSize);
    dim3 fullBlocksPerGrid_boids((numObjects + blockSize - 1) / blockSize);
    // Mark every grid cell empty (-1) before rebuilding the start/end tables.
    // FIX: error checks added after every launch for consistency with
    // stepSimulationNaive (launch failures were previously silent here).
    kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");
    kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
    checkCUDAErrorWithLine("kernResetIntBuffer failed!");
    // Label each particle with its array index and its grid-cell index.
    kernComputeIndices << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount,
        gridMinimum, gridInverseCellWidth,
        dev_pos, dev_particleArrayIndices,
        dev_particleGridIndices);
    checkCUDAErrorWithLine("kernComputeIndices failed!");
    // Sort boids by grid cell so each cell's members are contiguous.
    thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
    thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
    thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
    checkCUDAErrorWithLine("thrust sorting failed!");
    // Record each cell's [start, end] span within the sorted arrays.
    kernIdentifyCellStartEnd << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, dev_particleGridIndices,
        dev_gridCellStartIndices, dev_gridCellEndIndices);
    checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
    // BIG DIFFERENCE: gather position/velocity into grid-cell order so the
    // neighbor-search kernel reads memory-coherent buffers directly.
    kernSetCoherentPosVel << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, dev_particleArrayIndices,
        dev_gridCellStartIndices, dev_gridCellEndIndices,
        dev_pos, dev_vel1, dev_coherentPos, dev_coherentVel);
    checkCUDAErrorWithLine("kernSetCoherentPosVel failed!");
    // Velocity update via grid neighbor search, then position integration
    // (positions are integrated in the coherent buffer, then swapped back).
    kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount, gridMinimum,
        gridInverseCellWidth, gridCellWidth,
        dev_gridCellStartIndices, dev_gridCellEndIndices,
        dev_coherentPos, dev_coherentVel, dev_vel2);
    checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
    kernUpdatePos << <fullBlocksPerGrid_boids, blockSize >> >(numObjects, dt, dev_coherentPos, dev_vel2);
    checkCUDAErrorWithLine("kernUpdatePos failed!");
    // Ping-pong coherent/regular position buffers and the velocity buffers.
    std::swap(dev_coherentPos, dev_pos);
    std::swap(dev_vel1, dev_vel2);
}
// Free every device buffer that was allocated in Boids::initSimulation.
void Boids::endSimulation()
{
    // Core simulation state.
    hipFree(dev_pos);
    hipFree(dev_vel1);
    hipFree(dev_vel2);
    // Uniform-grid bookkeeping buffers.
    hipFree(dev_particleArrayIndices);
    hipFree(dev_particleGridIndices);
    hipFree(dev_gridCellStartIndices);
    hipFree(dev_gridCellEndIndices);
    // Coherent-grid reshuffle buffers.
    hipFree(dev_coherentPos);
    hipFree(dev_coherentVel);
}
// Self-test: runs thrust::sort_by_key on a small key/value array on the device
// and prints the pairs before and after, so the sort behavior can be eyeballed.
void Boids::unitTest()
{
// Test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
// Keys deliberately contain duplicates (four 0s, two 2s) so the *unstable*
// sort is actually exercised; values track each pair's original position.
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
// NOTE(review): fullBlocksPerGrid is computed but never used in this test.
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++)
{
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// Copy data to the GPU
hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// Copy data back to the CPU side from the GPU
hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++)
{
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// Cleanup
delete[] intKeys;
delete[] intValues;
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
} | d75dc9eb1b70eab866d6a925bb8d7f7d567ae33b.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// Useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
 * Check the sticky CUDA error state; print the message (with an optional
 * source line number) and exit the process if an error is pending.
 */
void checkCUDAError(const char *msg, int line = -1) {
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) {
        return;  // nothing pending
    }
    if (line >= 0) {
        fprintf(stderr, "Line %d: ", line);
    }
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// Parameters for the boids algorithm.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
//Size of the starting area in simulation space. --> -scene_scale to scene_scale in every dimension
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// These buffers are here to hold all your boid information.
// These are allocated in Boids::initSimulation.
// We need two velocity buffers in a simulation so we can ping-pong the buffers.
// This way we can modify the data of one of the velocity buffers while reading from the other
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // Stores which index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // Stores which grid cell is this particle in?
// Needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // Stores Which part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this grid cell?
// Additional buffers needed to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_coherentVel; //rearranged form of dev_vel2 so that it is more memory coherent
glm::vec3 *dev_coherentPos; //rearranged form of dev_pos so that it is more memory coherent
// Grid parameters based on simulation parameters.
// These are computed in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer bit-mixing hash (Wang-style add/xor/shift rounds); used to turn a
// (time, index) pair into an RNG seed.
__host__ __device__ unsigned int hash(unsigned int a) {
    unsigned int h = a;
    h = (h + 0x7ed55d16) + (h << 12);
    h = (h ^ 0xc761c23c) ^ (h >> 19);
    h = (h + 0x165667b1) + (h << 5);
    h = (h + 0xd3a2646c) ^ (h << 9);
    h = (h + 0xfd7046c5) + (h << 3);
    h = (h ^ 0xb55a4f09) ^ (h >> 16);
    return h;
}
/*
 * Helper function for a CUDA kernel.
 * Generates a pseudo-random vec3 with each component uniform in [-1, 1),
 * seeded from (index * time).
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index)
{
    thrust::default_random_engine rng(hash((int)(index * time)));
    thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
    // FIX: draw the three components in a defined order. C++ leaves the
    // evaluation order of function arguments unspecified, so drawing inside
    // the vec3 constructor call made the component ordering (and thus the
    // generated positions) compiler-dependent.
    float x = (float)unitDistrib(rng);
    float y = (float)unitDistrib(rng);
    float z = (float)unitDistrib(rng);
    return glm::vec3(x, y, z);
}
/**
 * Basic CUDA kernel: places each of the N boids at a pseudo-random position
 * inside the cube [-scale, scale]^3, seeded by `time` and the thread index.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N) {
        return;
    }
    glm::vec3 rand = generateRandomVec3(time, tid);
    arr[tid].x = scale * rand.x;
    arr[tid].y = scale * rand.y;
    arr[tid].z = scale * rand.z;
}
/**
 * Initialize device memory and the uniform-grid parameters for N boids;
 * updates the module-level globals. Buffers are freed in Boids::endSimulation.
 */
void Boids::initSimulation(int N)
{
    numObjects = N;
    // Ceil-divide so a partial block covers the tail when N % blockSize != 0.
    dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
    cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
    cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
    cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
    // Scatter boids randomly through the scene.
    kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> >(1, numObjects, dev_pos, scene_scale);
    checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
    // Grid parameters: cell width is half the widest rule radius.
    gridCellWidth = 0.5f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
    // +1 rounds the half-width up so the grid fully encloses the scene.
    int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
    gridSideCount = 2 * halfSideCount;
    gridCellCount = gridSideCount * gridSideCount * gridSideCount;
    gridInverseCellWidth = 1.0f / gridCellWidth;
    float halfGridWidth = gridCellWidth * halfSideCount;
    gridMinimum.x -= halfGridWidth;
    gridMinimum.y -= halfGridWidth;
    gridMinimum.z -= halfGridWidth;
    // FIX: the error messages below previously all repeated
    // "dev_particleArrayIndices", making allocation failures point at the
    // wrong buffer; each message now names the buffer actually allocated.
    cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
    cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
    cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
    cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
    checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
    cudaMalloc((void**)&dev_coherentPos, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_coherentPos failed!");
    cudaMalloc((void**)&dev_coherentVel, N * sizeof(glm::vec3));
    checkCUDAErrorWithLine("cudaMalloc dev_coherentVel failed!");
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // drop-in replacement.
    cudaDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/*
 * Copies boid positions into the VBO (scaled by -1/s_scale) so that they can
 * be drawn by OpenGL; one vec4 per boid with w = 1.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N) {
        return;
    }
    float c_scale = -1.0f / s_scale;
    glm::vec3 p = pos[tid] * c_scale;
    vbo[4 * tid + 0] = p.x;
    vbo[4 * tid + 1] = p.y;
    vbo[4 * tid + 2] = p.z;
    vbo[4 * tid + 3] = 1.0f;
}
// Copies boid velocities into the VBO, one vec4 per boid with w = 1.
// Each component is offset by 0.3 — presumably to shift values into a visible
// color range for the renderer (TODO confirm against the shader).
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N) {
        return;
    }
    vbo[4 * tid + 0] = vel[tid].x + 0.3f;
    vbo[4 * tid + 1] = vel[tid].y + 0.3f;
    vbo[4 * tid + 2] = vel[tid].z + 0.3f;
    vbo[4 * tid + 3] = 1.0f;
}
/*
 * Wrapper for the kernCopy*ToVBO kernels: pushes the current boid positions
 * and velocities into the OpenGL VBOs for rendering, then synchronizes.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities)
{
    dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
    kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
    kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
    checkCUDAErrorWithLine("copyBoidsToVBO failed!");
    // FIX: cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // documented drop-in replacement with identical behavior.
    cudaDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
 * Helper for kernUpdateVelocityBruteForce (__device__, callable from kernels).
 * Computes the velocity delta for boid `iSelf` by applying the three classic
 * boids rules (cohesion, separation, alignment) against all `N` boids.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel)
{
    glm::vec3 centerOfMass(0.0f, 0.0f, 0.0f);   // rule 1 accumulator
    glm::vec3 separation(0.0f, 0.0f, 0.0f);     // rule 2 accumulator
    glm::vec3 avgVelocity(0.0f, 0.0f, 0.0f);    // rule 3 accumulator
    int cohesionNeighbors = 0;
    int alignmentNeighbors = 0;
    for (int i = 0; i < N; i++)
    {
        if (i == iSelf)
        {
            continue;   // a boid never counts itself as a neighbor
        }
        float d = glm::distance(pos[i], pos[iSelf]);
        if (d < rule1Distance)
        {
            centerOfMass += pos[i];
            cohesionNeighbors++;
        }
        if (d < rule2Distance)
        {
            separation -= (pos[i] - pos[iSelf]);
        }
        if (d < rule3Distance)
        {
            avgVelocity += vel[i];
            alignmentNeighbors++;
        }
    }
    // Rule 1: steer toward the perceived center of mass of nearby boids.
    glm::vec3 v1(0.0f, 0.0f, 0.0f);
    if (cohesionNeighbors != 0)
    {
        centerOfMass /= cohesionNeighbors;
        v1 = (centerOfMass - pos[iSelf]) * rule1Scale;
    }
    // Rule 2: keep a small distance away from very close boids.
    glm::vec3 v2 = separation * rule2Scale;
    // Rule 3: match the average velocity of nearby boids.
    glm::vec3 v3(0.0f, 0.0f, 0.0f);
    if (alignmentNeighbors != 0)
    {
        avgVelocity /= alignmentNeighbors;
        v3 = avgVelocity * rule3Scale;
    }
    return v1 + v2 + v3;
}
/*
 * Brute-force flocking step: each thread computes its boid's new velocity
 * against every other boid, clamps it, and writes the result into vel2.
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
    {
        return;
    }
    glm::vec3 newVel = vel1[tid] + computeVelocityChange(N, tid, pos, vel1);
    // Clamp to the simulation's maximum speed.
    if (glm::length(newVel) > maxSpeed)
    {
        newVel = glm::normalize(newVel) * maxSpeed;
    }
    // Write into vel2, not vel1: other threads are still reading vel1 inside
    // computeVelocityChange, so the host ping-pongs the two buffers instead.
    vel2[tid] = newVel;
}
/*
 * Advances each boid's position by vel * dt, wrapping toroidally at the scene
 * bounds so boids are never lost.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
    {
        return;
    }
    glm::vec3 p = pos[tid] + vel[tid] * dt;
    // Wrap each axis independently: below -scene_scale jumps to +scene_scale
    // and vice versa.
    p.x = p.x < -scene_scale ? scene_scale : (p.x > scene_scale ? -scene_scale : p.x);
    p.y = p.y < -scene_scale ? scene_scale : (p.y > scene_scale ? -scene_scale : p.y);
    p.z = p.z < -scene_scale ? scene_scale : (p.z > scene_scale ? -scene_scale : p.z);
    pos[tid] = p;
}
// Flattens a 3D grid-cell coordinate into a 1D cell index (x varies fastest).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution)
{
    return x + gridResolution * (y + gridResolution * z);
}
// Bins each boid into its uniform-grid cell and records the identity mapping
// of boid data indices (the sort that follows will permute it).
__global__ void kernComputeIndices(int N, int gridResolution,
    glm::vec3 gridMin, float inverseCellWidth,
    glm::vec3 *pos, int *indices, int *gridIndices)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
    {
        // Which cell does this boid fall into?
        glm::ivec3 cell = (pos[tid] - gridMin) * inverseCellWidth;
        gridIndices[tid] = gridIndex3Dto1D(cell.x, cell.y, cell.z, gridResolution);
        // Identity mapping: boid data has not been reshuffled yet.
        indices[tid] = tid;
    }
}
// Fills an int buffer with `value`. Run at the start of every simulation step
// to mark all grid cells as empty (-1) before the start/end tables are rebuilt.
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
    {
        return;
    }
    intBuffer[tid] = value;
}
// Identify the start and end offsets of each grid cell's span inside the
// sorted particleGridIndices array.
// Preconditions: particleGridIndices is sorted ascending, and both output
// buffers have been reset to -1 (-1 == "cell holds no boids").
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
    int *gridCellStartIndices, int *gridCellEndIndices)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= N)
    {
        return;
    }
    // The first boid always opens its cell's span.
    if (index == 0)
    {
        gridCellStartIndices[particleGridIndices[index]] = 0;
    }
    // The last boid always closes its cell's span.
    if (index == N - 1)
    {
        gridCellEndIndices[particleGridIndices[index]] = N - 1;
    }
    // BUG FIX: these used to be `else if` branches, so the transition between
    // index 0 and index 1 (and between N-2 and N-1 when N == 2) was never
    // examined; a cell boundary right after the first boid left that cell's
    // end index (and the next cell's start index) stuck at -1.
    if (index < N - 1 && particleGridIndices[index] != particleGridIndices[index + 1])
    {
        // Cell id changes here: close the current cell, open the next one.
        gridCellEndIndices[particleGridIndices[index]] = index;
        gridCellStartIndices[particleGridIndices[index + 1]] = index + 1;
    }
}
// Gather kernel: writes the position/velocity buffers reordered by the sorted
// boid index array, producing memory-coherent copies for the coherent grid path.
__global__ void kernSetCoherentPosVel(int N, int *particleArrayIndices,
    int *gridCellStartIndices, int *gridCellEndIndices,
    const glm::vec3 *pos, const glm::vec3 *vel,
    glm::vec3 *coherentPos, glm::vec3 *coherentVel)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
    {
        return;
    }
    // Gather: slot `tid` of the coherent buffers receives the boid that the
    // sorted index array says belongs there.
    int src = particleArrayIndices[tid];
    coherentPos[tid] = pos[src];
    coherentVel[tid] = vel[src];
}
// Scattered-grid velocity update: one thread per boid. Boid data stays in its
// original order and is reached indirectly through particleArrayIndices.
// Scans the clamped 3x3x3 block of grid cells around the boid's own cell.
__global__ void kernUpdateVelNeighborSearchScattered(
    int N, int gridResolution, glm::vec3 gridMin,
    float inverseCellWidth, float cellWidth,
    int *gridCellStartIndices, int *gridCellEndIndices,
    int *particleArrayIndices,
    glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index >= N)
    {
        return;
    }
    // Locate this boid's grid cell; neighbors can only live in the
    // surrounding 3x3x3 block of cells (clamped at the grid edges).
    glm::ivec3 boidPos = (pos[index] - gridMin) * inverseCellWidth;
    int x = boidPos.x;
    int y = boidPos.y;
    int z = boidPos.z;
    glm::vec3 v1 = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 v2 = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 v3 = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 percieved_center_of_mass = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 perceived_velocity = glm::vec3(0.0f, 0.0f, 0.0f);
    glm::vec3 separate_vector = glm::vec3(0.0f, 0.0f, 0.0f);
    int neighborCount1 = 0;
    int neighborCount3 = 0;
    float distance = 0.0f;
    for (int i = -1; i <= 1; i++)
    {
        for (int j = -1; j <= 1; j++)
        {
            for (int k = -1; k <= 1; k++)
            {
                // Clamp the neighbor-cell coordinate to the grid bounds.
                int _x = imin(imax(x + i, 0), gridResolution - 1);
                int _y = imin(imax(y + j, 0), gridResolution - 1);
                int _z = imin(imax(z + k, 0), gridResolution - 1);
                int boidGridCellindex = gridIndex3Dto1D(_x, _y, _z, gridResolution);
                // A start index of -1 marks an empty cell (set by kernResetIntBuffer).
                if (gridCellStartIndices[boidGridCellindex] != -1)
                {
                    for (int h = gridCellStartIndices[boidGridCellindex]; h <= gridCellEndIndices[boidGridCellindex]; h++)
                    {
                        // h walks the *sorted* order; bindex is the boid's
                        // actual slot in pos/vel1.
                        int bindex = particleArrayIndices[h];
                        // BUG FIX: the self-exclusion test previously compared
                        // the sorted-array position `h` against the boid index,
                        // so a boid could include itself in the rules (and
                        // wrongly skip whichever boid happened to sit at
                        // sorted position == index).
                        if (bindex != index)
                        {
                            distance = glm::distance(pos[bindex], pos[index]);
                            // Rule 1 (cohesion): accumulate neighbor positions.
                            if (distance < rule1Distance)
                            {
                                percieved_center_of_mass += pos[bindex];
                                neighborCount1++;
                            }
                            // Rule 2 (separation): push away from close neighbors.
                            if (distance < rule2Distance)
                            {
                                separate_vector -= (pos[bindex] - pos[index]);
                            }
                            // Rule 3 (alignment): accumulate neighbor velocities.
                            if (distance < rule3Distance)
                            {
                                perceived_velocity += vel1[bindex];
                                neighborCount3++;
                            }
                        }
                    }
                }
            }
        }
    }
    // Rule 1: boids fly towards their local perceived center of mass.
    if (neighborCount1 != 0)
    {
        percieved_center_of_mass /= neighborCount1;
        v1 = (percieved_center_of_mass - pos[index])*rule1Scale;
    }
    // Rule 2: boids try to stay a distance d away from each other.
    v2 = separate_vector*rule2Scale;
    // Rule 3: boids try to match the speed of surrounding boids.
    if (neighborCount3 != 0)
    {
        perceived_velocity /= neighborCount3;
        v3 = perceived_velocity*rule3Scale;
    }
    glm::vec3 newVel = vel1[index] + v1 + v2 + v3;
    // Clamp the speed, then write into vel2: vel1 is still being read by other
    // threads, hence the host-side ping-pong of the velocity buffers.
    if (glm::length(newVel) > maxSpeed)
    {
        newVel = glm::normalize(newVel) * maxSpeed;
    }
    vel2[index] = newVel;
}
// Velocity-update kernel for the cell-coherent uniform grid.
// One thread per boid; because pos/vel have been reshuffled into grid-cell
// order, `index` is simultaneously the thread id, the boid id, and the slot
// the cell start/end tables point at -- no particleArrayIndices indirection.
// Reads coherentPos/coherentVel, writes only vel2[index].
// Assumes a 1D launch with at least N threads. gridCellStartIndices entries
// of -1 mark empty cells (set by the host-side reset pass).
__global__ void kernUpdateVelNeighborSearchCoherent(
	int N, int gridResolution, glm::vec3 gridMin,
	float inverseCellWidth, float cellWidth,
	int *gridCellStartIndices, int *gridCellEndIndices,
	glm::vec3 *coherentPos, glm::vec3 *coherentVel, glm::vec3 *vel2)
{
	// This should expect gridCellStartIndices and gridCellEndIndices to refer
	// directly to coherentPos and coherentVel (one less indirection than the
	// scattered variant).
	int index = threadIdx.x + (blockIdx.x * blockDim.x);
	if (index >= N)
	{
		return;
	}
	// Convert the boid's world position into integer grid-cell coordinates
	// (truncation toward zero via the ivec3 conversion).
	glm::ivec3 boidPos = (coherentPos[index] - gridMin) * inverseCellWidth;
	int x = boidPos.x;
	int y = boidPos.y;
	int z = boidPos.z;
	// Per-rule velocity contributions, accumulated below.
	glm::vec3 v1 = glm::vec3(0.0f, 0.0f, 0.0f);
	glm::vec3 v2 = glm::vec3(0.0f, 0.0f, 0.0f);
	glm::vec3 v3 = glm::vec3(0.0f, 0.0f, 0.0f);
	glm::vec3 percieved_center_of_mass = glm::vec3(0.0f, 0.0f, 0.0f);
	glm::vec3 perceived_velocity = glm::vec3(0.0f, 0.0f, 0.0f);
	glm::vec3 separate_vector = glm::vec3(0.0f, 0.0f, 0.0f);
	int neighborCount1 = 0;
	int neighborCount3 = 0;
	float distance = 0.0f;
	// Scan the 3x3x3 neighborhood of cells (up to 27, fewer after clamping).
	// z varies slowest / x fastest so consecutive threads walk memory in the
	// same order the coherent buffers were laid out, maximizing cache reuse.
	for (int k = -1; k <= 1; k++) //z axis
	{
		for (int j = -1; j <= 1; j++) //y axis
		{
			for (int i = -1; i <= 1; i++) //x axis
			{
				int _x = x + i;
				int _y = y + j;
				int _z = z + k;
				// Clamp to the grid; border boids re-check their own edge
				// cells instead of reading out of bounds.
				_x = imax(_x, 0);
				_y = imax(_y, 0);
				_z = imax(_z, 0);
				_x = imin(_x, gridResolution - 1);
				_y = imin(_y, gridResolution - 1);
				_z = imin(_z, gridResolution - 1);
				int boidGridCellindex = gridIndex3Dto1D(_x, _y, _z, gridResolution);
				// -1 start index marks an empty cell; skip it.
				if (gridCellStartIndices[boidGridCellindex] != -1)
				{
					// Walk the contiguous [start, end] run of boids in this
					// cell and apply the three flocking rules to any boid
					// inside the relevant neighborhood radius.
					for (int h = gridCellStartIndices[boidGridCellindex]; h <= gridCellEndIndices[boidGridCellindex]; h++)
					{
						// h == index is this boid itself (coherent layout).
						if (h != index)
						{
							distance = glm::distance(coherentPos[h], coherentPos[index]);
							if (distance < rule1Distance)
							{
								percieved_center_of_mass += coherentPos[h];
								neighborCount1++;
							}
							if (distance < rule2Distance)
							{
								separate_vector -= (coherentPos[h] - coherentPos[index]);
							}
							if (distance < rule3Distance)
							{
								perceived_velocity += coherentVel[h];
								neighborCount3++;
							}
						}
					}
				}
			}
		}
	}
	// Rule 1: cohesion -- steer toward the perceived center of mass
	// (excluding self); guarded so an isolated boid divides by zero never.
	if (neighborCount1 != 0)
	{
		percieved_center_of_mass /= neighborCount1;
		v1 = (percieved_center_of_mass - coherentPos[index])*rule1Scale;
	}
	// Rule 2: separation -- keep a distance d away from close neighbors.
	v2 = separate_vector*rule2Scale;
	// Rule 3: alignment -- match the average velocity of nearby boids.
	if (neighborCount3 != 0)
	{
		perceived_velocity /= neighborCount3;
		v3 = perceived_velocity*rule3Scale;
	}
	glm::vec3 newVel = coherentVel[index] + v1 + v2 + v3;
	// Clamp the speed magnitude to maxSpeed before writing to vel2.
	if (glm::length(newVel) > maxSpeed)
	{
		newVel = glm::normalize(newVel) * maxSpeed;
	}
	vel2[index] = newVel;
}
// Advance the whole N-body simulation by `dt` seconds using the O(N^2)
// brute-force neighbor scan (every boid checks every other boid).
void Boids::stepSimulationNaive(float dt)
{
	// Ceiling-divide launch configuration: enough blocks to cover numObjects.
	const dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize);

	// Compute new velocities into dev_vel2 from the dev_vel1 snapshot.
	kernUpdateVelocityBruteForce<<<blocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel1, dev_vel2);
	checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");

	// Integrate positions. NOTE(review): this uses the pre-update velocities
	// in dev_vel1 (the grid variants integrate with dev_vel2) -- presumably
	// intentional explicit-Euler ordering; confirm before changing.
	kernUpdatePos<<<blocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel1);
	checkCUDAErrorWithLine("kernUpdatePos failed!");

	// Ping-pong so dev_vel1 holds the freshly computed velocities next step.
	std::swap(dev_vel1, dev_vel2);
}
// Advance the simulation by `dt` seconds using the scattered uniform grid:
// reset cell bookkeeping, bin boids into cells, sort boid indices by cell,
// record each cell's range, run the neighbor-search velocity update, then
// integrate positions and ping-pong the velocity buffers.
// Fix: every kernel launch is now followed by checkCUDAErrorWithLine, the
// convention already used in stepSimulationNaive -- previously launch errors
// from the reset/index/start-end/velocity kernels were silently dropped.
void Boids::stepSimulationScatteredGrid(float dt)
{
	// Uniform Grid Neighbor search using Thrust sort.
	dim3 fullBlocksPerGrid_gridsize((gridCellCount + blockSize - 1) / blockSize);
	dim3 fullBlocksPerGrid_boids((numObjects + blockSize - 1) / blockSize);
	// Reset start/end indices to -1 so the search kernel can detect empty cells.
	kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
	kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
	checkCUDAErrorWithLine("kernResetIntBuffer failed!");
	// Label each particle with its array index as well as its grid cell index
	// (recomputed every timestep since boids move).
	kernComputeIndices << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount,
		gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
	checkCUDAErrorWithLine("kernComputeIndices failed!");
	// Sort the boid indices by grid cell so cell members are contiguous.
	// An unstable sort is sufficient; wrap raw device pointers for Thrust.
	thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
	thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
	thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
	checkCUDAErrorWithLine("thrust sorting failed!");
	// Record each cell's [start, end] range within the sorted index array.
	kernIdentifyCellStartEnd << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, dev_particleGridIndices,
		dev_gridCellStartIndices, dev_gridCellEndIndices);
	checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
	// Perform velocity updates using neighbor search over nearby cells.
	kernUpdateVelNeighborSearchScattered << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount, gridMinimum,
		gridInverseCellWidth, gridCellWidth,
		dev_gridCellStartIndices, dev_gridCellEndIndices,
		dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
	checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!");
	// Integrate positions with the freshly computed velocities.
	kernUpdatePos << <fullBlocksPerGrid_boids, blockSize >> >(numObjects, dt, dev_pos, dev_vel2);
	checkCUDAErrorWithLine("kernUpdatePos failed!");
	// Ping-pong the velocity buffers.
	std::swap(dev_vel1, dev_vel2);
}
// Advance the simulation by `dt` seconds using the cell-coherent uniform
// grid: same pipeline as the scattered variant, plus a reshuffle pass that
// copies pos/vel into grid-cell order so the search kernel needs no
// particleArrayIndices indirection.
// Fix: every kernel launch is now followed by checkCUDAErrorWithLine, the
// convention already used in stepSimulationNaive -- previously most launch
// errors in this pipeline were silently dropped.
void Boids::stepSimulationCoherentGrid(float dt)
{
	// Uniform Grid Neighbor search using Thrust sort with cell-coherent data.
	dim3 fullBlocksPerGrid_gridsize((gridCellCount + blockSize - 1) / blockSize);
	dim3 fullBlocksPerGrid_boids((numObjects + blockSize - 1) / blockSize);
	// Reset start/end indices to -1 so the search kernel can detect empty cells.
	kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
	kernResetIntBuffer << <fullBlocksPerGrid_gridsize, blockSize >> > (gridCellCount, dev_gridCellEndIndices, -1);
	checkCUDAErrorWithLine("kernResetIntBuffer failed!");
	// Label each particle with its array index as well as its grid cell index
	// (recomputed every timestep since boids move).
	kernComputeIndices << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount,
		gridMinimum, gridInverseCellWidth,
		dev_pos, dev_particleArrayIndices,
		dev_particleGridIndices);
	checkCUDAErrorWithLine("kernComputeIndices failed!");
	// Sort the boid indices by grid cell so cell members are contiguous.
	// An unstable sort is sufficient; wrap raw device pointers for Thrust.
	thrust::device_ptr<int> dev_thrust_keys(dev_particleGridIndices);
	thrust::device_ptr<int> dev_thrust_values(dev_particleArrayIndices);
	thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + numObjects, dev_thrust_values);
	checkCUDAErrorWithLine("thrust sorting failed!");
	// Record each cell's [start, end] range within the sorted index array.
	kernIdentifyCellStartEnd << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, dev_particleGridIndices,
		dev_gridCellStartIndices, dev_gridCellEndIndices);
	checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!");
	// BIG DIFFERENCE: reshuffle all boid data (position and velocity) into
	// grid-cell order so the neighbor search reads memory coherently.
	kernSetCoherentPosVel << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, dev_particleArrayIndices,
		dev_gridCellStartIndices, dev_gridCellEndIndices,
		dev_pos, dev_vel1, dev_coherentPos, dev_coherentVel);
	checkCUDAErrorWithLine("kernSetCoherentPosVel failed!");
	// Perform velocity updates using neighbor search over nearby cells.
	kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid_boids, blockSize >> > (numObjects, gridSideCount, gridMinimum,
		gridInverseCellWidth, gridCellWidth,
		dev_gridCellStartIndices, dev_gridCellEndIndices,
		dev_coherentPos, dev_coherentVel, dev_vel2);
	checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!");
	// Integrate positions (stored in coherent order this step).
	kernUpdatePos << <fullBlocksPerGrid_boids, blockSize >> >(numObjects, dt, dev_coherentPos, dev_vel2);
	checkCUDAErrorWithLine("kernUpdatePos failed!");
	// Ping-pong coherent/regular position buffers and the velocity buffers.
	std::swap(dev_coherentPos, dev_pos);
	std::swap(dev_vel1, dev_vel2);
}
//Free memory that was allocated in initSimulation
void Boids::endSimulation()
{
	// Release every device buffer created in initSimulation.
	// Velocity ping-pong pair.
	cudaFree(dev_vel1);
	cudaFree(dev_vel2);
	// Boid positions.
	cudaFree(dev_pos);
	// Uniform-grid bookkeeping (sorted boid indices and per-cell ranges).
	cudaFree(dev_particleArrayIndices);
	cudaFree(dev_particleGridIndices);
	cudaFree(dev_gridCellStartIndices);
	cudaFree(dev_gridCellEndIndices);
	// Cell-coherent copies of position/velocity used by the coherent path.
	cudaFree(dev_coherentPos);
	cudaFree(dev_coherentVel);
}
// Sanity-check thrust::sort_by_key: sort a small key/value set on the GPU
// and print the pairs before and after so ordering can be eyeballed.
void Boids::unitTest()
{
	const int N = 10;
	// Table-driven fixture: keys with duplicates, values = original positions.
	const int initKeys[N]   = { 0, 1, 0, 3, 0, 2, 2, 0, 5, 6 };
	const int initValues[N] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	int *intKeys = new int[N];
	int *intValues = new int[N];
	for (int i = 0; i < N; i++)
	{
		intKeys[i] = initKeys[i];
		intValues[i] = initValues[i];
	}
	int *dev_intKeys;
	int *dev_intValues;
	cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
	checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
	cudaMalloc((void**)&dev_intValues, N * sizeof(int));
	checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
	dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
	std::cout << "before unstable sort: " << std::endl;
	for (int i = 0; i < N; i++)
	{
		std::cout << " key: " << intKeys[i];
		std::cout << " value: " << intValues[i] << std::endl;
	}
	// Push the fixture to the GPU.
	cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
	// Wrap raw device pointers so Thrust can operate on them, then sort.
	// Unstable sort: equal keys may appear in any relative value order.
	thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
	thrust::device_ptr<int> dev_thrust_values(dev_intValues);
	thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
	// Pull the sorted pairs back and show them.
	cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
	cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
	checkCUDAErrorWithLine("memcpy back failed!");
	std::cout << "after unstable sort: " << std::endl;
	for (int i = 0; i < N; i++)
	{
		std::cout << " key: " << intKeys[i];
		std::cout << " value: " << intValues[i] << std::endl;
	}
	// Cleanup
	delete[] intKeys;
	delete[] intValues;
	cudaFree(dev_intKeys);
	cudaFree(dev_intValues);
	checkCUDAErrorWithLine("cudaFree failed!");
	return;
}
e335c873adf30b868d8d5e21f9c4e43742bcaae0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "concat_layer.hpp"
#include "math_functions.hpp"
namespace caffe {
// Copies elements between a bottom blob and the concatenated top blob.
// forward == true : scatter in_data (bottom) into out_data (top) at
//                   offset_concat_axis along the concat axis;
// forward == false: the reverse copy (gather from top back into bottom).
// nthreads is the element count of the bottom blob; CUDA_KERNEL_LOOP makes
// the kernel correct for any grid size (grid-stride loop).
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
    const bool forward, const int num_concats, const int concat_size,
    const int top_concat_axis, const int bottom_concat_axis,
    const int offset_concat_axis, Dtype* out_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Linear size of one concat-axis slice of the bottom blob.
    const int total_concat_size = concat_size * bottom_concat_axis;
    const int concat_num = index / total_concat_size;    // which slice
    const int concat_index = index % total_concat_size;  // offset in slice
    // Matching linear index inside the (wider) top blob: same slice, shifted
    // by offset_concat_axis along the concatenation axis.
    const int top_index = concat_index +
        (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
    if (forward) {
      out_data[top_index] = in_data[index];
    } else {
      out_data[index] = in_data[top_index];
    }
  }
}
// Forward pass: copy each bottom blob into its slot of the concatenated top
// blob with one Concat kernel launch per input.
// Fix: the hipify conversion spliced `hipLaunchKernelGGL((` into the NOLINT
// comment, leaving a dangling `, dim3(...)` expression that cannot compile.
// Restored a well-formed hipLaunchKernelGGL call; HIP_KERNEL_NAME wraps the
// templated kernel so the comma in `Concat<Dtype>` survives macro expansion.
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // With a single input the top simply shares the bottom's data.
  if (bottom.size() == 1) { return; }
  Dtype* top_data = top[0]->mutable_gpu_data();
  int offset_concat_axis = 0;  // running offset along the concat axis
  const int top_concat_axis = top[0]->shape(concat_axis_);
  const bool kForward = true;
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
    const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
    // One thread per element of this bottom blob.
    const int nthreads = bottom_concat_size * num_concats_;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(Concat<Dtype>),
        dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
        top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
    offset_concat_axis += bottom_concat_axis;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe
| e335c873adf30b868d8d5e21f9c4e43742bcaae0.cu | #include <vector>
#include "concat_layer.hpp"
#include "math_functions.hpp"
namespace caffe {
// Copies elements between a bottom blob and the concatenated top blob.
// forward == true : scatter in_data (bottom) into out_data (top) at
//                   offset_concat_axis along the concat axis;
// forward == false: the reverse copy (gather from top back into bottom).
// nthreads is the element count of the bottom blob; CUDA_KERNEL_LOOP makes
// the kernel correct for any grid size (grid-stride loop).
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
    const bool forward, const int num_concats, const int concat_size,
    const int top_concat_axis, const int bottom_concat_axis,
    const int offset_concat_axis, Dtype* out_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // Linear size of one concat-axis slice of the bottom blob.
    const int total_concat_size = concat_size * bottom_concat_axis;
    const int concat_num = index / total_concat_size;    // which slice
    const int concat_index = index % total_concat_size;  // offset in slice
    // Matching linear index inside the (wider) top blob: same slice, shifted
    // by offset_concat_axis along the concatenation axis.
    const int top_index = concat_index +
        (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
    if (forward) {
      out_data[top_index] = in_data[index];
    } else {
      out_data[index] = in_data[top_index];
    }
  }
}
// Forward pass: copy each bottom blob into its slot of the concatenated top
// blob, one Concat kernel launch per input.
template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // With a single input the top simply shares the bottom's data.
  if (bottom.size() == 1) { return; }
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int top_concat_axis = top[0]->shape(concat_axis_);
  const bool kForward = true;
  int offset_concat_axis = 0;  // running offset along the concat axis
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
    // One thread per element of this bottom blob.
    const int nthreads = bottom_concat_axis * concat_input_size_ * num_concats_;
    // NOLINT_NEXT_LINE(whitespace/operators)
    Concat<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
        nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
        top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
    offset_concat_axis += bottom_concat_axis;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);
} // namespace caffe
|
6969a31ac8410a101a2c4733573063c6504fdb4e.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "parboil.h"
//#include "file.h"
#include "../benchmark_common.h"
#define CUERR \
{ \
hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; \
} \
}
// Block index
#define bx blockIdx.x
#define by blockIdx.y
// Thread index
#define tx threadIdx.x
// Possible values are 2, 4, 8 and 16
#define R 2
inline __device__ float2 operator*(float2 a, float2 b) {
return make_float2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x);
}
inline __device__ float2 operator+(float2 a, float2 b) {
return make_float2(a.x + b.x, a.y + b.y);
}
inline __device__ float2 operator-(float2 a, float2 b) {
return make_float2(a.x - b.x, a.y - b.y);
}
inline __device__ float2 operator*(float2 a, float b) {
return make_float2(b * a.x, b * a.y);
}
#define COS_PI_8 0.923879533f
#define SIN_PI_8 0.382683432f
#define exp_1_16 make_float2(COS_PI_8, -SIN_PI_8)
#define exp_3_16 make_float2(SIN_PI_8, -COS_PI_8)
#define exp_5_16 make_float2(-SIN_PI_8, -COS_PI_8)
#define exp_7_16 make_float2(-COS_PI_8, -SIN_PI_8)
#define exp_9_16 make_float2(-COS_PI_8, SIN_PI_8)
#define exp_1_8 make_float2(1, -1)
#define exp_1_4 make_float2(0, -1)
#define exp_3_8 make_float2(-1, -1)
__device__ void GPU_FFT2(float2& v1, float2& v2) {
float2 v0 = v1;
v1 = v0 + v2;
v2 = v0 - v2;
}
__device__ void GPU_FFT4(float2& v0, float2& v1, float2& v2, float2& v3) {
GPU_FFT2(v0, v2);
GPU_FFT2(v1, v3);
v3 = v3 * exp_1_4;
GPU_FFT2(v0, v1);
GPU_FFT2(v2, v3);
}
inline __device__ void GPU_FFT2(float2* v) {
GPU_FFT2(v[0], v[1]);
}
inline __device__ void GPU_FFT4(float2* v) {
GPU_FFT4(v[0], v[1], v[2], v[3]);
}
inline __device__ void GPU_FFT8(float2* v) {
GPU_FFT2(v[0], v[4]);
GPU_FFT2(v[1], v[5]);
GPU_FFT2(v[2], v[6]);
GPU_FFT2(v[3], v[7]);
v[5] = (v[5] * exp_1_8) * M_SQRT1_2;
v[6] = v[6] * exp_1_4;
v[7] = (v[7] * exp_3_8) * M_SQRT1_2;
GPU_FFT4(v[0], v[1], v[2], v[3]);
GPU_FFT4(v[4], v[5], v[6], v[7]);
}
inline __device__ void GPU_FFT16(float2* v) {
GPU_FFT4(v[0], v[4], v[8], v[12]);
GPU_FFT4(v[1], v[5], v[9], v[13]);
GPU_FFT4(v[2], v[6], v[10], v[14]);
GPU_FFT4(v[3], v[7], v[11], v[15]);
v[5] = (v[5] * exp_1_8) * M_SQRT1_2;
v[6] = v[6] * exp_1_4;
v[7] = (v[7] * exp_3_8) * M_SQRT1_2;
v[9] = v[9] * exp_1_16;
v[10] = (v[10] * exp_1_8) * M_SQRT1_2;
v[11] = v[11] * exp_3_16;
v[13] = v[13] * exp_3_16;
v[14] = (v[14] * exp_3_8) * M_SQRT1_2;
v[15] = v[15] * exp_9_16;
GPU_FFT4(v[0], v[1], v[2], v[3]);
GPU_FFT4(v[4], v[5], v[6], v[7]);
GPU_FFT4(v[8], v[9], v[10], v[11]);
GPU_FFT4(v[12], v[13], v[14], v[15]);
}
__device__ int GPU_expand(int idxL, int N1, int N2) {
return (idxL / N1) * N1 * N2 + (idxL % N1);
}
// One radix-R butterfly stage for output column j of a length-N transform.
// Ns is the current sub-transform size (1, R, R^2, ... < N); data0 is read,
// data1 is written -- the caller ping-pongs the two buffers between stages.
__device__ void GPU_FftIteration(int j,
                                 int Ns,
                                 float2* data0,
                                 float2* data1,
                                 int N) {
  float2 v[R];
  int idxS = j;
  // Twiddle base angle for this column within its Ns-sized group.
  // NOTE(review): `cos`/`sin` below take a promoted double argument
  // (double-precision transcendentals); `cosf`/`sinf` would be faster on
  // GPU but changes rounding -- confirm accuracy needs before switching.
  float angle = -2 * M_PI * (j % Ns) / (Ns * R);
  // Gather the R strided inputs and apply the twiddle factors e^{i*r*angle}.
  for (int r = 0; r < R; r++) {
    v[r] = data0[idxS + r * N / R];
    v[r] = v[r] * make_float2(cos(r * angle), sin(r * angle));
  }
  // Compile-time dispatch to the butterfly matching the configured radix R.
  #if R == 2
  GPU_FFT2(v);
  #endif
  #if R == 4
  GPU_FFT4(v);
  #endif
  #if R == 8
  GPU_FFT8(v);
  #endif
  #if R == 16
  GPU_FFT16(v);
  #endif
  // Scatter results to the Stockham output positions for the next stage.
  int idxD = GPU_expand(j, Ns, R);
  for (int r = 0; r < R; r++) {
    data1[idxD + r * Ns] = v[r];
  }
}
// One FFT stage over all batches: block bx selects a batch of N complex
// samples; each of the N/R threads (tx) computes one radix-R butterfly
// column. (bx/tx are file-local macros for blockIdx.x / threadIdx.x.)
__global__ void GPU_FFT_Global(int Ns, float2* data0, float2* data1, int N) {
  // Offset both buffers to this block's batch.
  data0 += bx * N;
  data1 += bx * N;
  GPU_FftIteration(tx, Ns, data0, data1, N);
}
// int main( int argc, char **argv )
// Host driver for the batched radix-R Stockham FFT benchmark (HIP build).
// Loads B batches of N complex samples from a binary file, runs the
// log_R(N) ping-pong stages on `stream_app`, and copies the result back.
//   stream_app - stream all copies and kernels are issued on
//   mutexapp   - benchmark-framework mutex, unlocked once kernels are queued
//   flag       - when true, synchronize the stream after launches/copies
// Returns 0 on success, -1 on a HIP error (via the CUERR macro).
// Fixes: open the binary input with "rb" (text mode "r" corrupts binary
// reads on Windows), check the fread count, and check the host mallocs.
int main_fft(hipStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
  int n_bytes;
  int N, B;
  struct pb_TimerSet timers;
  N = 256;   // FFT length (points per batch)
  B = 1024;  // number of independent batches
  n_bytes = N * B * sizeof(float2);
  pb_InitializeTimerSet(&timers);
  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  float2* source = (float2*)malloc(n_bytes);
  float2* result = (float2*)malloc(n_bytes);
  if (source == NULL || result == NULL) {
    fprintf(stderr, "Cannot allocate host buffers\n");
    exit(-1);
  }
  // TODO(review): hard-coded absolute input path; make configurable.
  const char* file =
      "/home/pratheek-htc/masksim/v3.x/pthread_benchmark/FFT/array.bin";
  FILE* fid = fopen(file, "rb");  // "rb": raw binary float data
  if (fid == NULL) {
    fprintf(stderr, "Cannot open input file\n");
    exit(-1);
  }
  // Each complex sample is two floats, hence N * B * 2 float reads.
  size_t n_floats = (size_t)N * B * 2;
  if (fread((float*)source, sizeof(float), n_floats, fid) != n_floats) {
    fprintf(stderr, "Short read on input file\n");
    fclose(fid);
    exit(-1);
  }
  fclose(fid);
  // Allocate device buffers and upload the input.
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);
  float2 *d_source, *d_work;
  hipMalloc((void**)&d_source, n_bytes);
  CUERR;
  hipMemcpyAsync(d_source, source, n_bytes, hipMemcpyHostToDevice,
                 stream_app);
  CUERR;
  hipMalloc((void**)&d_work, n_bytes);
  CUERR;
  hipMemset(d_work, 0, n_bytes);
  CUERR;
  pb_SwitchToTimer(&timers, pb_TimerID_GPU);
  // log_R(N) stages: each reads d_source, writes d_work, then the buffers
  // are swapped so the final result ends up in d_source.
  for (int Ns = 1; Ns < N; Ns *= R) {
    hipLaunchKernelGGL((GPU_FFT_Global), dim3(B), dim3(N / R), 0, stream_app,
                       Ns, d_source, d_work, N);
    float2* tmp = d_source;
    d_source = d_work;
    d_work = tmp;
  }
  printf("I am out from fft kernel launch\n");
  pthread_mutex_unlock(mutexapp);
  if (flag)
    cutilSafeCall(hipStreamSynchronize(stream_app));
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);
  // Download the transform (held by d_source after the final swap).
  hipMemcpyAsync(result, d_source, n_bytes, hipMemcpyDeviceToHost,
                 stream_app);
  CUERR;
  if (flag)
    cutilSafeCall(hipStreamSynchronize(stream_app));
  hipFree(d_source);
  CUERR;
  hipFree(d_work);
  CUERR;
  free(source);
  free(result);
  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  pb_PrintTimerSet(&timers);
  return 0;
}
| 6969a31ac8410a101a2c4733573063c6504fdb4e.cu | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <cuda.h>
#include <stdio.h>
#include "parboil.h"
//#include "file.h"
#include "../benchmark_common.h"
#define CUERR \
{ \
cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; \
} \
}
// Block index
#define bx blockIdx.x
#define by blockIdx.y
// Thread index
#define tx threadIdx.x
// Possible values are 2, 4, 8 and 16
#define R 2
inline __device__ float2 operator*(float2 a, float2 b) {
return make_float2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x);
}
inline __device__ float2 operator+(float2 a, float2 b) {
return make_float2(a.x + b.x, a.y + b.y);
}
inline __device__ float2 operator-(float2 a, float2 b) {
return make_float2(a.x - b.x, a.y - b.y);
}
inline __device__ float2 operator*(float2 a, float b) {
return make_float2(b * a.x, b * a.y);
}
#define COS_PI_8 0.923879533f
#define SIN_PI_8 0.382683432f
#define exp_1_16 make_float2(COS_PI_8, -SIN_PI_8)
#define exp_3_16 make_float2(SIN_PI_8, -COS_PI_8)
#define exp_5_16 make_float2(-SIN_PI_8, -COS_PI_8)
#define exp_7_16 make_float2(-COS_PI_8, -SIN_PI_8)
#define exp_9_16 make_float2(-COS_PI_8, SIN_PI_8)
#define exp_1_8 make_float2(1, -1)
#define exp_1_4 make_float2(0, -1)
#define exp_3_8 make_float2(-1, -1)
__device__ void GPU_FFT2(float2& v1, float2& v2) {
float2 v0 = v1;
v1 = v0 + v2;
v2 = v0 - v2;
}
__device__ void GPU_FFT4(float2& v0, float2& v1, float2& v2, float2& v3) {
GPU_FFT2(v0, v2);
GPU_FFT2(v1, v3);
v3 = v3 * exp_1_4;
GPU_FFT2(v0, v1);
GPU_FFT2(v2, v3);
}
inline __device__ void GPU_FFT2(float2* v) {
GPU_FFT2(v[0], v[1]);
}
inline __device__ void GPU_FFT4(float2* v) {
GPU_FFT4(v[0], v[1], v[2], v[3]);
}
inline __device__ void GPU_FFT8(float2* v) {
GPU_FFT2(v[0], v[4]);
GPU_FFT2(v[1], v[5]);
GPU_FFT2(v[2], v[6]);
GPU_FFT2(v[3], v[7]);
v[5] = (v[5] * exp_1_8) * M_SQRT1_2;
v[6] = v[6] * exp_1_4;
v[7] = (v[7] * exp_3_8) * M_SQRT1_2;
GPU_FFT4(v[0], v[1], v[2], v[3]);
GPU_FFT4(v[4], v[5], v[6], v[7]);
}
inline __device__ void GPU_FFT16(float2* v) {
GPU_FFT4(v[0], v[4], v[8], v[12]);
GPU_FFT4(v[1], v[5], v[9], v[13]);
GPU_FFT4(v[2], v[6], v[10], v[14]);
GPU_FFT4(v[3], v[7], v[11], v[15]);
v[5] = (v[5] * exp_1_8) * M_SQRT1_2;
v[6] = v[6] * exp_1_4;
v[7] = (v[7] * exp_3_8) * M_SQRT1_2;
v[9] = v[9] * exp_1_16;
v[10] = (v[10] * exp_1_8) * M_SQRT1_2;
v[11] = v[11] * exp_3_16;
v[13] = v[13] * exp_3_16;
v[14] = (v[14] * exp_3_8) * M_SQRT1_2;
v[15] = v[15] * exp_9_16;
GPU_FFT4(v[0], v[1], v[2], v[3]);
GPU_FFT4(v[4], v[5], v[6], v[7]);
GPU_FFT4(v[8], v[9], v[10], v[11]);
GPU_FFT4(v[12], v[13], v[14], v[15]);
}
__device__ int GPU_expand(int idxL, int N1, int N2) {
return (idxL / N1) * N1 * N2 + (idxL % N1);
}
__device__ void GPU_FftIteration(int j,
int Ns,
float2* data0,
float2* data1,
int N) {
float2 v[R];
int idxS = j;
float angle = -2 * M_PI * (j % Ns) / (Ns * R);
for (int r = 0; r < R; r++) {
v[r] = data0[idxS + r * N / R];
v[r] = v[r] * make_float2(cos(r * angle), sin(r * angle));
}
#if R == 2
GPU_FFT2(v);
#endif
#if R == 4
GPU_FFT4(v);
#endif
#if R == 8
GPU_FFT8(v);
#endif
#if R == 16
GPU_FFT16(v);
#endif
int idxD = GPU_expand(j, Ns, R);
for (int r = 0; r < R; r++) {
data1[idxD + r * Ns] = v[r];
}
}
__global__ void GPU_FFT_Global(int Ns, float2* data0, float2* data1, int N) {
data0 += bx * N;
data1 += bx * N;
GPU_FftIteration(tx, Ns, data0, data1, N);
}
// int main( int argc, char **argv )
// Host driver for the batched radix-R Stockham FFT benchmark (CUDA build).
// Loads B batches of N complex samples from a binary file, runs the
// log_R(N) ping-pong stages on `stream_app`, and copies the result back.
//   stream_app - stream all copies and kernels are issued on
//   mutexapp   - benchmark-framework mutex, unlocked once kernels are queued
//   flag       - when true, synchronize the stream after launches/copies
// Returns 0 on success, -1 on a CUDA error (via the CUERR macro).
// Fixes: open the binary input with "rb" (text mode "r" corrupts binary
// reads on Windows), check the fread count, and check the host mallocs.
int main_fft(cudaStream_t stream_app, pthread_mutex_t* mutexapp, bool flag) {
  int n_bytes;
  int N, B;
  struct pb_TimerSet timers;
  N = 256;   // FFT length (points per batch)
  B = 1024;  // number of independent batches
  n_bytes = N * B * sizeof(float2);
  pb_InitializeTimerSet(&timers);
  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  float2* source = (float2*)malloc(n_bytes);
  float2* result = (float2*)malloc(n_bytes);
  if (source == NULL || result == NULL) {
    fprintf(stderr, "Cannot allocate host buffers\n");
    exit(-1);
  }
  // TODO(review): hard-coded absolute input path; make configurable.
  const char* file =
      "/home/pratheek-htc/masksim/v3.x/pthread_benchmark/FFT/array.bin";
  FILE* fid = fopen(file, "rb");  // "rb": raw binary float data
  if (fid == NULL) {
    fprintf(stderr, "Cannot open input file\n");
    exit(-1);
  }
  // Each complex sample is two floats, hence N * B * 2 float reads.
  size_t n_floats = (size_t)N * B * 2;
  if (fread((float*)source, sizeof(float), n_floats, fid) != n_floats) {
    fprintf(stderr, "Short read on input file\n");
    fclose(fid);
    exit(-1);
  }
  fclose(fid);
  // Allocate device buffers and upload the input.
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);
  float2 *d_source, *d_work;
  cudaMalloc((void**)&d_source, n_bytes);
  CUERR;
  cudaMemcpyAsync(d_source, source, n_bytes, cudaMemcpyHostToDevice,
                  stream_app);
  CUERR;
  cudaMalloc((void**)&d_work, n_bytes);
  CUERR;
  cudaMemset(d_work, 0, n_bytes);
  CUERR;
  pb_SwitchToTimer(&timers, pb_TimerID_GPU);
  // log_R(N) stages: each reads d_source, writes d_work, then the buffers
  // are swapped so the final result ends up in d_source.
  for (int Ns = 1; Ns < N; Ns *= R) {
    GPU_FFT_Global<<<dim3(B), dim3(N / R), 0, stream_app>>>(Ns, d_source,
                                                            d_work, N);
    float2* tmp = d_source;
    d_source = d_work;
    d_work = tmp;
  }
  printf("I am out from fft kernel launch\n");
  pthread_mutex_unlock(mutexapp);
  if (flag)
    cutilSafeCall(cudaStreamSynchronize(stream_app));
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);
  // Download the transform (held by d_source after the final swap).
  cudaMemcpyAsync(result, d_source, n_bytes, cudaMemcpyDeviceToHost,
                  stream_app);
  CUERR;
  if (flag)
    cutilSafeCall(cudaStreamSynchronize(stream_app));
  cudaFree(d_source);
  CUERR;
  cudaFree(d_work);
  CUERR;
  free(source);
  free(result);
  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  pb_PrintTimerSet(&timers);
  return 0;
}
|
bd046201ee2b0cb9bd39490318de0fbf03ffc664.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "crop_layer.h"
#include "utils.h"
#include "hip/hip_runtime.h"
#include "image.h"
}
// Fetch channel c of pixel (x, y) from a channel-planar image of size w*h
// (index x + w*(y + c*h)); coordinates outside the image read as 0, which
// gives the samplers below implicit zero padding at the borders.
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c)
{
    const bool inside = (x >= 0) && (x < w) && (y >= 0) && (y < h);
    return inside ? image[x + w*(y + c*h)] : 0;
}
// Convert an RGB color to HSV.
// Hue is returned in [0, 6) (sextant units, not degrees), matching the
// floor(h)-based sextant lookup in hsv_to_rgb_kernel; for pure black
// (max == 0) hue is set to the sentinel -1 with saturation 0.
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
    float r = rgb.x;
    float g = rgb.y;
    float b = rgb.z;
    float h, s, v;
    // Value = channel max; chroma (delta) = max - min.
    float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
    float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
    float delta = max - min;
    v = max;
    if(max == 0){
        // Black: saturation undefined -> 0, hue marked with -1 sentinel.
        s = 0;
        h = -1;
    }else{
        s = delta/max;
        // Hue sextant depends on which channel is the maximum.
        if(r == max){
            h = (g - b) / delta;
        } else if (g == max) {
            h = 2 + (b - r) / delta;
        } else {
            h = 4 + (r - g) / delta;
        }
        // Wrap negative hues into [0, 6).
        if (h < 0) h += 6;
    }
    return make_float3(h, s, v);
}
// Convert an HSV color back to RGB.
// Expects hue in [0, 6) (sextant units, as produced by rgb_to_hsv_kernel);
// each RGB channel is clamped to [0, 1] before returning.
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
    float h = hsv.x;
    float s = hsv.y;
    float v = hsv.z;
    float r, g, b;
    float f, p, q, t;
    if (s == 0) {
        // Zero saturation: achromatic gray at the given value.
        r = g = b = v;
    } else {
        // Sextant index and the standard p/q/t intermediates.
        int index = (int) floorf(h);
        f = h - index;          // fractional position within the sextant
        p = v*(1-s);
        q = v*(1-s*f);
        t = v*(1-s*(1-f));
        if(index == 0){
            r = v; g = t; b = p;
        } else if(index == 1){
            r = q; g = v; b = p;
        } else if(index == 2){
            r = p; g = v; b = t;
        } else if(index == 3){
            r = p; g = q; b = v;
        } else if(index == 4){
            r = t; g = p; b = v;
        } else {
            r = v; g = p; b = q;
        }
    }
    // Clamp each channel to [0, 1].
    r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
    g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
    b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
    return make_float3(r, g, b);
}
// Bilinearly sample channel c of the image at fractional coordinates (x, y).
// The four neighboring texels are fetched through get_pixel_kernel, which
// returns 0 outside the image, so border samples fade toward zero.
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c)
{
    int ix = (int) floorf(x);
    int iy = (int) floorf(y);
    // Fractional offsets within the texel used as interpolation weights.
    float dx = x - ix;
    float dy = y - iy;
    // Weighted sum of the 2x2 neighborhood around (ix, iy).
    float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
        dy     * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
        (1-dy) *   dx   * get_pixel_kernel(image, w, h, ix+1, iy, c) +
        dy     *   dx   * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
    return val;
}
// Photometric augmentation: per-image saturation/exposure jitter (train
// mode only) plus a global scale/translate and per-channel random shift,
// applied in place to a batch of 3-channel planar images.
// One thread per pixel (batch*w*h threads); each thread rewrites all 3
// channels of its pixel. rand layout: rand[0..2] are global R/G/B shift
// draws; rand[8*img + 0..3] are this image's saturation/exposure draws
// (slots 4..7 are consumed by forward_crop_layer_kernel).
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift)
{
    int size = batch * w * h;
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= size) return;
    // Decompose the flat thread id into (x, y, image index).
    int x = id % w;
    id /= w;
    int y = id % h;
    id /= h;  // id is now the image (batch) index
    float rshift = rand[0];
    float gshift = rand[1];
    float bshift = rand[2];
    float r0 = rand[8*id + 0];
    float r1 = rand[8*id + 1];
    float r2 = rand[8*id + 2];
    float r3 = rand[8*id + 3];
    // Draw a multiplier in [1, saturation] and invert it half the time so
    // the jitter is symmetric around 1; same scheme for exposure.
    saturation = r0*(saturation - 1) + 1;
    saturation = (r1 > .5) ? 1./saturation : saturation;
    exposure = r2*(exposure - 1) + 1;
    exposure = (r3 > .5) ? 1./exposure : exposure;
    // Offset to this image's plane block (3 channels of h*w floats).
    size_t offset = id * h * w * 3;
    image += offset;
    float r = image[x + w*(y + h*0)];
    float g = image[x + w*(y + h*1)];
    float b = image[x + w*(y + h*2)];
    float3 rgb = make_float3(r,g,b);
    if(train){
        // Apply saturation/exposure in HSV space, then convert back.
        float3 hsv = rgb_to_hsv_kernel(rgb);
        hsv.y *= saturation;
        hsv.z *= exposure;
        rgb = hsv_to_rgb_kernel(hsv);
    } else {
        // Inference: no random channel shift.
        shift = 0;
    }
    // Final affine remap per channel, centered random shift of width `shift`.
    image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift;
    image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift;
    image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift;
}
// Crop one output pixel per thread from the input batch, sampling bilinearly.
// During training the crop gets a random offset (dw, dh), an optional
// horizontal flip, and a rotation by `angle` (radians, per the host caller)
// about the image center; at test time the crop is centered with no
// flip/rotation.
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.;
float cy = h/2.;
// Keep the flat output index before decomposing id.
int count = id;
// Decompose into (column j, row i, channel k, batch image b).
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
// Randoms 4..7 of the per-image block: crop offset, flip, rotation.
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5));
// Uniform rotation in [-angle, angle].
angle = 2*angle*r7 - angle;
if(!train){
// Deterministic centered crop for inference.
dw = (w - crop_width)/2.;
dh = (h - crop_height)/2.;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
// Rotate (x, y) about the image center (cx, cy).
float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx;
float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
// Forward pass of the crop layer on the GPU:
//   1) levels_image_kernel jitters color (saturation/exposure/shift) in-place
//      on the input batch,
//   2) forward_crop_layer_kernel writes the (optionally flipped/rotated)
//      crops into layer.output_gpu.
// Fix: resolves the unresolved git merge conflict (in favor of the
// network_state API used by both non-HEAD branches) and repairs the
// hipLaunchKernelGGL conversions that hipify had mangled, so the file
// compiles again.
extern "C" void forward_crop_layer_gpu(crop_layer layer, network_state state)
{
    // 8 fresh uniform randoms per batch image: 4 for color jitter, 4 for
    // crop offset / flip / rotation angle.
    cuda_random(layer.rand_gpu, layer.batch*8);

    float radians = layer.angle*3.14159265/180.;

    // Default remap of pixel values from [0,1] to [-1,1]; noadjust disables it.
    float scale = 2;
    float translate = -1;
    if(layer.noadjust){
        scale = 1;
        translate = 0;
    }

    int size = layer.batch * layer.w * layer.h;
    hipLaunchKernelGGL((levels_image_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0,
            state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train,
            layer.saturation, layer.exposure, translate, scale, layer.shift);
    check_error(hipPeekAtLastError());

    size = layer.batch*layer.c*layer.out_w*layer.out_h;
    hipLaunchKernelGGL((forward_crop_layer_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0,
            state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w,
            layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu);
    check_error(hipPeekAtLastError());
}
| bd046201ee2b0cb9bd39490318de0fbf03ffc664.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "crop_layer.h"
#include "utils.h"
#include "cuda.h"
#include "image.h"
}
// Fetch one pixel from a planar (CHW) float image.
// Reads outside the image bounds return 0 instead of faulting.
__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c)
{
    bool inside = (x >= 0) && (x < w) && (y >= 0) && (y < h);
    if (!inside) {
        return 0;
    }
    int offset = x + w * (y + c * h);
    return image[offset];
}
// Convert an RGB triple (channels in [0,1]) to HSV.
// Hue is returned in sextant units [0,6) rather than degrees; s and v are
// in [0,1]. Pure black (max == 0) gets s = 0 and the sentinel hue -1.
__device__ float3 rgb_to_hsv_kernel(float3 rgb)
{
float r = rgb.x;
float g = rgb.y;
float b = rgb.z;
float h, s, v;
float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
float delta = max - min;
// Value is the largest channel.
v = max;
if(max == 0){
s = 0;
h = -1;
}else{
s = delta/max;
// NOTE(review): for non-black gray pixels delta == 0, so h becomes 0/0
// (NaN). The round-trip through hsv_to_rgb appears to tolerate this
// because s == 0 takes the achromatic path — confirm before reusing.
if(r == max){
h = (g - b) / delta;
} else if (g == max) {
h = 2 + (b - r) / delta;
} else {
h = 4 + (r - g) / delta;
}
// Wrap negative hue into [0,6).
if (h < 0) h += 6;
}
return make_float3(h, s, v);
}
// Convert an HSV triple back to RGB.
// h is in sextant units [0,6) (see rgb_to_hsv_kernel above); s and v are in
// [0,1]. Output channels are clamped to [0,1].
__device__ float3 hsv_to_rgb_kernel(float3 hsv)
{
float h = hsv.x;
float s = hsv.y;
float v = hsv.z;
float r, g, b;
float f, p, q, t;
if (s == 0) {
// Zero saturation: achromatic, all channels equal the value.
r = g = b = v;
} else {
// index selects one of six hue sextants; f is the position within it.
int index = (int) floorf(h);
f = h - index;
p = v*(1-s);
q = v*(1-s*f);
t = v*(1-s*(1-f));
if(index == 0){
r = v; g = t; b = p;
} else if(index == 1){
r = q; g = v; b = p;
} else if(index == 2){
r = p; g = v; b = t;
} else if(index == 3){
r = p; g = q; b = v;
} else if(index == 4){
r = t; g = p; b = v;
} else {
r = v; g = p; b = q;
}
}
// Clamp each channel to [0,1] to guard against out-of-range inputs.
r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
return make_float3(r, g, b);
}
// Bilinearly sample channel c of a planar (CHW) float image at fractional
// coordinates (x, y). Corner pixels that fall outside the image contribute
// 0 through get_pixel_kernel's bounds check.
__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c)
{
    int x0 = (int) floorf(x);
    int y0 = (int) floorf(y);
    float fx = x - x0;
    float fy = y - y0;

    // Fetch the 2x2 neighborhood once, then blend with bilinear weights.
    float tl = get_pixel_kernel(image, w, h, x0,     y0,     c);
    float bl = get_pixel_kernel(image, w, h, x0,     y0 + 1, c);
    float tr = get_pixel_kernel(image, w, h, x0 + 1, y0,     c);
    float br = get_pixel_kernel(image, w, h, x0 + 1, y0 + 1, c);

    float val = (1 - fy) * (1 - fx) * tl +
                fy * (1 - fx) * bl +
                (1 - fy) * fx * tr +
                fy * fx * br;
    return val;
}
// Per-pixel color augmentation applied in-place to a batch of planar RGB
// images (one thread per (batch, x, y) location; rand holds 8 randoms per
// image plus 3 shared shift randoms at the front). During training the pixel
// is jittered in HSV space (saturation/exposure), then remapped by
// scale/translate plus a per-channel random shift; otherwise shift is zeroed
// and only the scale/translate remap is applied.
__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift)
{
int size = batch * w * h;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
// Decompose the flat id into (x, y, image index); after these divisions
// id is the image's index within the batch.
int x = id % w;
id /= w;
int y = id % h;
id /= h;
// rand[0..2] are read by every thread: one shift random per channel,
// shared across the whole batch.
float rshift = rand[0];
float gshift = rand[1];
float bshift = rand[2];
// Four per-image randoms drive the saturation/exposure jitter.
float r0 = rand[8*id + 0];
float r1 = rand[8*id + 1];
float r2 = rand[8*id + 2];
float r3 = rand[8*id + 3];
// Jitter factor in [1, saturation], inverted with probability .5 so the
// augmentation both raises and lowers saturation; exposure likewise.
saturation = r0*(saturation - 1) + 1;
saturation = (r1 > .5) ? 1./saturation : saturation;
exposure = r2*(exposure - 1) + 1;
exposure = (r3 > .5) ? 1./exposure : exposure;
size_t offset = id * h * w * 3;
image += offset;
// Planar layout: channel plane k starts at x + w*(y + h*k).
float r = image[x + w*(y + h*0)];
float g = image[x + w*(y + h*1)];
float b = image[x + w*(y + h*2)];
float3 rgb = make_float3(r,g,b);
if(train){
float3 hsv = rgb_to_hsv_kernel(rgb);
hsv.y *= saturation;
hsv.z *= exposure;
rgb = hsv_to_rgb_kernel(hsv);
} else {
// No random channel shift at inference time.
shift = 0;
}
image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5)*shift;
image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5)*shift;
image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5)*shift;
}
// Crop one output pixel per thread from the input batch, sampling bilinearly.
// During training the crop gets a random offset (dw, dh), an optional
// horizontal flip, and a rotation by `angle` (radians, per the host caller)
// about the image center; at test time the crop is centered with no
// flip/rotation.
__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= size) return;
float cx = w/2.;
float cy = h/2.;
// Keep the flat output index before decomposing id.
int count = id;
// Decompose into (column j, row i, channel k, batch image b).
int j = id % crop_width;
id /= crop_width;
int i = id % crop_height;
id /= crop_height;
int k = id % c;
id /= c;
int b = id;
// Randoms 4..7 of the per-image block: crop offset, flip, rotation.
float r4 = rand[8*b + 4];
float r5 = rand[8*b + 5];
float r6 = rand[8*b + 6];
float r7 = rand[8*b + 7];
float dw = (w - crop_width)*r4;
float dh = (h - crop_height)*r5;
flip = (flip && (r6 > .5));
// Uniform rotation in [-angle, angle].
angle = 2*angle*r7 - angle;
if(!train){
// Deterministic centered crop for inference.
dw = (w - crop_width)/2.;
dh = (h - crop_height)/2.;
flip = 0;
angle = 0;
}
input += w*h*c*b;
float x = (flip) ? w - dw - j - 1 : j + dw;
float y = i + dh;
// Rotate (x, y) about the image center (cx, cy).
float rx = cos(angle)*(x-cx) - sin(angle)*(y-cy) + cx;
float ry = sin(angle)*(x-cx) + cos(angle)*(y-cy) + cy;
output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
}
// Forward pass of the crop layer on the GPU:
//   1) levels_image_kernel jitters color (saturation/exposure/shift) in-place
//      on the input batch,
//   2) forward_crop_layer_kernel writes the (optionally flipped/rotated)
//      crops into layer.output_gpu.
// Fix: resolves the unresolved git merge conflict in favor of the
// network_state API used by both non-HEAD branches, so the file compiles.
extern "C" void forward_crop_layer_gpu(crop_layer layer, network_state state)
{
    // 8 fresh uniform randoms per batch image: 4 for color jitter, 4 for
    // crop offset / flip / rotation angle.
    cuda_random(layer.rand_gpu, layer.batch*8);

    float radians = layer.angle*3.14159265/180.;

    // Default remap of pixel values from [0,1] to [-1,1]; noadjust disables it.
    float scale = 2;
    float translate = -1;
    if(layer.noadjust){
        scale = 1;
        translate = 0;
    }

    int size = layer.batch * layer.w * layer.h;
    levels_image_kernel<<<cuda_gridsize(size), BLOCK>>>(
            state.input, layer.rand_gpu, layer.batch, layer.w, layer.h, state.train,
            layer.saturation, layer.exposure, translate, scale, layer.shift);
    check_error(cudaPeekAtLastError());

    size = layer.batch*layer.c*layer.out_w*layer.out_h;
    forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK>>>(
            state.input, layer.rand_gpu, size, layer.c, layer.h, layer.w,
            layer.out_h, layer.out_w, state.train, layer.flip, radians, layer.output_gpu);
    check_error(cudaPeekAtLastError());
}
|
a25efbf90125005b090eb36a692a5e698b6ca432.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <torch/library.h>
#include <THH/THHAtomics.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
// Bilinearly sample a single-channel (height x width) feature map at the
// fractional location (y, x). Locations more than one pixel outside the map
// return 0; coordinates in (-1, 0] are clamped to the border, and corner
// indices are clamped at the far edge so all four reads stay in bounds.
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
int height,
int width,
T y,
T x,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
// Clamp the high corner at the last row/column; the weights collapse the
// interpolation onto the border pixel in that case.
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
// Fractional (l*) and complementary (h*) weights along each axis.
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
// PSROIAlign forward pass: one thread per output element (n, c_out, ph, pw),
// iterated via a grid-stride loop (CUDA_1D_KERNEL_LOOP). Each output bin
// averages roi_bin_grid_h x roi_bin_grid_w bilinear samples taken from its
// position-sensitive input channel c_in, and records c_in in channel_mapping
// so the backward pass can scatter gradients to the same channel.
template <typename T>
__global__ void ps_roi_align_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
const T* rois,
int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
// Each ROI row is [batch_index, x1, y1, x2, y2].
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not using rounding; this implementation detail is critical
// The -0.5 shift gives the "aligned" pixel-center coordinate convention.
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
// Do not using floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
// We use roi_bin_grid to sample the grid and mimic integral
// sampling_ratio <= 0 means adaptive: roughly one sample per input pixel.
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
// Sample at the center of each grid cell within the bin.
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_input, height, width, y, x, index);
out_sum += val;
}
}
// Average pooling over the sample grid.
out_sum /= count;
output[index] = out_sum;
channel_mapping[index] = c_in;
}
}
// Compute the four bilinear weights (w1..w4) and the integer corner
// coordinates used when sampling at (y, x), mirroring bilinear_interpolate
// exactly so the backward pass scatters gradients to the same four pixels.
// Out-of-range locations produce zero weights and corner indices of -1.
template <typename T>
__device__ void bilinear_interpolate_gradient(
int height,
int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
// Clamp the high corner at the last row/column, as in the forward helper.
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
// Fractional (l*) and complementary (h*) weights along each axis.
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
}
// PSROIAlign backward pass: one thread per gradient element. Re-derives the
// same sampling grid as the forward kernel (the pooled input channel comes
// from channel_mapping) and scatters the bin-averaged output gradient onto
// the four corner pixels of every sample. atomicAdd is required because
// overlapping ROIs/bins may touch the same input pixel.
template <typename T>
__global__ void ps_roi_align_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* channel_mapping,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
// Each ROI row is [batch_index, x1, y1, x2, y2].
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
// Force too small ROIs to be 1x1
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Position-sensitive channel recorded by the forward pass.
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
// Do not using floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
const T grad_output_this_bin = grad_output[index];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// Forward averaged over count samples, so each sample's gradient is 1/count.
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = grad_output_this_bin * w1 / count;
T g2 = grad_output_this_bin * w2 / count;
T g3 = grad_output_this_bin * w3 / count;
T g4 = grad_output_this_bin * w4 / count;
// Indices are -1 when the sample fell outside the feature map.
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(grad_input_offset + y_low * width + x_low, g1);
atomicAdd(grad_input_offset + y_low * width + x_high, g2);
atomicAdd(grad_input_offset + y_high * width + x_low, g3);
atomicAdd(grad_input_offset + y_high * width + x_high, g4);
} // if
} // ix
} // iy
}
}
// Host launcher for the PSROIAlign forward pass (HIP build).
// Returns (output, channel_mapping); channel_mapping records, per output
// element, which position-sensitive input channel was pooled, and is
// consumed by ps_roi_align_backward_kernel.
// Fix: removed the unconditional hipDeviceSynchronize() after the launch.
// The kernel is stream-ordered on the current stream and errors are already
// surfaced via AT_CUDA_CHECK(hipGetLastError()); a device-wide sync here
// defeats asynchronous execution for every caller.
std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t sampling_ratio) {
  // Check if input tensors are CUDA tensors
  TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(
      rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ps_roi_align_forward_kernel";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});
  at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  // Position-sensitive pooling: every (ph, pw) cell of every output channel
  // reads its own dedicated input channel.
  TORCH_CHECK(
      channels % (pooled_height * pooled_width) == 0,
      "input channels must be a multiple of pooling height * pooling width");
  int channels_out = channels / (pooled_height * pooled_width);
  auto output = at::zeros(
      {num_rois, channels_out, pooled_height, pooled_width}, input.options());
  auto channel_mapping =
      at::zeros(output.sizes(), input.options().dtype(at::kInt));
  auto output_size = output.numel();
  // Nothing to launch for an empty output (e.g. zero ROIs).
  if (output_size == 0) {
    AT_CUDA_CHECK(hipGetLastError());
    return std::make_tuple(output, channel_mapping);
  }
  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
  // One thread per output element: 512-thread blocks, capped at 4096 blocks
  // (the kernel's grid-stride loop covers any remainder).
  dim3 grid(::min(
      ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  auto input_ = input.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.scalar_type(), "ps_roi_align_forward_kernel", [&] {
        hipLaunchKernelGGL(( ps_roi_align_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
            output_size,
            input_.data_ptr<scalar_t>(),
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            sampling_ratio,
            rois_.data_ptr<scalar_t>(),
            channels_out,
            output.data_ptr<scalar_t>(),
            channel_mapping.data_ptr<int>());
      });
  AT_CUDA_CHECK(hipGetLastError());
  return std::make_tuple(output, channel_mapping);
}
// Host launcher for the PSROIAlign backward pass (HIP build).
// Scatters `grad` (gradient w.r.t. the pooled output) back into a freshly
// zeroed grad_input of shape (batch_size, channels, height, width), using
// the channel_mapping produced by the forward pass.
at::Tensor ps_roi_align_backward_kernel(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t sampling_ratio,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width) {
// Check if input tensors are CUDA tensors
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(
channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "ps_roi_align_backward_kernel";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
// Gradients accumulate via atomicAdd, so start from zeros.
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// One thread per gradient element: 512-thread blocks, capped at 4096
// blocks (the kernel's grid-stride loop covers any remainder).
dim3 grid(::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "ps_roi_align_backward_kernel", [&] {
hipLaunchKernelGGL(( ps_roi_align_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad_.data_ptr<scalar_t>(),
channel_mapping.data_ptr<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
channels_out,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace
// Register the PSROIAlign forward/backward implementations with the
// torchvision dispatcher under the CUDA dispatch key (the HIP build reuses
// this key via the MasqueradingAsCUDA guard/stream helpers above).
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl("ps_roi_align", ps_roi_align_forward_kernel);
m.impl("_ps_roi_align_backward", ps_roi_align_backward_kernel);
}
} // namespace ops
} // namespace vision
| a25efbf90125005b090eb36a692a5e698b6ca432.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/library.h>
#include <THC/THCAtomics.cuh>
#include "cuda_helpers.h"
namespace vision {
namespace ops {
namespace {
// Bilinearly sample a single-channel (height x width) feature map at the
// fractional location (y, x). Locations more than one pixel outside the map
// return 0; coordinates in (-1, 0] are clamped to the border, and corner
// indices are clamped at the far edge so all four reads stay in bounds.
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
int height,
int width,
T y,
T x,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
// Clamp the high corner at the last row/column; the weights collapse the
// interpolation onto the border pixel in that case.
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
// Fractional (l*) and complementary (h*) weights along each axis.
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
// PSROIAlign forward pass: one thread per output element (n, c_out, ph, pw),
// iterated via a grid-stride loop (CUDA_1D_KERNEL_LOOP). Each output bin
// averages roi_bin_grid_h x roi_bin_grid_w bilinear samples taken from its
// position-sensitive input channel c_in, and records c_in in channel_mapping
// so the backward pass can scatter gradients to the same channel.
template <typename T>
__global__ void ps_roi_align_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
const T* rois,
int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
// Each ROI row is [batch_index, x1, y1, x2, y2].
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not using rounding; this implementation detail is critical
// The -0.5 shift gives the "aligned" pixel-center coordinate convention.
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
// Do not using floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
// We use roi_bin_grid to sample the grid and mimic integral
// sampling_ratio <= 0 means adaptive: roughly one sample per input pixel.
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height);
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
const T count = roi_bin_grid_h * roi_bin_grid_w;
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
// Sample at the center of each grid cell within the bin.
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_input, height, width, y, x, index);
out_sum += val;
}
}
// Average pooling over the sample grid.
out_sum /= count;
output[index] = out_sum;
channel_mapping[index] = c_in;
}
}
// Compute the four bilinear weights (w1..w4) and the integer corner
// coordinates used when sampling at (y, x), mirroring bilinear_interpolate
// exactly so the backward pass scatters gradients to the same four pixels.
// Out-of-range locations produce zero weights and corner indices of -1.
template <typename T>
__device__ void bilinear_interpolate_gradient(
int height,
int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
// Clamp the high corner at the last row/column, as in the forward helper.
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
// Fractional (l*) and complementary (h*) weights along each axis.
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
}
// PSROIAlign backward pass: one thread per gradient element. Re-derives the
// same sampling grid as the forward kernel (the pooled input channel comes
// from channel_mapping) and scatters the bin-averaged output gradient onto
// the four corner pixels of every sample. atomicAdd is required because
// overlapping ROIs/bins may touch the same input pixel.
template <typename T>
__global__ void ps_roi_align_backward_kernel_impl(
int nthreads,
const T* grad_output,
const int* channel_mapping,
int num_rois,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
// Each ROI row is [batch_index, x1, y1, x2, y2].
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5);
T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5);
T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5);
T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5);
// Force too small ROIs to be 1x1
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Position-sensitive channel recorded by the forward pass.
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
// Do not using floor/ceil; this implementation detail is critical
T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h;
T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w;
const T grad_output_this_bin = grad_output[index];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// Forward averaged over count samples, so each sample's gradient is 1/count.
const T count = roi_bin_grid_h * roi_bin_grid_w;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
const T y = hstart +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h);
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = wstart +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = grad_output_this_bin * w1 / count;
T g2 = grad_output_this_bin * w2 / count;
T g3 = grad_output_this_bin * w3 / count;
T g4 = grad_output_this_bin * w4 / count;
// Indices are -1 when the sample fell outside the feature map.
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(grad_input_offset + y_low * width + x_low, g1);
atomicAdd(grad_input_offset + y_low * width + x_high, g2);
atomicAdd(grad_input_offset + y_high * width + x_low, g3);
atomicAdd(grad_input_offset + y_high * width + x_high, g4);
} // if
} // ix
} // iy
}
}
// Host launcher for the PSROIAlign forward pass.
// Returns (output, channel_mapping); channel_mapping records, per output
// element, which position-sensitive input channel was pooled, and is
// consumed by ps_roi_align_backward_kernel.
// Fix: removed the unconditional cudaDeviceSynchronize() after the launch.
// The kernel is stream-ordered on the current stream and errors are already
// surfaced via AT_CUDA_CHECK(cudaGetLastError()); a device-wide sync here
// defeats asynchronous execution for every caller.
std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel(
    const at::Tensor& input,
    const at::Tensor& rois,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t sampling_ratio) {
  // Check if input tensors are CUDA tensors
  TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(
      rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]");
  at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
  at::CheckedFrom c = "ps_roi_align_forward_kernel";
  at::checkAllSameGPU(c, {input_t, rois_t});
  at::checkAllSameType(c, {input_t, rois_t});
  at::cuda::CUDAGuard device_guard(input.device());
  auto num_rois = rois.size(0);
  auto channels = input.size(1);
  auto height = input.size(2);
  auto width = input.size(3);
  // Position-sensitive pooling: every (ph, pw) cell of every output channel
  // reads its own dedicated input channel.
  TORCH_CHECK(
      channels % (pooled_height * pooled_width) == 0,
      "input channels must be a multiple of pooling height * pooling width");
  int channels_out = channels / (pooled_height * pooled_width);
  auto output = at::zeros(
      {num_rois, channels_out, pooled_height, pooled_width}, input.options());
  auto channel_mapping =
      at::zeros(output.sizes(), input.options().dtype(at::kInt));
  auto output_size = output.numel();
  // Nothing to launch for an empty output (e.g. zero ROIs).
  if (output_size == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(output, channel_mapping);
  }
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // One thread per output element: 512-thread blocks, capped at 4096 blocks
  // (the kernel's grid-stride loop covers any remainder).
  dim3 grid(std::min(
      ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  auto input_ = input.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      input.scalar_type(), "ps_roi_align_forward_kernel", [&] {
        ps_roi_align_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
            output_size,
            input_.data_ptr<scalar_t>(),
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            sampling_ratio,
            rois_.data_ptr<scalar_t>(),
            channels_out,
            output.data_ptr<scalar_t>(),
            channel_mapping.data_ptr<int>());
      });
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(output, channel_mapping);
}
// Backward pass of Position-Sensitive RoI Align (CUDA).
//
// grad: gradient w.r.t. the forward output; channel_mapping: per-element
// input-channel index recorded by the forward pass. Returns the gradient
// w.r.t. the input feature map, shaped [batch_size, channels, height, width].
at::Tensor ps_roi_align_backward_kernel(
    const at::Tensor& grad,
    const at::Tensor& rois,
    const at::Tensor& channel_mapping,
    double spatial_scale,
    int64_t pooled_height,
    int64_t pooled_width,
    int64_t sampling_ratio,
    int64_t batch_size,
    int64_t channels,
    int64_t height,
    int64_t width) {
  // Check if input tensors are CUDA tensors
  TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
  TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
  TORCH_CHECK(
      channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor");
  at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
      channel_mapping_t{channel_mapping, "channel_mapping", 3};
  at::CheckedFrom c = "ps_roi_align_backward_kernel";
  at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
  at::checkAllSameType(c, {grad_t, rois_t});
  at::cuda::CUDAGuard device_guard(grad.device());
  auto num_rois = rois.size(0);
  auto grad_input =
      at::zeros({batch_size, channels, height, width}, grad.options());
  // Handle possibly empty gradients before computing the launch
  // configuration (the original computed grid/block first for nothing).
  if (grad.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return grad_input;
  }
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  // One thread per grad element: blocks of 512, capped at 4096 blocks.
  dim3 grid(std::min(
      ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
      static_cast<int64_t>(4096)));
  dim3 block(512);
  int channels_out = channels / (pooled_height * pooled_width);
  auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      grad.scalar_type(), "ps_roi_align_backward_kernel", [&] {
        ps_roi_align_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
            grad.numel(),
            grad_.data_ptr<scalar_t>(),
            channel_mapping.data_ptr<int>(),
            num_rois,
            spatial_scale,
            channels,
            height,
            width,
            pooled_height,
            pooled_width,
            sampling_ratio,
            channels_out,
            grad_input.data_ptr<scalar_t>(),
            rois_.data_ptr<scalar_t>());
      });
  AT_CUDA_CHECK(cudaGetLastError());
  return grad_input;
}
} // namespace
// Register the CUDA implementations of ps_roi_align with the torchvision
// dispatcher so they are selected for CUDA tensors.
TORCH_LIBRARY_IMPL(torchvision, CUDA, m) {
m.impl("ps_roi_align", ps_roi_align_forward_kernel);
m.impl("_ps_roi_align_backward", ps_roi_align_backward_kernel);
}
} // namespace ops
} // namespace vision
|
6810eb791272343942802bc65f17e120cb497575.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "exec_time.h"
#define DATASET_SIZE 1024
// Kernel function to add the elements of two arrays
// Element-wise vector add on the device: d_y[i] = d_x[i] + d_y[i].
//
// Generalized to a grid-stride loop: correct for any <<<blocks, threads>>>
// configuration. Under the <<<1, 1>>> launch used by main_func it performs
// exactly the same serial traversal as before; the original body made every
// thread walk the whole array, which is redundant (and a write race) for
// launches with more than one thread.
__global__
void add(int n, float *d_x, float *d_y)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        d_y[i] = d_x[i] + d_y[i];
    }
}
int main_func(int argc, char **argv)
{
float *h_x, *h_y;
float *d_x, *d_y;
hipError_t hipError_t;
int i;
struct timeval start, stop;
// Disable buffering entirely
setbuf(stdout, NULL);
// Allocating arrays on host
printf("Allocating arrays h_x and h_y on host...");
gettimeofday(&start, NULL);
h_x = (float*)malloc(DATASET_SIZE*sizeof(float));
h_y = (float*)malloc(DATASET_SIZE*sizeof(float));
// check malloc memory allocation
if (h_x == NULL || h_y == NULL) {
printf("Error: malloc unable to allocate memory on host.");
return 1;
}
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
// Allocating array on device
printf("Allocating array d_x and d_y on device...");
gettimeofday(&start, NULL);
hipError_t = hipMalloc(&d_x, DATASET_SIZE*sizeof(float));
// check hipMalloc memory allocation
if (hipError_t != hipSuccess) {
printf("hipMalloc d_x returned error %s (code %d)\n", hipGetErrorString(hipError_t), hipError_t);
return 1;
}
hipError_t = hipMalloc(&d_y, DATASET_SIZE*sizeof(float));
// check hipMalloc memory allocation
if (hipError_t != hipSuccess) {
printf("hipMalloc d_y returned error %s (code %d)\n", hipGetErrorString(hipError_t), hipError_t);
return 1;
}
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
// Initialize host memory
printf("Initializing array h_x and h_y on host...");
gettimeofday(&start, NULL);
for (i =0; i < DATASET_SIZE; ++i) {
h_x[i] = 1.0f;
h_y[i] = 2.0f;
}
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
// Copy array from host to device
printf("Copying arrays from host to device...");
gettimeofday(&start, NULL);
hipError_t = hipMemcpy(d_x, h_x, DATASET_SIZE*sizeof(float), hipMemcpyHostToDevice);
if (hipError_t != hipSuccess) {
printf("hipMemcpy (h_x -> d_x) returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__);
return 1;
}
hipError_t = hipMemcpy(d_y, h_y, DATASET_SIZE*sizeof(float), hipMemcpyHostToDevice);
if (hipError_t != hipSuccess) {
printf("hipMemcpy (h_x -> d_x) returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__);
return 1;
}
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
// Run kernel on elements on the GPU
printf("Running kernel on elemnts of d_x and d_y...");
gettimeofday(&start, NULL);
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, DATASET_SIZE, d_x, d_y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
// Copy array from device to host
printf("Copying array from device (d_y) to host (h_y)...");
gettimeofday(&start, NULL);
hipError_t = hipMemcpy(h_y, d_y, DATASET_SIZE*sizeof(float), hipMemcpyDeviceToHost);
if (hipError_t != hipSuccess)
{
printf("hipMemcpy (d_y -> h_y) returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__);
return 1;
}
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
// Check for errors (all values should be 3.0f)
printf("Checking for processing errors...");
gettimeofday(&start, NULL);
float maxError = 0.0f;
float diffError = 0.0f;
for (i = 0; i < DATASET_SIZE; i++) {
maxError = (maxError > (diffError=fabs(h_y[i]-3.0f)))? maxError : diffError;
//printf("%d -> %f\n", i, h_y[i]);
}
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
printf("Max error: %f\n", maxError);
// Free memory
printf("Freeing memory...");
gettimeofday(&start, NULL);
hipFree(d_x);
hipFree(d_y);
free(h_x);
free(h_y);
gettimeofday(&stop, NULL);
printf("%f ms\n", timedifference_msec(start, stop));
return 0;
}
| 6810eb791272343942802bc65f17e120cb497575.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "exec_time.h"
#define DATASET_SIZE 1024
// Kernel function to add the elements of two arrays
// Element-wise vector add on the device: d_y[i] = d_x[i] + d_y[i].
//
// Generalized to a grid-stride loop: correct for any <<<blocks, threads>>>
// configuration. Under the <<<1, 1>>> launch used by main_func it performs
// exactly the same serial traversal as before; the original body made every
// thread walk the whole array, which is redundant (and a write race) for
// launches with more than one thread.
__global__
void add(int n, float *d_x, float *d_y)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        d_y[i] = d_x[i] + d_y[i];
    }
}
// Demo driver: vector addition of two DATASET_SIZE float arrays on the GPU.
// h_x is filled with 1.0f and h_y with 2.0f, the `add` kernel accumulates
// x into y on the device, and the result is checked against 3.0f on the
// host. Every phase (alloc, init, copies, kernel, check, free) is timed
// with gettimeofday and reported in milliseconds.
// Returns 0 on success, 1 on any allocation/copy/launch failure.
int main_func(int argc, char **argv)
{
    float *h_x, *h_y;   // host buffers
    float *d_x, *d_y;   // device buffers
    // NOTE(review): renamed from "cudaError" — the original variable name
    // shadowed the cudaError enum type from the CUDA headers.
    cudaError_t err;
    int i;
    struct timeval start, stop;
    // Disable buffering entirely
    setbuf(stdout, NULL);
    // Allocating arrays on host
    printf("Allocating arrays h_x and h_y on host...");
    gettimeofday(&start, NULL);
    h_x = (float*)malloc(DATASET_SIZE*sizeof(float));
    h_y = (float*)malloc(DATASET_SIZE*sizeof(float));
    // check malloc memory allocation
    if (h_x == NULL || h_y == NULL) {
        printf("Error: malloc unable to allocate memory on host.");
        return 1;
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    // Allocating array on device
    printf("Allocating array d_x and d_y on device...");
    gettimeofday(&start, NULL);
    err = cudaMalloc(&d_x, DATASET_SIZE*sizeof(float));
    // check cudaMalloc memory allocation
    if (err != cudaSuccess) {
        printf("cudaMalloc d_x returned error %s (code %d)\n", cudaGetErrorString(err), err);
        return 1;
    }
    err = cudaMalloc(&d_y, DATASET_SIZE*sizeof(float));
    // check cudaMalloc memory allocation
    if (err != cudaSuccess) {
        printf("cudaMalloc d_y returned error %s (code %d)\n", cudaGetErrorString(err), err);
        return 1;
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    // Initialize host memory
    printf("Initializing array h_x and h_y on host...");
    gettimeofday(&start, NULL);
    for (i = 0; i < DATASET_SIZE; ++i) {
        h_x[i] = 1.0f;
        h_y[i] = 2.0f;
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    // Copy array from host to device
    printf("Copying arrays from host to device...");
    gettimeofday(&start, NULL);
    err = cudaMemcpy(d_x, h_x, DATASET_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("cudaMemcpy (h_x -> d_x) returned error %s (code %d), line(%d)\n", cudaGetErrorString(err), err, __LINE__);
        return 1;
    }
    err = cudaMemcpy(d_y, h_y, DATASET_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        // BUGFIX: this message previously said "(h_x -> d_x)".
        printf("cudaMemcpy (h_y -> d_y) returned error %s (code %d), line(%d)\n", cudaGetErrorString(err), err, __LINE__);
        return 1;
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    // Run kernel on elements on the GPU
    printf("Running kernel on elemnts of d_x and d_y...");
    gettimeofday(&start, NULL);
    add<<<1, 1>>>(DATASET_SIZE, d_x, d_y);
    // Kernel launches do not return a status directly: query it explicitly
    // so a bad launch configuration is not silently ignored.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch returned error %s (code %d), line(%d)\n", cudaGetErrorString(err), err, __LINE__);
        return 1;
    }
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    // Copy array from device to host
    printf("Copying array from device (d_y) to host (h_y)...");
    gettimeofday(&start, NULL);
    err = cudaMemcpy(h_y, d_y, DATASET_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        printf("cudaMemcpy (d_y -> h_y) returned error %s (code %d), line(%d)\n", cudaGetErrorString(err), err, __LINE__);
        return 1;
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    // Check for errors (all values should be 3.0f)
    printf("Checking for processing errors...");
    gettimeofday(&start, NULL);
    float maxError = 0.0f;
    float diffError = 0.0f;
    for (i = 0; i < DATASET_SIZE; i++) {
        maxError = (maxError > (diffError=fabs(h_y[i]-3.0f)))? maxError : diffError;
        //printf("%d -> %f\n", i, h_y[i]);
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    printf("Max error: %f\n", maxError);
    // Free memory
    printf("Freeing memory...");
    gettimeofday(&start, NULL);
    cudaFree(d_x);
    cudaFree(d_y);
    free(h_x);
    free(h_y);
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    return 0;
}
|
8f0939b644437af65786edf8d7ccf86fc97c233b.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/LeakyReLU.hip"
#else
#include <THHUNN/common.h>
// LeakyReLU forward (HIP build of the THCUNN generic file).
// Converts negval_ to the tensor scalar type, then either applies the
// activation in place on `input` (aliasing `output` to it) or resizes
// `output` and writes the activated values there. The element-wise math
// lives in the LeakyReLUUpdateOutput(IP) functors defined elsewhere;
// presumably x >= 0 ? x : negval * x — confirm in the functor header.
void THNN_(LeakyReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
accreal negval_,
bool inplace)
{
scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_);
// Both tensors must reside on the same GPU.
THCUNN_assertSameGPU(state, 2, input, output);
if (inplace)
{
// Mutate input directly, then make output an alias of it.
THC_pointwiseApply1<scalar_t>(state, input, LeakyReLUUpdateOutputIP<scalar_t>(negval));
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, LeakyReLUUpdateOutput<scalar_t>(negval));
}
// THCudaCheck(hipGetLastError());
}
// LeakyReLU backward (HIP build of the THCUNN generic file).
// Computes gradInput from gradOutput and the forward input. In in-place
// mode gradOutput is scaled directly and gradInput is aliased to it;
// otherwise gradInput is resized and filled. Gradient math lives in the
// LeakyReLUUpdateGradInput(IP) functors defined elsewhere; presumably
// grad * (x >= 0 ? 1 : negval) — confirm in the functor header.
void THNN_(LeakyReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
accreal negval_,
bool inplace)
{
scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_);
// gradOutput must match input element-for-element.
THCUNN_check_nElement(state, input, gradOutput)
#endif
| 8f0939b644437af65786edf8d7ccf86fc97c233b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/LeakyReLU.cu"
#else
#include <THCUNN/common.h>
// LeakyReLU forward (CUDA build of the THCUNN generic file).
// Converts negval_ to the tensor scalar type, then either applies the
// activation in place on `input` (aliasing `output` to it) or resizes
// `output` and writes the activated values there. The element-wise math
// lives in the LeakyReLUUpdateOutput(IP) functors defined elsewhere;
// presumably x >= 0 ? x : negval * x — confirm in the functor header.
void THNN_(LeakyReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
accreal negval_,
bool inplace)
{
scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_);
// Both tensors must reside on the same GPU.
THCUNN_assertSameGPU(state, 2, input, output);
if (inplace)
{
// Mutate input directly, then make output an alias of it.
THC_pointwiseApply1<scalar_t>(state, input, LeakyReLUUpdateOutputIP<scalar_t>(negval));
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, LeakyReLUUpdateOutput<scalar_t>(negval));
}
// THCudaCheck(cudaGetLastError());
}
// LeakyReLU backward (CUDA build of the THCUNN generic file).
// Computes gradInput from gradOutput and the forward input. In in-place
// mode gradOutput is scaled directly and gradInput is aliased to it;
// otherwise gradInput is resized and filled. Gradient math lives in the
// LeakyReLUUpdateGradInput(IP) functors defined elsewhere; presumably
// grad * (x >= 0 ? 1 : negval) — confirm in the functor header.
void THNN_(LeakyReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
accreal negval_,
bool inplace)
{
scalar_t negval = ScalarConvert<accreal, scalar_t>::to(negval_);
// gradOutput must match input element-for-element.
THCUNN_check_nElement(state, input, gradOutput);
THCUNN_assertSameGPU(state, 3, input, gradInput, gradOutput);
if (inplace)
{
// Mutate gradOutput directly, then make gradInput an alias of it.
THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, LeakyReLUUpdateGradInputIP<scalar_t>(negval));
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, input, gradOutput, LeakyReLUUpdateGradInput<scalar_t>(negval));
}
// THCudaCheck(cudaGetLastError());
}
#endif
|
d0996475a9a396f7aefa9e5025027996861ed6e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Single-precision GEMM: C = alpha * op(A) * op(B) + beta * C with A, B, C
// stored row-major (C order). hipBLAS expects column-major, so the call
// below computes B^T * A^T by swapping the operand order and dimensions;
// the result then lands in C with row-major layout.
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
// Leading dimensions of A and B in their row-major storage.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
// Copy N bytes between any combination of host/device pointers.
// hipMemcpyDefault infers the transfer direction from the pointer
// attributes (requires unified virtual addressing). No-op when X and Y
// are the same pointer.
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault));
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Device kernel: y[index] = alpha for every index in [0, n).
// CUDA_KERNEL_LOOP is caffe's macro that spreads the index range across
// the launched grid.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Fill N elements of device array Y with alpha. The zero case uses a
// byte-wise memset (valid because all-zero bytes encode 0 for int, float
// and double); any other value requires the fill kernel, since memset
// writes bytes, not typed values.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N));
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
// Explicit instantiations for the element types used elsewhere in caffe.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(fabs, y[index] = fabs(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
// Intended semantics (see popc_kernel above): sum over i of
// popcount(uint32(x[i]) XOR uint32(y[i])).
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
// NOTE(review): NOT_IMPLEMENTED presumably aborts/throws, which would make
// everything below unreachable until the TODO is resolved — confirm the
// macro's definition.
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
// Fill r with n uniformly distributed 32-bit random integers from the
// shared curand/hiprand generator.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
// Fill r with n floats uniform over [a, b): generate on the unit interval,
// then rescale and shift in place. The scale and shift are skipped when
// they would be identity operations.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
// Double-precision counterpart: same generate-then-affine-rescale scheme.
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| d0996475a9a396f7aefa9e5025027996861ed6e5.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault));
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
// Device kernel: y[index] = alpha for every index in [0, n).
// CUDA_KERNEL_LOOP is caffe's macro that spreads the index range across
// the launched grid.
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
// Fill N elements of device array Y with alpha. The zero case uses a
// byte-wise memset (valid because all-zero bytes encode 0 for int, float
// and double); any other value requires the fill kernel, since memset
// writes bytes, not typed values.
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N));
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
// Explicit instantiations for the element types used elsewhere in caffe.
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
// Element-wise sum: y[i] = a[i] + b[i].
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] + b[index];
  }
}

template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}

template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}

// Element-wise difference: y[i] = a[i] - b[i].
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] - b[index];
  }
}

template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
    float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}

template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
    double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}

// Element-wise product: y[i] = a[i] * b[i].
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] * b[index];
  }
}

template <>
void caffe_gpu_mul<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}

template <>
void caffe_gpu_mul<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}

// Element-wise quotient: y[i] = a[i] / b[i].  Zero divisors are not treated
// specially here.
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
    const Dtype* b, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = a[index] / b[index];
  }
}

template <>
void caffe_gpu_div<float>(const int N, const float* a,
    const float* b, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}

template <>
void caffe_gpu_div<double>(const int N, const double* a,
    const double* b, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, b, y);
}
// Element-wise power: y[i] = a[i] ^ alpha, using the device pow overload for
// the element type.
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
    const Dtype alpha, Dtype* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = pow(a[index], alpha);
  }
}

template <>
void caffe_gpu_powx<float>(const int N, const float* a,
    const float alpha, float* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
}

template <>
void caffe_gpu_powx<double>(const int N, const double* a,
    const double alpha, double* y) {
  // NOLINT_NEXT_LINE(whitespace/operators)
  powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
      N, a, alpha, y);
}

// Macro-generated element-wise unary functions:
//   sign:   y = (0 < x) - (x < 0), i.e. -1, 0, or +1
//   sgnbit: y = signbit(x)
//   fabs:   y = |x|
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
    - (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(fabs, y[index] = fabs(x[index]));
// Per-element popcount of (uint32)a[i] XOR (uint32)b[i]; helper for the
// Hamming-distance routines below.
__global__ void popc_kernel(const int n, const float* a,
    const float* b, uint8_t* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = __popc(static_cast<uint32_t>(a[index]) ^
                      static_cast<uint32_t>(b[index]));
  }
}

// 64-bit variant of popc_kernel for double inputs.
__global__ void popcll_kernel(const int n, const double* a,
    const double* b, uint8_t* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
                        static_cast<uint64_t>(b[index]));
  }
}

// NOTE(review): both overloads hit NOT_IMPLEMENTED first — presumably that
// macro aborts, making the code after it unreachable reference code; confirm
// the macro's semantics before relying on a return value.
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
                                  const float* y) {
  // TODO: Fix caffe_gpu_hamming_distance (see failing unit test
  // TestHammingDistanceGPU in test_math_functions.cpp).
  NOT_IMPLEMENTED;
  thrust::device_vector<uint8_t> popcounts(n);
  // NOLINT_NEXT_LINE(whitespace/operators)
  popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
      n, x, y, thrust::raw_pointer_cast(popcounts.data()));
  return thrust::reduce(popcounts.begin(), popcounts.end(),
                        (uint32_t) 0, thrust::plus<uint32_t>());
}

template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
                                   const double* y) {
  // TODO: Fix caffe_gpu_hamming_distance (see failing unit test
  // TestHammingDistanceGPU in test_math_functions.cpp).
  NOT_IMPLEMENTED;
  thrust::device_vector<uint8_t> popcounts(n);
  // NOLINT_NEXT_LINE(whitespace/operators)
  popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
      n, x, y, thrust::raw_pointer_cast(popcounts.data()));
  return thrust::reduce(popcounts.begin(), popcounts.end(),
                        /* NOLINT_NEXT_LINE(build/include_what_you_use) */
                        (uint32_t) 0, thrust::plus<uint32_t>());
}
// Fills r with n random 32-bit unsigned integers from Caffe's cuRAND generator.
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
  CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}

// Uniform reals on [a, b): generate in cuRAND's native range, then affinely
// remap by scaling with (b - a) and shifting by a.  Each remap step is
// skipped when it would be an identity operation.
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
                                  float* r) {
  CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
  const float range = b - a;
  if (range != static_cast<float>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<float>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}

template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
                                   double* r) {
  CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
  const double range = b - a;
  if (range != static_cast<double>(1)) {
    caffe_gpu_scal(n, range, r);
  }
  if (a != static_cast<double>(0)) {
    caffe_gpu_add_scalar(n, a, r);
  }
}

// Gaussian samples with mean mu and standard deviation sigma.
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
                            float* r) {
  CURAND_CHECK(
      curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}

template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
                            double* r) {
  CURAND_CHECK(
      curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
436cb394fac96c421ddcd2e147f38867b1f997b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
    /********************************************************************
     *
     * Compute C = A + B
     * where A is a (1 * n) vector
     * where B is a (1 * n) vector
     * where C is a (1 * n) vector
     *
     * A, B, and C are device pointers; one thread handles one element.
     *
     ********************************************************************/
    // Flat 1-D global index; the guard handles grids that overshoot n,
    // so any launch configuration covering >= n threads is correct.
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i<n) C[i] = A[i] + B[i];
}
// Host driver: launches VecAdd so that C = A + B on the device.
// A, B, C must be device pointers with room for n floats; the launch is
// asynchronous (no synchronization is performed here).
void basicVecAdd( float *A, float *B, float *C, int n)
{
    // Initialize thread block and kernel grid dimensions ---------------------
    const unsigned int BLOCK_SIZE = 256; //changed from 512

    // Guard against n <= 0: (n-1) would convert to a huge unsigned value in
    // the mixed int/unsigned division below and launch a gigantic empty grid.
    if (n <= 0) return;

    dim3 DimGrid((n-1)/BLOCK_SIZE + 1, 1,1);  // ceil(n / BLOCK_SIZE) blocks
    dim3 DimBlock(BLOCK_SIZE,1,1);
    hipLaunchKernelGGL(( VecAdd), dim3(DimGrid),dim3(DimBlock), 0, 0, n,A,B,C);
}
| 436cb394fac96c421ddcd2e147f38867b1f997b9.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
    /********************************************************************
     *
     * Compute C = A + B
     * where A is a (1 * n) vector
     * where B is a (1 * n) vector
     * where C is a (1 * n) vector
     *
     * A, B, and C are device pointers; one thread handles one element.
     *
     ********************************************************************/
    // Flat 1-D global index; the guard handles grids that overshoot n,
    // so any launch configuration covering >= n threads is correct.
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i<n) C[i] = A[i] + B[i];
}
// Host driver: launches VecAdd so that C = A + B on the device.
// A, B, C must be device pointers with room for n floats; the launch is
// asynchronous (no synchronization is performed here).
void basicVecAdd( float *A, float *B, float *C, int n)
{
    // Initialize thread block and kernel grid dimensions ---------------------
    const unsigned int BLOCK_SIZE = 256; //changed from 512

    // Guard against n <= 0: (n-1) would convert to a huge unsigned value in
    // the mixed int/unsigned division below and launch a gigantic empty grid.
    if (n <= 0) return;

    dim3 DimGrid((n-1)/BLOCK_SIZE + 1, 1,1);  // ceil(n / BLOCK_SIZE) blocks
    dim3 DimBlock(BLOCK_SIZE,1,1);
    VecAdd<<<DimGrid,DimBlock>>>(n,A,B,C);
}
|
86f9c46d6a9b6582bf3da512daed349b6bffd87f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "CellListGPU.cuh"
#include "hoomd/extern/util/mgpucontext.h"
#include "hoomd/extern/kernels/localitysort.cuh"
#include <thrust/device_vector.h>
#include <thrust/sort.h>
/*! \file CellListGPU.cu
\brief Defines GPU kernel code for cell list generation on the GPU
*/
//! Kernel that computes the cell list on the GPU
/*! \param d_cell_size Number of particles in each cell
\param d_xyzf Cell XYZF data array
\param d_tdb Cell TDB data array
\param d_cell_orientation Particle orientation in cell list
\param d_cell_idx Particle index in cell list
\param d_conditions Conditions flags for detecting overflow and other error conditions
\param d_pos Particle position array
\param d_orientation Particle orientation array
\param d_charge Particle charge array
\param d_diameter Particle diameter array
\param d_body Particle body array
\param N Number of particles
\param n_ghost Number of ghost particles
\param Nmax Maximum number of particles that can be placed in a single cell
\param flag_charge Set to true to store charge in the flag position in \a d_xyzf
\param flag_type Set to true to store type in the flag position in \a d_xyzf
\param box Box dimensions
\param ci Indexer to compute cell id from cell grid coords
\param cli Indexer to index into \a d_xyzf and \a d_tdb
\param ghost_width Width of ghost layer
\note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int *d_cell_size,
                                             Scalar4 *d_xyzf,
                                             Scalar4 *d_tdb,
                                             Scalar4 *d_cell_orientation,
                                             unsigned int *d_cell_idx,
                                             uint3 *d_conditions,
                                             const Scalar4 *d_pos,
                                             const Scalar4 *d_orientation,
                                             const Scalar *d_charge,
                                             const Scalar *d_diameter,
                                             const unsigned int *d_body,
                                             const unsigned int N,
                                             const unsigned int n_ghost,
                                             const unsigned int Nmax,
                                             const bool flag_charge,
                                             const bool flag_type,
                                             const BoxDim box,
                                             const Index3D ci,
                                             const Index2D cli,
                                             const Scalar3 ghost_width,
                                             const unsigned int nwork,
                                             const unsigned int offset)
    {
    // read in the particle that belongs to this thread
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= nwork)
        return;

    // shift into this launch's slice of the particle range
    idx += offset;

    Scalar4 postype = d_pos[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);

    // optional per-particle payloads; fetched only when the corresponding
    // output array was requested (non-NULL)
    Scalar flag = 0;
    Scalar diameter = 0;
    Scalar body = 0;
    Scalar type = postype.w;
    Scalar4 orientation = make_scalar4(0,0,0,0);
    if (d_tdb != NULL)
        {
        diameter = d_diameter[idx];
        body = __int_as_scalar(d_body[idx]);
        }

    if (d_cell_orientation != NULL)
        {
        orientation = d_orientation[idx];
        }

    // the flag slot of xyzf carries charge, type, or the particle index
    if (flag_charge)
        flag = d_charge[idx];
    else if (flag_type)
        flag = type;
    else
        flag = __int_as_scalar(idx);

    // check for nan pos
    // NOTE(review): plain store (not atomicMax) — concurrent failures may
    // overwrite each other, but some failing index (+1) is still reported
    if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
        {
        (*d_conditions).y = idx+1;
        return;
        }

    uchar3 periodic = box.getPeriodic();
    Scalar3 f = box.makeFraction(pos,ghost_width);

    // check if the particle is inside the unit cell + ghost layer in all
    // dimensions (small epsilon tolerates round-off at the boundary)
    if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
        (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
        (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)) )
        {
        // if a ghost particle is out of bounds, silently ignore it
        if (idx < N)
            (*d_conditions).z = idx+1;
        return;
        }

    // find the bin each particle belongs in
    int ib = (int)(f.x * ci.getW());
    int jb = (int)(f.y * ci.getH());
    int kb = (int)(f.z * ci.getD());

    // need to handle the case where the particle is exactly at the box hi
    if (ib == ci.getW() && periodic.x)
        ib = 0;
    if (jb == ci.getH() && periodic.y)
        jb = 0;
    if (kb == ci.getD() && periodic.z)
        kb = 0;

    // NOTE(review): bin is computed before the range check below; this relies
    // on ci() being safe to evaluate on out-of-range coordinates — confirm
    unsigned int bin = ci(ib, jb, kb);

    // all particles should be in a valid cell
    if (ib < 0 || ib >= (int)ci.getW() ||
        jb < 0 || jb >= (int)ci.getH() ||
        kb < 0 || kb >= (int)ci.getD())
        {
        // but ghost particles that are out of range should not produce an error
        if (idx < N)
            {
            #if (__CUDA_ARCH__ >= 600)
            atomicMax_system(&(*d_conditions).z, idx+1);
            #else
            atomicMax(&(*d_conditions).z, idx+1);
            #endif
            }
        return;
        }

    // reserve a slot in the bin; the counter keeps incrementing past Nmax so
    // the overflow branch can report how much capacity is actually needed
    unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);

    if (size < Nmax)
        {
        unsigned int write_pos = cli(size, bin);
        if (d_xyzf != NULL)
            d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
        if (d_tdb != NULL)
            d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
        if (d_cell_orientation != NULL)
            d_cell_orientation[write_pos] = orientation;
        if (d_cell_idx != NULL)
            d_cell_idx[write_pos] = idx;
        }
    else
        {
        // handle overflow
        #if (__CUDA_ARCH__ >= 600)
        atomicMax_system(&(*d_conditions).x, size+1);
        #else
        atomicMax(&(*d_conditions).x, size+1);
        #endif
        }
    }
//! Host driver: bins particles into per-GPU cell lists (see kernel above)
hipError_t gpu_compute_cell_list(unsigned int *d_cell_size,
                                 Scalar4 *d_xyzf,
                                 Scalar4 *d_tdb,
                                 Scalar4 *d_cell_orientation,
                                 unsigned int *d_cell_idx,
                                 uint3 *d_conditions,
                                 const Scalar4 *d_pos,
                                 const Scalar4 *d_orientation,
                                 const Scalar *d_charge,
                                 const Scalar *d_diameter,
                                 const unsigned int *d_body,
                                 const unsigned int N,
                                 const unsigned int n_ghost,
                                 const unsigned int Nmax,
                                 const bool flag_charge,
                                 const bool flag_type,
                                 const BoxDim& box,
                                 const Index3D& ci,
                                 const Index2D& cli,
                                 const Scalar3& ghost_width,
                                 const unsigned int block_size,
                                 const GPUPartition& gpu_partition)
    {
    // query and cache the kernel's hardware block-size limit (first call only;
    // NOTE(review): hipFuncGetAttributes's return code is not checked)
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, (const void*)gpu_compute_cell_list_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    // iterate over active GPUs in reverse, to end up on first GPU when returning from this function
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        auto range = gpu_partition.getRangeAndSetGPU(idev);

        unsigned int nwork = range.second - range.first;

        // process ghosts in final range
        if (idev == (int)gpu_partition.getNumActiveGPUs()-1)
            nwork += n_ghost;

        unsigned int run_block_size = min(block_size, max_block_size);
        int n_blocks = nwork/run_block_size + 1;

        // each GPU writes into its own slice of the output arrays
        // (offset by idev * number-of-cells / list elements)
        hipLaunchKernelGGL(( gpu_compute_cell_list_kernel), dim3(n_blocks), dim3(run_block_size), 0, 0, d_cell_size+idev*ci.getNumElements(),
                                                               d_xyzf ? d_xyzf+idev*cli.getNumElements() : 0,
                                                               d_tdb ? d_tdb+idev*cli.getNumElements() : 0,
                                                               d_cell_orientation ? d_cell_orientation+idev*cli.getNumElements() : 0,
                                                               d_cell_idx ? d_cell_idx+idev*cli.getNumElements() : 0,
                                                               d_conditions,
                                                               d_pos,
                                                               d_orientation,
                                                               d_charge,
                                                               d_diameter,
                                                               d_body,
                                                               N,
                                                               n_ghost,
                                                               Nmax,
                                                               flag_charge,
                                                               flag_type,
                                                               box,
                                                               ci,
                                                               cli,
                                                               ghost_width,
                                                               nwork,
                                                               range.first);
        }

    return hipSuccess;
    }
//! Kernel that prepares the sort keys for the cell-list locality sort
/*! One thread per slot of the cell list.  Each slot is tagged with a
    (cell index, particle index) pair — slots that hold no particle get
    particle index UINT_MAX so they sort to the end — and the permutation
    array is seeded with the identity mapping.
*/
__global__ void gpu_fill_indices_kernel(
    unsigned int cl_size,
    uint2 *d_idx,
    unsigned int *d_sort_permutation,
    unsigned int *d_cell_idx,
    unsigned int *d_cell_size,
    Index3D ci,
    Index2D cli
    )
    {
    const unsigned int slot = blockDim.x * blockIdx.x + threadIdx.x;
    if (slot >= cl_size)
        return;

    // decompose the flat slot index into (cell, position within cell)
    const unsigned int cell = slot / cli.getW();

    // empty slots (or slots past the last cell) keep the UINT_MAX sentinel
    unsigned int particle = UINT_MAX;
    if (cell < ci.getNumElements())
        {
        const unsigned int filled = d_cell_size[cell];
        if (slot % cli.getW() < filled)
            particle = d_cell_idx[slot];
        }

    // emit the packed sort key
    uint2 key;
    key.x = cell;
    key.y = particle;
    d_idx[slot] = key;

    // identity permutation: entry i initially maps to itself
    d_sort_permutation[slot] = slot;
    }
//! Lexicographic comparison operator on uint2
struct comp_less_uint2
    {
    //! Returns true when a precedes b in lexicographic (x, then y) order
    __device__ bool operator()(const uint2& a, const uint2& b)
        {
        if (a.x != b.x)
            return a.x < b.x;
        return a.y < b.y;
        }
    };
//! Kernel to combine ngpu cell lists into one, in parallel
/*! Merges GPU \a igpu's scratch cell list into the combined arrays.
    One thread per (slot, bin) entry of \a cli.
*/
__global__ void gpu_combine_cell_lists_kernel(
    const unsigned int *d_cell_size_scratch,
    unsigned int *d_cell_size,
    const unsigned int *d_idx_scratch,
    unsigned int *d_idx,
    const Scalar4 *d_xyzf_scratch,
    Scalar4 *d_xyzf,
    const Scalar4 *d_tdb_scratch,
    Scalar4 *d_tdb,
    const Scalar4 *d_cell_orientation_scratch,
    Scalar4 *d_cell_orientation,
    const Index2D cli,
    unsigned int igpu,
    unsigned int ngpu,
    const unsigned int Nmax,
    uint3 *d_conditions)
    {
    unsigned int idx = threadIdx.x+blockIdx.x*blockDim.x;

    if (idx >= cli.getNumElements())
        return;

    // decompose flat index into (slot within bin, bin)
    uint2 p = cli.getPair(idx);

    unsigned int local_idx = p.x;
    unsigned int bin = p.y;

    // reduce cell sizes for 0..igpu
    //   local_size = this GPU's count for the bin
    //   offset     = combined count of all GPUs before this one
    //   total_size = combined count over all GPUs
    // NOTE(review): local_size is only assigned when igpu < ngpu; the caller
    // must guarantee that or it is read uninitialized below — confirm
    unsigned int local_size;
    unsigned int offset = 0;
    unsigned int total_size = 0;
    for (unsigned int i = 0; i < ngpu; ++i)
        {
        unsigned int sz = d_cell_size_scratch[bin+i*cli.getH()];
        if (i == igpu)
            local_size = sz;
        if (i < igpu)
            offset += sz;
        total_size += sz;
        }

    // write out cell size total on GPU 0
    if (igpu == 0 && local_idx == 0)
        d_cell_size[bin] = total_size;

    // is local_idx within bounds?
    if (local_idx >= local_size)
        return;

    unsigned int out_idx = offset + local_idx;

    if (out_idx >= Nmax)
        {
        // handle overflow: record the capacity needed for a later resize
        #if (__CUDA_ARCH__ >= 600)
        atomicMax_system(&(*d_conditions).x, out_idx+1);
        #else
        atomicMax(&(*d_conditions).x, out_idx+1);
        #endif
        return;
        }

    unsigned int write_pos = cli(out_idx, bin);

    // copy over elements (NULL destination arrays are skipped)
    if (d_idx)
        d_idx[write_pos] = d_idx_scratch[idx+igpu*cli.getNumElements()];
    if (d_xyzf)
        d_xyzf[write_pos] = d_xyzf_scratch[idx+igpu*cli.getNumElements()];
    if (d_tdb)
        d_tdb[write_pos] = d_tdb_scratch[idx+igpu*cli.getNumElements()];
    if (d_cell_orientation)
        d_cell_orientation[write_pos] = d_cell_orientation_scratch[idx+igpu*cli.getNumElements()];
    }
/*! Driver function to sort the cell lists from different GPUs into one
This applies lexicographical order to cell idx, particle idx pairs
\param d_cell_size_scratch List of cell sizes (per GPU)
\param d_cell_size List of cell sizes
\param d_cell_idx_scratch List particle index (per GPU)
\param d_cell_idx List particle index
\param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
\param d_sort_permutation Temporary array for storing the permuted cell list indices
\param ci Cell indexer
\param cli Cell list indexer
\param block_size GPU block size
\param gpu_partition multi-GPU partition
*/
hipError_t gpu_combine_cell_lists(const unsigned int *d_cell_size_scratch,
                                  unsigned int *d_cell_size,
                                  const unsigned int *d_idx_scratch,
                                  unsigned int *d_idx,
                                  const Scalar4 *d_xyzf_scratch,
                                  Scalar4 *d_xyzf,
                                  const Scalar4 *d_tdb_scratch,
                                  Scalar4 *d_tdb,
                                  const Scalar4 *d_cell_orientation_scratch,
                                  Scalar4 *d_cell_orientation,
                                  const Index2D cli,
                                  unsigned int ngpu,
                                  const unsigned int block_size,
                                  const unsigned int Nmax,
                                  uint3 *d_conditions,
                                  const GPUPartition& gpu_partition)
    {
    dim3 threads(block_size);
    dim3 grid(cli.getNumElements()/block_size + 1);

    // copy together cell lists in parallel: every active GPU merges its own
    // scratch list (reverse iteration leaves device 0 selected on return)
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        gpu_partition.getRangeAndSetGPU(idev);

        hipLaunchKernelGGL(( gpu_combine_cell_lists_kernel), dim3(grid), dim3(threads), 0, 0,
            d_cell_size_scratch,
            d_cell_size,
            d_idx_scratch,
            d_idx,
            d_xyzf_scratch,
            d_xyzf,
            d_tdb_scratch,
            d_tdb,
            d_cell_orientation_scratch,
            d_cell_orientation,
            cli,
            idev,
            ngpu,
            Nmax,
            d_conditions);
        }
    return hipSuccess;
    }
//! Kernel that gathers cell-list entries through a sort permutation
/*! Destination entry \a i of each *_new array receives source entry
    d_sort_permutation[i] of the matching input array.  Array pairs whose
    input pointer is NULL are skipped.
*/
__global__ void gpu_apply_sorted_cell_list_order(
    unsigned int cl_size,
    unsigned int *d_cell_idx,
    unsigned int *d_cell_idx_new,
    Scalar4 *d_xyzf,
    Scalar4 *d_xyzf_new,
    Scalar4 *d_tdb,
    Scalar4 *d_tdb_new,
    Scalar4 *d_cell_orientation,
    Scalar4 *d_cell_orientation_new,
    unsigned int *d_sort_permutation,
    Index2D cli)
    {
    const unsigned int dst = blockIdx.x * blockDim.x + threadIdx.x;
    if (dst >= cl_size)
        return;

    // look up which source slot lands in this destination slot
    const unsigned int src = d_sort_permutation[dst];

    if (d_xyzf)
        d_xyzf_new[dst] = d_xyzf[src];
    if (d_cell_idx)
        d_cell_idx_new[dst] = d_cell_idx[src];
    if (d_tdb)
        d_tdb_new[dst] = d_tdb[src];
    if (d_cell_orientation)
        d_cell_orientation_new[dst] = d_cell_orientation[src];
    }
/*! Driver function to sort the cell list on the GPU
This applies lexicographical order to cell idx, particle idx pairs
\param d_cell_size List of cell sizes
\param d_xyzf List of coordinates and flag
\param d_tdb List type diameter and body index
\param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
\param d_sort_permutation Temporary array for storing the permuted cell list indices
\param ci Cell indexer
\param cli Cell list indexer
\param mgpu_context ModernGPU context
*/
hipError_t gpu_sort_cell_list(unsigned int *d_cell_size,
                              Scalar4 *d_xyzf,
                              Scalar4 *d_xyzf_new,
                              Scalar4 *d_tdb,
                              Scalar4 *d_tdb_new,
                              Scalar4 *d_cell_orientation,
                              Scalar4 *d_cell_orientation_new,
                              unsigned int *d_cell_idx,
                              unsigned int *d_cell_idx_new,
                              uint2 *d_sort_idx,
                              unsigned int *d_sort_permutation,
                              const Index3D ci,
                              const Index2D cli)
    {
    unsigned int block_size = 256;

    // fill indices table with cell idx/particle idx pairs
    dim3 threads(block_size);
    dim3 grid(cli.getNumElements()/block_size + 1);

    hipLaunchKernelGGL(( gpu_fill_indices_kernel), dim3(grid), dim3(threads), 0, 0,
        cli.getNumElements(),
        d_sort_idx,
        d_sort_permutation,
        d_cell_idx,
        d_cell_size,
        ci,
        cli);

    // locality sort on those pairs; the permutation array is reordered
    // alongside the keys, yielding the gather map applied below
    HOOMD_THRUST::device_ptr<uint2> d_sort_idx_thrust(d_sort_idx);
    HOOMD_THRUST::device_ptr<unsigned int> d_sort_permutation_thrust(d_sort_permutation);
    HOOMD_THRUST::sort_by_key(d_sort_idx_thrust, d_sort_idx_thrust + cli.getNumElements(), d_sort_permutation_thrust, comp_less_uint2());

    // apply sorted order, gathering into the *_new scratch arrays
    hipLaunchKernelGGL(( gpu_apply_sorted_cell_list_order), dim3(grid), dim3(threads), 0, 0,
        cli.getNumElements(),
        d_cell_idx,
        d_cell_idx_new,
        d_xyzf,
        d_xyzf_new,
        d_tdb,
        d_tdb_new,
        d_cell_orientation,
        d_cell_orientation_new,
        d_sort_permutation,
        cli);

    // copy back permuted arrays to original ones
    // NOTE(review): the hipMemcpy return codes below are discarded; the
    // function unconditionally reports hipSuccess
    if (d_xyzf)
        hipMemcpy(d_xyzf, d_xyzf_new, sizeof(Scalar4)*cli.getNumElements(), hipMemcpyDeviceToDevice);

    hipMemcpy(d_cell_idx, d_cell_idx_new, sizeof(unsigned int)*cli.getNumElements(), hipMemcpyDeviceToDevice);

    if (d_tdb)
        {
        hipMemcpy(d_tdb, d_tdb_new, sizeof(Scalar4)*cli.getNumElements(), hipMemcpyDeviceToDevice);
        }

    if (d_cell_orientation)
        {
        hipMemcpy(d_cell_orientation, d_cell_orientation_new, sizeof(Scalar4)*cli.getNumElements(), hipMemcpyDeviceToDevice);
        }

    return hipSuccess;
    }
| 86f9c46d6a9b6582bf3da512daed349b6bffd87f.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "CellListGPU.cuh"
#include "hoomd/extern/util/mgpucontext.h"
#include "hoomd/extern/kernels/localitysort.cuh"
#include <thrust/device_vector.h>
#include <thrust/sort.h>
/*! \file CellListGPU.cu
\brief Defines GPU kernel code for cell list generation on the GPU
*/
//! Kernel that computes the cell list on the GPU
/*! \param d_cell_size Number of particles in each cell
\param d_xyzf Cell XYZF data array
\param d_tdb Cell TDB data array
\param d_cell_orientation Particle orientation in cell list
\param d_cell_idx Particle index in cell list
\param d_conditions Conditions flags for detecting overflow and other error conditions
\param d_pos Particle position array
\param d_orientation Particle orientation array
\param d_charge Particle charge array
\param d_diameter Particle diameter array
\param d_body Particle body array
\param N Number of particles
\param n_ghost Number of ghost particles
\param Nmax Maximum number of particles that can be placed in a single cell
\param flag_charge Set to true to store charge in the flag position in \a d_xyzf
\param flag_type Set to true to store type in the flag position in \a d_xyzf
\param box Box dimensions
\param ci Indexer to compute cell id from cell grid coords
\param cli Indexer to index into \a d_xyzf and \a d_tdb
\param ghost_width Width of ghost layer
\note Optimized for Fermi
*/
__global__ void gpu_compute_cell_list_kernel(unsigned int *d_cell_size,
                                             Scalar4 *d_xyzf,
                                             Scalar4 *d_tdb,
                                             Scalar4 *d_cell_orientation,
                                             unsigned int *d_cell_idx,
                                             uint3 *d_conditions,
                                             const Scalar4 *d_pos,
                                             const Scalar4 *d_orientation,
                                             const Scalar *d_charge,
                                             const Scalar *d_diameter,
                                             const unsigned int *d_body,
                                             const unsigned int N,
                                             const unsigned int n_ghost,
                                             const unsigned int Nmax,
                                             const bool flag_charge,
                                             const bool flag_type,
                                             const BoxDim box,
                                             const Index3D ci,
                                             const Index2D cli,
                                             const Scalar3 ghost_width,
                                             const unsigned int nwork,
                                             const unsigned int offset)
    {
    // read in the particle that belongs to this thread
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= nwork)
        return;

    // shift into this launch's slice of the particle range
    idx += offset;

    Scalar4 postype = d_pos[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);

    // optional per-particle payloads; fetched only when the corresponding
    // output array was requested (non-NULL)
    Scalar flag = 0;
    Scalar diameter = 0;
    Scalar body = 0;
    Scalar type = postype.w;
    Scalar4 orientation = make_scalar4(0,0,0,0);
    if (d_tdb != NULL)
        {
        diameter = d_diameter[idx];
        body = __int_as_scalar(d_body[idx]);
        }

    if (d_cell_orientation != NULL)
        {
        orientation = d_orientation[idx];
        }

    // the flag slot of xyzf carries charge, type, or the particle index
    if (flag_charge)
        flag = d_charge[idx];
    else if (flag_type)
        flag = type;
    else
        flag = __int_as_scalar(idx);

    // check for nan pos
    // NOTE(review): plain store (not atomicMax) — concurrent failures may
    // overwrite each other, but some failing index (+1) is still reported
    if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z))
        {
        (*d_conditions).y = idx+1;
        return;
        }

    uchar3 periodic = box.getPeriodic();
    Scalar3 f = box.makeFraction(pos,ghost_width);

    // check if the particle is inside the unit cell + ghost layer in all
    // dimensions (small epsilon tolerates round-off at the boundary)
    if ((f.x < Scalar(-0.00001) || f.x >= Scalar(1.00001)) ||
        (f.y < Scalar(-0.00001) || f.y >= Scalar(1.00001)) ||
        (f.z < Scalar(-0.00001) || f.z >= Scalar(1.00001)) )
        {
        // if a ghost particle is out of bounds, silently ignore it
        if (idx < N)
            (*d_conditions).z = idx+1;
        return;
        }

    // find the bin each particle belongs in
    int ib = (int)(f.x * ci.getW());
    int jb = (int)(f.y * ci.getH());
    int kb = (int)(f.z * ci.getD());

    // need to handle the case where the particle is exactly at the box hi
    if (ib == ci.getW() && periodic.x)
        ib = 0;
    if (jb == ci.getH() && periodic.y)
        jb = 0;
    if (kb == ci.getD() && periodic.z)
        kb = 0;

    // NOTE(review): bin is computed before the range check below; this relies
    // on ci() being safe to evaluate on out-of-range coordinates — confirm
    unsigned int bin = ci(ib, jb, kb);

    // all particles should be in a valid cell
    if (ib < 0 || ib >= (int)ci.getW() ||
        jb < 0 || jb >= (int)ci.getH() ||
        kb < 0 || kb >= (int)ci.getD())
        {
        // but ghost particles that are out of range should not produce an error
        if (idx < N)
            {
            #if (__CUDA_ARCH__ >= 600)
            atomicMax_system(&(*d_conditions).z, idx+1);
            #else
            atomicMax(&(*d_conditions).z, idx+1);
            #endif
            }
        return;
        }

    // reserve a slot in the bin; the counter keeps incrementing past Nmax so
    // the overflow branch can report how much capacity is actually needed
    unsigned int size = atomicInc(&d_cell_size[bin], 0xffffffff);

    if (size < Nmax)
        {
        unsigned int write_pos = cli(size, bin);
        if (d_xyzf != NULL)
            d_xyzf[write_pos] = make_scalar4(pos.x, pos.y, pos.z, flag);
        if (d_tdb != NULL)
            d_tdb[write_pos] = make_scalar4(type, diameter, body, 0);
        if (d_cell_orientation != NULL)
            d_cell_orientation[write_pos] = orientation;
        if (d_cell_idx != NULL)
            d_cell_idx[write_pos] = idx;
        }
    else
        {
        // handle overflow
        #if (__CUDA_ARCH__ >= 600)
        atomicMax_system(&(*d_conditions).x, size+1);
        #else
        atomicMax(&(*d_conditions).x, size+1);
        #endif
        }
    }
//! Host driver: bins particles into per-GPU cell lists (see kernel above)
cudaError_t gpu_compute_cell_list(unsigned int *d_cell_size,
                                  Scalar4 *d_xyzf,
                                  Scalar4 *d_tdb,
                                  Scalar4 *d_cell_orientation,
                                  unsigned int *d_cell_idx,
                                  uint3 *d_conditions,
                                  const Scalar4 *d_pos,
                                  const Scalar4 *d_orientation,
                                  const Scalar *d_charge,
                                  const Scalar *d_diameter,
                                  const unsigned int *d_body,
                                  const unsigned int N,
                                  const unsigned int n_ghost,
                                  const unsigned int Nmax,
                                  const bool flag_charge,
                                  const bool flag_type,
                                  const BoxDim& box,
                                  const Index3D& ci,
                                  const Index2D& cli,
                                  const Scalar3& ghost_width,
                                  const unsigned int block_size,
                                  const GPUPartition& gpu_partition)
    {
    // query and cache the kernel's hardware block-size limit (first call only;
    // NOTE(review): cudaFuncGetAttributes's return code is not checked)
    static unsigned int max_block_size = UINT_MAX;
    if (max_block_size == UINT_MAX)
        {
        cudaFuncAttributes attr;
        cudaFuncGetAttributes(&attr, (const void*)gpu_compute_cell_list_kernel);
        max_block_size = attr.maxThreadsPerBlock;
        }

    // iterate over active GPUs in reverse, to end up on first GPU when returning from this function
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        auto range = gpu_partition.getRangeAndSetGPU(idev);

        unsigned int nwork = range.second - range.first;

        // process ghosts in final range
        if (idev == (int)gpu_partition.getNumActiveGPUs()-1)
            nwork += n_ghost;

        unsigned int run_block_size = min(block_size, max_block_size);
        int n_blocks = nwork/run_block_size + 1;

        // each GPU writes into its own slice of the output arrays
        // (offset by idev * number-of-cells / list elements)
        gpu_compute_cell_list_kernel<<<n_blocks, run_block_size>>>(d_cell_size+idev*ci.getNumElements(),
                                                                   d_xyzf ? d_xyzf+idev*cli.getNumElements() : 0,
                                                                   d_tdb ? d_tdb+idev*cli.getNumElements() : 0,
                                                                   d_cell_orientation ? d_cell_orientation+idev*cli.getNumElements() : 0,
                                                                   d_cell_idx ? d_cell_idx+idev*cli.getNumElements() : 0,
                                                                   d_conditions,
                                                                   d_pos,
                                                                   d_orientation,
                                                                   d_charge,
                                                                   d_diameter,
                                                                   d_body,
                                                                   N,
                                                                   n_ghost,
                                                                   Nmax,
                                                                   flag_charge,
                                                                   flag_type,
                                                                   box,
                                                                   ci,
                                                                   cli,
                                                                   ghost_width,
                                                                   nwork,
                                                                   range.first);
        }

    return cudaSuccess;
    }
/*! One thread per slot of the cell list.  Writes a (cell, particle) key pair
    for the locality sort — empty slots get particle index UINT_MAX so they
    sort last — and seeds d_sort_permutation with the identity permutation.
*/
__global__ void gpu_fill_indices_kernel(
    unsigned int cl_size,
    uint2 *d_idx,
    unsigned int *d_sort_permutation,
    unsigned int *d_cell_idx,
    unsigned int *d_cell_size,
    Index3D ci,
    Index2D cli
    )
    {
    unsigned int cell_idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell_idx >= cl_size) return;

    // decompose the flat index into (cell, position within cell)
    unsigned int icell = cell_idx / cli.getW();
    unsigned int pidx = UINT_MAX;

    if (icell < ci.getNumElements())
        {
        unsigned int my_cell_size = d_cell_size[icell];
        unsigned int ilocal = cell_idx % cli.getW();
        if (ilocal < my_cell_size)
            {
            pidx = d_cell_idx[cell_idx];
            }
        }

    // pack cell idx and particle idx into uint2
    uint2 result;
    result.x = icell;
    result.y = pidx;

    // write out result
    d_idx[cell_idx] = result;

    // write identity permutation
    d_sort_permutation[cell_idx] = cell_idx;
    }
//! Lexicographic comparison operator on uint2 (order by .x, tie-break on .y)
struct comp_less_uint2
    {
    //! Strict weak ordering used by thrust::sort_by_key.
    //  Fix: const-qualified so the functor can be invoked through const
    //  references, as thrust algorithm implementations may do.
    __device__ bool operator()(const uint2& a, const uint2& b) const
        {
        return a.x < b.x || (a.x == b.x && a.y < b.y);
        }
    };
//! Kernel to combine ngpu cell lists into one, in parallel.
//! Each thread handles one (local slot, bin) pair of the per-GPU cell list
//! indexed by igpu and writes it into the combined list at the offset of all
//! preceding GPUs' entries for that bin.
//! \param d_cell_size_scratch per-GPU cell sizes, laid out [gpu][bin]
//! \param d_cell_size output: combined cell sizes (written by GPU 0 only)
//! \param Nmax maximum entries per cell; overflow reported via d_conditions
__global__ void gpu_combine_cell_lists_kernel(
    const unsigned int *d_cell_size_scratch,
    unsigned int *d_cell_size,
    const unsigned int *d_idx_scratch,
    unsigned int *d_idx,
    const Scalar4 *d_xyzf_scratch,
    Scalar4 *d_xyzf,
    const Scalar4 *d_tdb_scratch,
    Scalar4 *d_tdb,
    const Scalar4 *d_cell_orientation_scratch,
    Scalar4 *d_cell_orientation,
    const Index2D cli,
    unsigned int igpu,
    unsigned int ngpu,
    const unsigned int Nmax,
    uint3 *d_conditions)
    {
    unsigned int idx = threadIdx.x+blockIdx.x*blockDim.x;

    if (idx >= cli.getNumElements())
        return;

    uint2 p = cli.getPair(idx);

    unsigned int local_idx = p.x;
    unsigned int bin = p.y;

    // reduce cell sizes for 0..igpu
    // fix: initialize local_size so it can never be read uninitialized below
    // (the loop only assigns it when i == igpu)
    unsigned int local_size = 0;
    unsigned int offset = 0;
    unsigned int total_size = 0;

    for (unsigned int i = 0; i < ngpu; ++i)
        {
        unsigned int sz = d_cell_size_scratch[bin+i*cli.getH()];
        if (i == igpu)
            local_size = sz;
        if (i < igpu)
            offset += sz;
        total_size += sz;
        }

    // write out cell size total on GPU 0
    if (igpu == 0 && local_idx == 0)
        d_cell_size[bin] = total_size;

    // is local_idx within bounds?
    if (local_idx >= local_size)
        return;

    unsigned int out_idx = offset + local_idx;

    if (out_idx >= Nmax)
        {
        // handle overflow: record the needed capacity for the caller
        #if (__CUDA_ARCH__ >= 600)
        atomicMax_system(&(*d_conditions).x, out_idx+1);
        #else
        atomicMax(&(*d_conditions).x, out_idx+1);
        #endif
        return;
        }

    unsigned int write_pos = cli(out_idx, bin);

    // copy over elements (each source array is optional)
    if (d_idx)
        d_idx[write_pos] = d_idx_scratch[idx+igpu*cli.getNumElements()];
    if (d_xyzf)
        d_xyzf[write_pos] = d_xyzf_scratch[idx+igpu*cli.getNumElements()];
    if (d_tdb)
        d_tdb[write_pos] = d_tdb_scratch[idx+igpu*cli.getNumElements()];
    if (d_cell_orientation)
        d_cell_orientation[write_pos] = d_cell_orientation_scratch[idx+igpu*cli.getNumElements()];
    }
/*! Driver function to combine the per-GPU cell lists into one
    \param d_cell_size_scratch List of cell sizes (per GPU)
    \param d_cell_size List of cell sizes (combined)
    \param d_idx_scratch List of particle indices (per GPU)
    \param d_idx List of particle indices (combined)
    \param d_xyzf_scratch List of coordinates and flag (per GPU)
    \param d_xyzf List of coordinates and flag (combined)
    \param d_tdb_scratch List of type/diameter/body (per GPU)
    \param d_tdb List of type/diameter/body (combined)
    \param d_cell_orientation_scratch List of orientations (per GPU)
    \param d_cell_orientation List of orientations (combined)
    \param cli Cell list indexer
    \param ngpu Number of active GPUs
    \param block_size GPU block size
    \param Nmax Maximum number of entries per cell
    \param d_conditions Overflow condition flags
    \param gpu_partition multi-GPU partition
 */
cudaError_t gpu_combine_cell_lists(const unsigned int *d_cell_size_scratch,
    unsigned int *d_cell_size,
    const unsigned int *d_idx_scratch,
    unsigned int *d_idx,
    const Scalar4 *d_xyzf_scratch,
    Scalar4 *d_xyzf,
    const Scalar4 *d_tdb_scratch,
    Scalar4 *d_tdb,
    const Scalar4 *d_cell_orientation_scratch,
    Scalar4 *d_cell_orientation,
    const Index2D cli,
    unsigned int ngpu,
    const unsigned int block_size,
    const unsigned int Nmax,
    uint3 *d_conditions,
    const GPUPartition& gpu_partition)
    {
    dim3 threads(block_size);
    dim3 grid(cli.getNumElements()/block_size + 1);

    // copy together cell lists in parallel, one launch per active GPU
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        gpu_partition.getRangeAndSetGPU(idev);

        gpu_combine_cell_lists_kernel<<<grid, threads>>>(
            d_cell_size_scratch,
            d_cell_size,
            d_idx_scratch,
            d_idx,
            d_xyzf_scratch,
            d_xyzf,
            d_tdb_scratch,
            d_tdb,
            d_cell_orientation_scratch,
            d_cell_orientation,
            cli,
            idev,
            ngpu,
            Nmax,
            d_conditions);
        }

    // fix: surface launch-configuration errors instead of unconditionally
    // reporting success
    return cudaGetLastError();
    }
//! Kernel gathering cell-list entries through a sort permutation.
//! Each thread handles one slot: new[i] = old[perm[i]] for every array whose
//! source pointer is non-null.
__global__ void gpu_apply_sorted_cell_list_order(
    unsigned int cl_size,
    unsigned int *d_cell_idx,
    unsigned int *d_cell_idx_new,
    Scalar4 *d_xyzf,
    Scalar4 *d_xyzf_new,
    Scalar4 *d_tdb,
    Scalar4 *d_tdb_new,
    Scalar4 *d_cell_orientation,
    Scalar4 *d_cell_orientation_new,
    unsigned int *d_sort_permutation,
    Index2D cli)
    {
    const unsigned int slot = blockIdx.x * blockDim.x + threadIdx.x;
    if (slot >= cl_size)
        return;

    // source slot feeding this destination slot
    const unsigned int src = d_sort_permutation[slot];

    if (d_cell_idx)
        d_cell_idx_new[slot] = d_cell_idx[src];
    if (d_xyzf)
        d_xyzf_new[slot] = d_xyzf[src];
    if (d_tdb)
        d_tdb_new[slot] = d_tdb[src];
    if (d_cell_orientation)
        d_cell_orientation_new[slot] = d_cell_orientation[src];
    }
/*! Driver function to sort the cell list on the GPU
    This applies lexicographical order to (cell idx, particle idx) pairs.
    \param d_cell_size List of cell sizes
    \param d_xyzf List of coordinates and flag (permuted copy in d_xyzf_new)
    \param d_tdb List of type, diameter and body index (permuted copy in d_tdb_new)
    \param d_cell_orientation List of orientations (permuted copy in d_cell_orientation_new)
    \param d_cell_idx List of particle indices (permuted copy in d_cell_idx_new)
    \param d_sort_idx Temporary array for storing the cell/particle indices to be sorted
    \param d_sort_permutation Temporary array for storing the permuted cell list indices
    \param ci Cell indexer
    \param cli Cell list indexer
 */
cudaError_t gpu_sort_cell_list(unsigned int *d_cell_size,
    Scalar4 *d_xyzf,
    Scalar4 *d_xyzf_new,
    Scalar4 *d_tdb,
    Scalar4 *d_tdb_new,
    Scalar4 *d_cell_orientation,
    Scalar4 *d_cell_orientation_new,
    unsigned int *d_cell_idx,
    unsigned int *d_cell_idx_new,
    uint2 *d_sort_idx,
    unsigned int *d_sort_permutation,
    const Index3D ci,
    const Index2D cli)
    {
    unsigned int block_size = 256;

    // fill indices table with cell idx/particle idx pairs
    dim3 threads(block_size);
    dim3 grid(cli.getNumElements()/block_size + 1);

    gpu_fill_indices_kernel<<<grid, threads>>>
        (
        cli.getNumElements(),
        d_sort_idx,
        d_sort_permutation,
        d_cell_idx,
        d_cell_size,
        ci,
        cli);

    // locality sort on those pairs
    HOOMD_THRUST::device_ptr<uint2> d_sort_idx_thrust(d_sort_idx);
    HOOMD_THRUST::device_ptr<unsigned int> d_sort_permutation_thrust(d_sort_permutation);
    HOOMD_THRUST::sort_by_key(d_sort_idx_thrust, d_sort_idx_thrust + cli.getNumElements(), d_sort_permutation_thrust, comp_less_uint2());

    // apply sorted order
    gpu_apply_sorted_cell_list_order<<<grid, threads>>>(
        cli.getNumElements(),
        d_cell_idx,
        d_cell_idx_new,
        d_xyzf,
        d_xyzf_new,
        d_tdb,
        d_tdb_new,
        d_cell_orientation,
        d_cell_orientation_new,
        d_sort_permutation,
        cli);

    // copy back permuted arrays to original ones
    if (d_xyzf)
        cudaMemcpy(d_xyzf, d_xyzf_new, sizeof(Scalar4)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
    // fix: guard on d_cell_idx for consistency with the gather kernel, which
    // only fills d_cell_idx_new when d_cell_idx is non-null
    if (d_cell_idx)
        cudaMemcpy(d_cell_idx, d_cell_idx_new, sizeof(unsigned int)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
    if (d_tdb)
        {
        cudaMemcpy(d_tdb, d_tdb_new, sizeof(Scalar4)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
        }
    if (d_cell_orientation)
        {
        cudaMemcpy(d_cell_orientation, d_cell_orientation_new, sizeof(Scalar4)*cli.getNumElements(), cudaMemcpyDeviceToDevice);
        }

    return cudaSuccess;
    }
|
95e18157e28caaea8c8331e6129110de5ab53014.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zbajac_csr.cu, normal z -> s, Tue Aug 30 09:38:41 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_s
#define BLOCKSIZE 256
// Block-asynchronous Jacobi sweep with extra local iterations per thread
// block (single precision). The matrix is split as A = D + R, where
// valD/rowD/colD hold the diagonal blocks in CSR format and valR/rowR/colR
// hold the off-diagonal part. b is the RHS, x the iterate (updated in place).
// Shared memory: BLOCKSIZE floats caching this block's slice of x.
__global__ void
magma_sbajac_csr_ls_kernel(int localiters, int n,
                           float * valD,
                           magma_index_t * rowD,
                           magma_index_t * colD,
                           float * valR,
                           magma_index_t * rowR,
                           magma_index_t * colR,
                           const float * __restrict__ b,
                           float * x )
{
    // first row handled by this block = column offset of its diagonal block
    int inddiag = blockIdx.x*blockDim.x;
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int i, j, start, end;

    if (index < n) {
        start = rowR[index];
        end = rowR[index+1];

        float zero = MAGMA_S_MAKE(0.0, 0.0);
        float bl, tmp = zero, v = zero;

#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        // read-only-cache load of the RHS entry
        bl = __ldg( b+index );
#else
        bl = b[index];
#endif

        // v = (R x)_index : off-diagonal contribution
        #pragma unroll
        for( i=start; i<end; i++ )
             v += valR[i] * x[ colR[i] ];

        start = rowD[index];
        end = rowD[index+1];

        // tmp = (D x)_index : diagonal-block contribution
        #pragma unroll
        for( i=start; i<end; i++ )
            tmp += valD[i] * x[ colD[i] ];

        v =  bl - v;

        /* add more local iterations */
        // NOTE(review): the __syncthreads() below sits inside "if (index < n)",
        // a divergent barrier when n is not a multiple of blockDim.x — confirm
        // launches always use n as a multiple of the block size.
        // Indexing local_x by colD[i]-inddiag assumes all columns of the
        // diagonal block fall within this thread block; valD[start] is used as
        // the pivot, presumably the diagonal element stored first — confirm.
        __shared__ float local_x[ BLOCKSIZE ];
        local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
        __syncthreads();

        #pragma unroll
        for( j=0; j<localiters-1; j++ )
        {
            tmp = zero;
            #pragma unroll
            for( i=start; i<end; i++ )
                tmp += valD[i] * local_x[ colD[i] - inddiag];

            local_x[threadIdx.x] +=  ( v - tmp) / (valD[start]);
        }
        x[index] = local_x[threadIdx.x];
    }
}
// One block-asynchronous Jacobi relaxation sweep (single precision):
//   x_i <- x_i + (b_i - (R x)_i - (D x)_i) / valD[rowD[i]]
// where A = D (diagonal blocks) + R (off-diagonal part), both in CSR.
// NOTE(review): valD[start] — the first stored entry of row i in D — is used
// as the pivot; this presumes the diagonal element is stored first. Confirm.
__global__ void
magma_sbajac_csr_kernel(
    int n,
    float * valD,
    magma_index_t * rowD,
    magma_index_t * colD,
    float * valR,
    magma_index_t * rowR,
    magma_index_t * colR,
    float * b,
    float * x )
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int i, start, end;

    if (index < n) {
        float zero = MAGMA_S_MAKE(0.0, 0.0);
        float bl, tmp = zero, v = zero;

#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
        // read-only-cache load of the RHS entry
        bl = __ldg( b+index );
#else
        bl = b[index];
#endif

        // v = (R x)_index : off-diagonal contribution
        start = rowR[index];
        end = rowR[index+1];

        #pragma unroll
        for( i=start; i<end; i++ )
             v += valR[i] * x[ colR[i] ];

        v =  bl - v;

        // tmp = (D x)_index : diagonal-block contribution
        start = rowD[index];
        end = rowD[index+1];

        #pragma unroll
        for( i=start; i<end; i++ )
            tmp += valD[i] * x[ colD[i] ];

        x[index] = x[index] + ( v - tmp ) / (valD[start]);
    }
}
/**
Purpose
-------
    This routine is a block-asynchronous Jacobi iteration performing several
local Jacobi-updates within the block. Input format is two CSR matrices,
one containing the diagonal blocks, one containing the rest.
Arguments
---------
@param[in]
localiters magma_int_t
number of local Jacobi-like updates
@param[in]
D magma_s_matrix
input matrix with diagonal blocks
@param[in]
R magma_s_matrix
input matrix with non-diagonal parts
@param[in]
b magma_s_matrix
RHS
@param[in]
x magma_s_matrix*
iterate/solution
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbajac_csr(
    magma_int_t localiters,
    magma_s_matrix D,
    magma_s_matrix R,
    magma_s_matrix b,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // one thread per matrix row, BLOCKSIZE threads per block, 1D grid
    int blocksize1 = BLOCKSIZE;
    int blocksize2 = 1;

    int dimgrid1 = magma_ceildiv( D.num_rows, blocksize1 );
    int dimgrid2 = 1;
    int dimgrid3 = 1;

    dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
    dim3 block( blocksize1, blocksize2, 1 );

    if ( R.nnz > 0 ) {
        // plain sweep for a single iteration, shared-memory local-iteration
        // variant otherwise
        if ( localiters == 1 )
            hipLaunchKernelGGL(( magma_sbajac_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                D.num_rows, D.dval, D.drow, D.dcol,
                R.dval, R.drow, R.dcol, b.dval, x->dval );
        else
            hipLaunchKernelGGL(( magma_sbajac_csr_ls_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
                localiters, D.num_rows, D.dval, D.drow, D.dcol,
                R.dval, R.drow, R.dcol, b.dval, x->dval );
    }
    else {
        // R empty: the matrix was not split as expected; nothing is launched
        printf("error: all elements in diagonal block.\n");
    }

    return MAGMA_SUCCESS;
}
| 95e18157e28caaea8c8331e6129110de5ab53014.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from sparse-iter/blas/zbajac_csr.cu, normal z -> s, Tue Aug 30 09:38:41 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_s
#define BLOCKSIZE 256
__global__ void
magma_sbajac_csr_ls_kernel(int localiters, int n,
float * valD,
magma_index_t * rowD,
magma_index_t * colD,
float * valR,
magma_index_t * rowR,
magma_index_t * colR,
const float * __restrict__ b,
float * x )
{
int inddiag = blockIdx.x*blockDim.x;
int index = blockIdx.x*blockDim.x+threadIdx.x;
int i, j, start, end;
if (index < n) {
start = rowR[index];
end = rowR[index+1];
float zero = MAGMA_S_MAKE(0.0, 0.0);
float bl, tmp = zero, v = zero;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
/* add more local iterations */
__shared__ float local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
x[index] = local_x[threadIdx.x];
}
}
// Single block-asynchronous Jacobi relaxation sweep (single precision):
//   x_row <- x_row + (b_row - (R x)_row - (D x)_row) / valD[rowD[row]]
// with A split into diagonal blocks D and off-diagonal part R, both CSR.
__global__ void
magma_sbajac_csr_kernel(
    int n,
    float * valD,
    magma_index_t * rowD,
    magma_index_t * colD,
    float * valR,
    magma_index_t * rowR,
    magma_index_t * colR,
    float * b,
    float * x )
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= n)
        return;

    const float zero = MAGMA_S_MAKE(0.0, 0.0);

    // fetch the right-hand side entry for this row
    float rhs;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
    rhs = __ldg( b + row );
#else
    rhs = b[row];
#endif

    // residual of the off-diagonal part: v = b_row - (R x)_row
    float v = zero;
    #pragma unroll
    for( int k = rowR[row]; k < rowR[row+1]; k++ )
        v += valR[k] * x[ colR[k] ];
    v = rhs - v;

    // contribution of the diagonal block: tmp = (D x)_row
    const int dstart = rowD[row];
    const int dend   = rowD[row+1];
    float tmp = zero;
    #pragma unroll
    for( int k = dstart; k < dend; k++ )
        tmp += valD[k] * x[ colD[k] ];

    // valD[dstart] (first stored entry of the row in D) acts as the pivot
    x[row] = x[row] + ( v - tmp ) / (valD[dstart]);
}
/**
Purpose
-------
    This routine is a block-asynchronous Jacobi iteration performing several
local Jacobi-updates within the block. Input format is two CSR matrices,
one containing the diagonal blocks, one containing the rest.
Arguments
---------
@param[in]
localiters magma_int_t
number of local Jacobi-like updates
@param[in]
D magma_s_matrix
input matrix with diagonal blocks
@param[in]
R magma_s_matrix
input matrix with non-diagonal parts
@param[in]
b magma_s_matrix
RHS
@param[in]
x magma_s_matrix*
iterate/solution
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbajac_csr(
    magma_int_t localiters,
    magma_s_matrix D,
    magma_s_matrix R,
    magma_s_matrix b,
    magma_s_matrix *x,
    magma_queue_t queue )
{
    // launch one thread per matrix row: 1D grid of BLOCKSIZE-thread blocks
    const int nthreads = BLOCKSIZE;
    dim3 block( nthreads, 1, 1 );
    dim3 grid( magma_ceildiv( D.num_rows, nthreads ), 1, 1 );

    if ( R.nnz > 0 ) {
        if ( localiters == 1 ) {
            // single sweep: plain Jacobi update
            magma_sbajac_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
                ( D.num_rows, D.dval, D.drow, D.dcol,
                  R.dval, R.drow, R.dcol, b.dval, x->dval );
        }
        else {
            // several local sweeps inside each diagonal block
            magma_sbajac_csr_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
                ( localiters, D.num_rows, D.dval, D.drow, D.dcol,
                  R.dval, R.drow, R.dcol, b.dval, x->dval );
        }
    }
    else {
        // R empty: the matrix was not split as expected; nothing is launched
        printf("error: all elements in diagonal block.\n");
    }

    return MAGMA_SUCCESS;
}
|
ca189ed23070eeb8e32ab1e86e8b7c456fdfa306.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <time.h>
#include <math.h>
#include <memory.h>
#include "matrixop.cuh"
#define RR 190
#define M 256
#define N 256
#define BLOCK_SIZE 16
#define THREAD_NUM 256
#define imin(a, b) (a<b ? a : b)
const int threadsPerBlock = 16;
const int blocksPerGrid = imin(32, (RR + threadsPerBlock - 1) / threadsPerBlock);
//CPU
//double** matrixturn(double** matrix, int height, int width) {
// int i = 0, j;
// double** turn = (double**)malloc(width * sizeof(double*));
// if (!turn)
// return NULL;
// for (i = 0; i < width; i++)
// turn[i] = (double*)malloc(height * sizeof(double));
// for (i = 0; i < width; i++)
// for (j = 0; j < height; j++)
// turn[i][j] = matrix[j][i];
// return turn;
//}
//double** matrixmultiplic(double** matrix1, int width, double** matrix2,
// int height, int wid2) {
// int i, j, k;
// double** ans = NULL;
// //double** ans = creatmatrix(height,width); conflicting types for
// ans = (double**)malloc(height * sizeof(double*));
// if (ans == NULL)
// return NULL;
// for (i = 0; i < height; i++) {
// ans[i] = (double*)malloc(width * sizeof(double));
// if (ans[i] == NULL)
// return NULL;
// }
// //
// for (i = 0; i < height; i++) {
// for (j = 0; j < width; j++) {
// ans[i][j] = 0;
// for (k = 0; k < wid2; k++) {
// ans[i][j] += matrix2[i][k] * matrix1[k][j];
// }
// }
// }
// return ans;
//}
//double** creatmatrix(int height, int width) {
// double **pixel = (double**)malloc(height * sizeof(double*));
// if (pixel == NULL) {
// printf("It is out of memory1!\n");
// return NULL;
// }
// int i, j;
// for (i = 0; i < height; i++) {
// pixel[i] = (double*)malloc(width * sizeof(double));
// if (pixel[i] == NULL) {
// printf("It is out of memory2! %d\n", i);
// return NULL;
// }
// }
// for (i = 0; i < height; i++) {
// for (j = 0; j < width; j++) {
// pixel[i][j] = 0;
// }
// }
// return pixel;
//}
//GPU
/*
*/
/* Scatter recovered coefficients into a full-length result vector:
   result is zeroed, then result[posarray[k]] = aug_y[k] for k < i.
   Runs serially in one thread (launched <<<1,1>>> by the host code). */
__global__ void result(float *result, float *aug_y, int *posarray, int i)
{
    // clear the whole output vector first
    for (int k = 0; k < N; k++)
        result[k] = 0;
    // then place each recovered coefficient at its original position
    for (int k = 0; k < i; k++)
        result[posarray[k]] = aug_y[k];
}
/* Square the i-th entry of a and store it in *c (single-thread helper). */
__global__ void valueMul(float *c, float *a, int i)
{
    const float v = a[i];
    *c = v * v;
}
/* Compute *c = *b / sqrt(*a) (single-thread helper). */
__global__ void valueSqrt(float *a, float *b, float *c)
{
    *c = (*b) / sqrt(*a);
}
/* Find the index of the maximum element of P[0..n-1].
   Runs single-threaded (launched <<<1,1>>>); P[0] is clobbered and reused
   as the running maximum, matching the original implementation.
   Fixes:
   - *idx is now initialized to 0, so a stale value left over from a
     previous call can no longer be returned when the maximum sits at
     index 0;
   - removed the dead local array that copied P but was never read. */
__global__ void d_find_max(int *idx, float *P, int n){
    *idx = 0;
    for (int i = 1; i<n; i++){
        if (P[i]>P[0]){
            P[0] = P[i];
            (*idx) = i;
        }
    }
}
/* Element-wise vector subtraction: C[k] = A[k] - B[k] for k < n. */
__global__ void vector_sub_vector(float *C, float *A, float *B, int n)
{
    const int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= n)
        return;
    C[k] = A[k] - B[k];
}
/* Matrix product C = A * B, with A (m x p) and B (p x n), row-major.
   Launch with one block per row of A (grid.x = m) and p floats of dynamic
   shared memory to stage the current row of A.
   NOTE(review): parameter m is unused inside the kernel (the row count is
   taken from blockIdx.x); the inner loop uses a compensated (Kahan-style)
   summation to reduce rounding error. */
__global__ static void matMult_gpu(float *A, float *B, float *C, int m, int p, int n)
{
    extern __shared__ float data[];
    int tid = threadIdx.x;
    int row = blockIdx.x; // one block handles one row of A
    int i, j;
    // cooperatively stage row 'row' of A into shared memory
    for (i = tid; i<p; i += blockDim.x){
        data[i] = A[row*p + i];
    }
    __syncthreads();
    // each thread computes a strided subset of the output columns
    for (j = tid; j<n; j += blockDim.x){
        float t = 0;  // running sum
        float y = 0;  // running compensation term
        for (i = 0; i<p; i++){
            float r;
            y -= data[i] * B[i*n + j];
            r = t - y;
            y = (r - t) + y;
            t = r;
        }
        C[row*n + j] = t;
    }
}
/* Tiled out-of-place transpose: A_T = A^T for an hA x wA row-major matrix.
   Requires a 2D launch with BLOCK_SIZE x BLOCK_SIZE threads per block. */
__global__ static void matrix_transpose(float *A_T, float *A, int hA, int wA)
{
    // +1 column padding avoids shared-memory bank conflicts on the
    // transposed read below
    __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE + 1];

    unsigned int xIndex = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    unsigned int yIndex = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    if ((xIndex < wA) && (yIndex < hA))
    {
        unsigned int aIndex = yIndex * wA + xIndex;
        temp[threadIdx.y][threadIdx.x] = A[aIndex];
    }

    __syncthreads();

    // swap the block coordinates for the write-back so that the output
    // store is contiguous in memory
    xIndex = blockIdx.y * BLOCK_SIZE + threadIdx.x;
    yIndex = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    if ((xIndex < hA) && (yIndex < wA))
    {
        unsigned int a_tIndex = yIndex * hA + xIndex;
        A_T[a_tIndex] = temp[threadIdx.x][threadIdx.y];
    }
}
/* Dense matrix-vector product C = A * B, treating A as a wA x wA row-major
   matrix and B as a vector of length wA. One thread per output element.
   Fix: removed the two __syncthreads() calls of the original. The kernel
   uses no shared memory, so they served no synchronization purpose, and
   the second one sat inside the divergent branch (offset < wA) — undefined
   behavior whenever wA is not a multiple of the block size. */
__global__ static void matrix_x_vector(float *C, float *A, float *B, int wA)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int offset = bid*blockDim.x + tid;
    if (offset < wA)
    {
        float temp = 0.0;
        for (int i = 0; i < wA; i++)
        {
            temp += A[offset*wA + i] * B[i];
        }
        C[offset] = temp;
    }
}
/* Partial dot product: each block reduces its grid-strided share of A.B
   into C[blockIdx.x]; a finishing pass (vector_dot_sum) must add up the
   per-block partials. Assumes blockDim.x is a power of two and does not
   exceed BLOCK_SIZE (the shared buffer size). */
__global__ void vector_dot_product(float *C, float *A, float *B, int n)
{
    __shared__ float temp[BLOCK_SIZE];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int tempIndex = threadIdx.x;

    // accumulate this thread's elements with a grid-stride loop;
    // double accumulator reduces rounding error before the float store
    double result = 0.0;
    while (tid < n)
    {
        result += A[tid] * B[tid];
        tid += blockDim.x * gridDim.x;
    }
    temp[tempIndex] = result;

    __syncthreads();

    // tree reduction in shared memory (power-of-two blockDim assumed)
    int i = blockDim.x / 2;
    while (i != 0)
    {
        if (tempIndex < i)
        {
            temp[tempIndex] += temp[tempIndex + i];
        }
        __syncthreads();
        i /= 2;
    }

    // thread 0 publishes this block's partial sum
    if (tempIndex == 0)
    {
        C[blockIdx.x] = temp[0];
    }
}
//__global__ void vector_dot_sum(float *C)
//{
// extern __shared__ float temp[];
// temp[threadIdx.x] = C[threadIdx.x];
// for (int s = 1; s < blockDim.x; s *= 2)
// {
// if (threadIdx.x % (2 * s) == 0)
// {
// temp[threadIdx.x] += temp[threadIdx.x + s];
// }
// __syncthreads();
// }
// if (threadIdx.x == 0)
// {
// C[0] = temp[0];
// }
//}
/* Serial finishing pass: fold C[0..RR-1] into C[0] using a single thread
   (launched <<<1,1>>>).
   NOTE(review): the preceding vector_dot_product launches only write
   blocksPerGrid partial sums into C, yet this kernel sums RR entries —
   entries beyond the written partials may be uninitialized device memory.
   Confirm the intended element count (fixing it requires changing the
   callers in main as well). */
__global__ void vector_dot_sum(float *C)
{
    float temp=0;
    for (int s = 0; s < RR; s ++)
    {
        temp += C[s];
    }
    C[0] = temp;
}
/********************************************************************
*
*********************************************************************/
/* LDL^T factorization (square-root-free Cholesky) of the leading n x n
   block of 'a' (row-major, stride n), computed in place by a single
   thread block.
     a           in:  symmetric input matrix;
                 out: the unit-triangular factor (ones written on the
                      diagonal, zeros on one side — see the i==j / j<i
                      branches below)
     d           out: diagonal matrix D (entries d[i*n+i])
     d_inversion out: reciprocal diagonal, D^{-1}
   Dynamic shared memory: at least n floats (result_sum) for the partial
   inner products.
   NOTE(review): every thread executes the same (i, j) iterations; the
   barriers are uniform because loop bounds are uniform, and the final
   writes are performed redundantly by all threads storing the same value —
   confirm this redundancy is intended. */
__global__ void cholesky(float *a, float *d, float *d_inversion, int n)
{
    extern __shared__ float result_sum[];
    int tid = threadIdx.x;
    float sum = 0.0;
    float sum_t;
    for (int i = 0; i<n; i++)
    {
        for (int j = 0; j<n; j++)
        {
            sum = a[j*n + i]; // start from the original matrix entry
            // threads cooperatively form the terms of the inner product
            for (int k = tid; k<i; k += blockDim.x){
                result_sum[k] = a[i*n + k] * a[j*n + k] * d[k*n + k];
            }
            __syncthreads();
            // every thread reduces the staged terms serially
            sum_t = 0;
            for (int k = 0; k<i; k++){
                sum_t += result_sum[k];
            }
            sum -= sum_t;
            if (i == j)
            {
                // pivot: store D and its reciprocal, unit diagonal in a
                d[i*n + i] = sum;
                d_inversion[i*n + i] = 1 / sum;
                a[i*n + j] = 1;
            }
            else if (j<i)
            {
                a[j*n + i] = 0;
            }
            else
            {
                a[j*n + i] = sum / d[i*n + i];
            }
        }
    }
}
/* Forward-elimination step: applies the multipliers of the unit-triangular
   matrix 'a' to the rows of E (initialized to the identity by the host),
   accumulating the triangular inverse into E. One thread per column.
   NOTE(review): there is no synchronization between i-iterations;
   correctness relies on each thread touching only its own column 'offset'
   of E. Confirm the launch covers all n columns. */
__global__ void matrix_inversion(float *a, float *E, int n)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int offset = tid + blockDim.x*bid; // column handled by this thread
    for (int i = 0; i<n; i++)
    {
        for (int k = i + 1; k<n; k++)
        {
            if (offset<n){
                // row_k of E -= a[k][i] * row_i of E (restricted to this column)
                E[k*n + offset] = E[k*n + offset] - E[i*n + offset] * a[k*n + i];
            }
        }
    }
}
/* Out-of-place transpose of an n x n row-major matrix:
   dev_a_transpose = dev_E^T. Flat 1D launch, one thread per element. */
__global__ static void matrix_trans(float *dev_E, float *dev_a_transpose, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = idx / n;
    const int c = idx % n;
    if (r < n && c < n)
        dev_a_transpose[r*n + c] = dev_E[c*n + r];
}
/* Square matrix product A_t = A * B for n x n row-major matrices.
   Launch with one block per row of A (grid.x = n) and n floats of dynamic
   shared memory to stage the current row. Uses a compensated (Kahan-style)
   summation for the inner product, same scheme as matMult_gpu above. */
__global__ static void matrix_multiply(float *A, float *B, float *A_t, int n){
    extern __shared__ float data[];
    int tid = threadIdx.x;
    int row = blockIdx.x; // one block handles one row of A
    int i, j;
    // cooperatively stage row 'row' of A into shared memory
    for (i = tid; i<n; i += blockDim.x){
        data[i] = A[row*n + i];
    }
    __syncthreads();
    // each thread computes a strided subset of the output columns
    for (j = tid; j<n; j += blockDim.x){
        float t = 0;  // running sum
        float y = 0;  // running compensation term
        for (i = 0; i<n; i++){
            float r;
            y -= data[i] * B[i*n + j];
            r = t - y;
            y = (r - t) + y;
            t = r;
        }
        A_t[row*n + j] = t;
    }
}
/*WORD*/
typedef unsigned short WORD;
/*DWORDe*/
typedef unsigned long DWORD;
/* BMP file header.
   NOTE(review): DWORD is typedef'd above to unsigned long, which is 8 bytes
   on LP64 platforms, and the struct is not packed — its in-memory layout
   then does not match the 14-byte on-disk BMP file header. Confirm the
   target platform / fread usage. */
typedef struct BMP_FILE_HEADER {
    WORD bType;      /* file type magic ("BM") */
    DWORD bSize;     /* total file size in bytes */
    WORD bReserved1; /* reserved, must be 0 */
    WORD bReserved2; /* reserved, must be 0 */
    DWORD bOffset;   /* byte offset from file start to the pixel data */
} BMPFILEHEADER;
/* BMP info header.
   NOTE(review): same caveat as BMPFILEHEADER — with an 8-byte unsigned
   long DWORD this struct does not match the 40-byte on-disk BITMAPINFOHEADER
   layout on LP64 platforms; confirm before relying on direct fread. */
typedef struct BMP_INFO {
    DWORD bInfoSize;      /* size of this header in bytes */
    DWORD bWidth;         /* image width in pixels */
    DWORD bHeight;        /* image height in pixels */
    WORD bPlanes;         /* number of color planes (1) */
    WORD bBitCount;       /* bits per pixel */
    DWORD bCompression;   /* compression type */
    DWORD bmpImageSize;   /* image data size in bytes (may be 0 if uncompressed) */
    DWORD bXPelsPerMeter; /* horizontal resolution, pixels per meter */
    DWORD bYPelsPerMeter; /* vertical resolution, pixels per meter */
    DWORD bClrUsed;       /* number of palette colors actually used */
    DWORD bClrImportant;  /* number of important palette colors */
} BMPINF;
/* Palette (color table) entry.
   NOTE(review): the standard BMP RGBQUAD uses one byte per channel; here
   each field is a 2-byte WORD, so this struct is 8 bytes instead of 4 —
   confirm against how the palette is read/written. */
typedef struct RGB_QUAD {
    WORD rgbBlue;     /* blue channel */
    WORD rgbGreen;    /* green channel */
    WORD rgbRed;      /* red channel */
    WORD rgbReversed; /* reserved / padding */
} RGBQUAD;
double anss[M][256];
unsigned char ans1[M][256];
double ww1[M][256];
double wwturn1[M][256];
double tempx[M][256];
double pixel[M][N];
double simplingmatrix[RR][N];
double compressed_matrix[RR][N];
double ww[N][N], wwturn[N][N];
double temp1[RR][N];
float Simplingmatrix[RR * N], Compressed_matrix[RR * N];
float showmeans[256] = { -1 };
float showmemans[256 * 256] = { -1 };
float showmemans2[190 * 256] = { -1 };
int givemethevans[190] = { -1 };
int givemetheans=-1;
void testout(double** x, int height, int width, char*str); //
void testoutx(double x[][N], int height, int width, char*str); //
extern "C" void readbmp(FILE* fp, double pixel[][N]); //
extern "C" void writebmp(FILE* fo, unsigned char pixel[][N], int height, int width);
void createsimplingmatrix(int wid, int height, double phi[][N]); //
double error(double** piexl, double** ans, int height, int width);
/* Matrix product into fixed-width [ ][N] row-major buffers:
   ans (height x width) = matrix2 (height x wid2) * matrix1 (wid2 x width).
   Note the argument order: matrix2 is the LEFT factor. */
void matrixmultiplicxx(double matrix1[][N], int width, double matrix2[][N],
        int height, int wid2, double ans[][N]) {
    for (int r = 0; r < height; r++) {
        for (int c = 0; c < width; c++) {
            // accumulate the inner product directly into the output cell
            ans[r][c] = 0;
            for (int k = 0; k < wid2; k++)
                ans[r][c] += matrix2[r][k] * matrix1[k][c];
        }
    }
}
/* Dump a column vector of 'height' doubles to file 'str', tab-separated.
   Fix: the fopen() result is now checked before use; the original
   dereferenced a NULL FILE* when the file could not be created. */
void testoutcol(double* x, int height, char*str) {
    FILE *fto = fopen(str, "wb");
    if (fto == NULL) {
        fprintf(stderr, "testoutcol: cannot open %s\n", str);
        return;
    }
    for (int i = 0; i < height; i++) {
        fprintf(fto, "%f\t", x[i]);
    }
    fclose(fto);
}
double gaussrand(); //
int main() {
FILE *fp, *fo, *fp1;
FILE *fp2, *fp3;
BMPFILEHEADER fileHeader;
BMPINF infoHeader;
long width = 256, height = 256;
int i, j;
int temp[256 * 4];
//double **pixel = NULL;
//double **simplingmatrix = NULL;
//double **compressed_matrix = NULL;
for (i = 0;i < 256;i++)
showmeans[i] = 0;
double **ans = NULL;
float ansx[M*N];
//float* ansx;
//ansx = (float*)malloc(M*N * sizeof(float));
//printf("OK\n");
//if ((fp = fopen("lena256.bmp", "rb")) == NULL) {
// printf("Cann't open lena256!\n");
// exit(0);
//}
//if ((fp1 = fopen("DWT.txt", "rb")) == NULL) {
// printf("Cann't open DWT.txt!\n");
// exit(0);
//}
//if ((fo = fopen("OK.bmp", "wb")) == NULL) {
// printf("Cann't open OK.bmp!\n");
// exit(0);
//}
////printf("OK\n");
//fseek(fp, 0, 0);
//fread(&fileHeader, sizeof(fileHeader), 1, fp);
//fwrite(&fileHeader, sizeof(fileHeader), 1, fo);
//fread(&infoHeader, sizeof(infoHeader), 1, fp);
//fwrite(&infoHeader, sizeof(infoHeader), 1, fo);
//fread(&temp, sizeof(unsigned int), 256, fp);
//fwrite(&temp, sizeof(unsigned int), 256, fo);
////printf("OK\n");
//width = infoHeader.bWidth;
//height = infoHeader.bHeight;
////printf("OK\n");
////creatmatrix(pixel,height, width);
////if (pixel == NULL)
//// return 0;
//readbmp(fp, pixel);
//printf("read OK");
//createsimplingmatrix(RR, width, simplingmatrix);
//matrixmultiplicxx(pixel, width, simplingmatrix, RR, width, compressed_matrix);
//printf("OK\n");
//creatmatrix(ww,M, N);
//for (i = 0; i<M; i++)
//for (j = 0; j<N; j++)
// fscanf(fp1, "%lf", &ww[i][j]);
//matrixturn(ww, height, height, wwturn);
//for (i = 0; i<M; i++)
//for (j = 0; j < N; j++) {
// ww1[i][j] = ww[i][j];
// wwturn1[i][j] = wwturn[i][j];
//}
printf("OK\n");
//matrixmultiplicxx(wwturn, width, compressed_matrix,
// RR, width,temp1);
//for (i = 0;i < RR;i++) {
// for (j = 0;j < N;j++)
// compressed_matrix[i][j] = temp1[i][j];
//}
//matrixmultiplicxx(wwturn, width, simplingmatrix, RR, width, temp1);
//for(i=0;i<RR;i++)
// for(j=0;j<N;j++)
//simplingmatrix[i][j] = temp1[i][j];
//ans = creatmatrix(height, width);
if ((fp2 = fopen("s.txt", "rb")) == NULL) {
printf("Cann't open DWT.txt!\n");
exit(0);
}
for (i = 0; i<190; i++)
for (j = 0; j<256; j++)
fscanf(fp2, "%lf", &simplingmatrix[i][j]);
if ((fp3 = fopen("c.txt", "rb")) == NULL) {
printf("Cann't open DWT.txt!\n");
exit(0);
}
for (i = 0; i<190; i++)
for (j = 0; j<256; j++)
fscanf(fp3, "%lf", &compressed_matrix[i][j]);
//
//Simplingmatrix = (float*)malloc(RR * width * sizeof(float));
//Compressed_matrix = (float*)malloc(RR * width * sizeof(float));
for (i = 0; i < N; i++)
{
for (j = 0; j < RR; j++)
{
Simplingmatrix[i * RR + j] = simplingmatrix[j][i];
}
}
for (i = 0; i < N; i++)
{
for (j = 0; j < RR; j++)
{
Compressed_matrix[i * RR+ j] = compressed_matrix[j][i];
}
}
//GPU
float *dev_Simpling, *dev_Compress;
hipMalloc((void**)&dev_Simpling, RR * width * sizeof(float));
hipMalloc((void**)&dev_Compress, RR * width * sizeof(float));
hipMemcpy(dev_Simpling, Simplingmatrix, RR * width * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_Compress, Compressed_matrix, RR * width * sizeof(float), hipMemcpyHostToDevice);
float *dev_final_result;
hipMalloc((void**)&dev_final_result, N* sizeof(float));
//dev_Aug_tdev_temp3RR*256,256*256
float *dev_Aug_t, *dev_Aug_tt, *dev_temp3, *dev_temp1, *dev_temp2;
float *r_n_temp;
hipMalloc((void**)&r_n_temp, RR* sizeof(float));
hipMalloc((void**)&dev_Aug_t, RR * N * sizeof(float));
hipMalloc((void**)&dev_Aug_tt, N* N * sizeof(float));
hipMalloc((void**)&dev_temp3, RR * N * sizeof(float));
hipMalloc((void**)&dev_temp1, N* N * sizeof(float));
hipMalloc((void**)&dev_temp2, N* N * sizeof(float));
float *judge2;
judge2 = (float*)malloc(sizeof(float));
//times*1
float *dev_aug_y;
//
float *a_transpose, *E, *A_t, *d_transpose, *a, *a_a, *a_t, *d;
a_transpose = (float*)malloc(sizeof(float)*N*N);
E = (float*)malloc(sizeof(float)*N*N);
A_t = (float*)malloc(sizeof(float)*N*N);
d_transpose = (float*)malloc(sizeof(float)*N*N);
//a = (float*)malloc(sizeof(float)*N*N);
//a_a = (float*)malloc(sizeof(float)*N*N);
//a_t = (float*)malloc(sizeof(float)*N*N);
d = (float*)malloc(sizeof(float)*N*N);
float *dev_a, *dev_d, *dev_a_transpose, *dev_E, *dev_A_t, *dev_A_t_t, *dev_d_transpose;
hipMalloc((void**)&dev_a, N * N * sizeof(float));
hipMalloc((void**)&dev_d, N * N * sizeof(float));
hipMalloc((void**)&dev_a_transpose, N * N * sizeof(float));
hipMalloc((void**)&dev_E, N * N * sizeof(float));
hipMalloc((void**)&dev_A_t, N * N * sizeof(float));
hipMalloc((void**)&dev_A_t_t, N * N * sizeof(float));
hipMalloc((void**)&dev_d_transpose, N * N * sizeof(float));
hipMalloc((void**)&dev_aug_y, N * sizeof(float));
int c = 0;
//
for (int i = 0; i<N; i++)
{
for (int j = 0; j<N; j++)
{
d[i*N + j] = 0;
}
}
//
for (int i = 0; i<N; i++)
{
for (int j = 0; j<N; j++)
{
if (i == j)
{
E[i*N + j] = 1;
}
else
{
E[i*N + j] = 0;
}
}
}
//
for (int i = 0; i<N; i++){
for (int j = 0; j<N; j++)
{
A_t[i*N + j] = 0;
}
}
hipMemcpy(dev_d, d, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_d_transpose, d, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_A_t, A_t, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_E, E, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_A_t_t, A_t, N * N * sizeof(float), hipMemcpyHostToDevice);
//
int *dev_pos;
//
int *dev_pos_array;
//
int *dev_c;
float *dev_judge1, *dev_judge2;
//
//float *dev_norm;
//
float *dev_column;
float *dev_finishomp;
float *dev_finish;
float *dev_column_temp;
float *dev_product_temp;
float *dev_product;
float *dev_r_n;
float *dev_Simp_column;
float zero[RR];
float *dev_zero;
for (i = 0; i < RR; i++)
{
zero[i] = 0;
}
hipMalloc((void**)&dev_zero, RR * sizeof(float));
hipMalloc((void**)&dev_finishomp, M * N *sizeof(float));
hipMalloc((void**)&dev_finish, M * N *sizeof(float));
hipMemcpy(dev_zero, zero, RR*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_r_n, RR * sizeof(float));
hipMalloc((void**)&dev_Simp_column, RR * sizeof(float));
hipMalloc((void**)&dev_product_temp, RR * sizeof(float));
hipMalloc((void**)&dev_product, N * sizeof(float));
hipMalloc((void**)&dev_column, RR * sizeof(float));
hipMalloc((void**)&dev_column_temp, RR * sizeof(float));
//hipMalloc((void**)&dev_norm, sizeof(float));
hipMalloc((void**)&dev_pos, sizeof(int));
hipMalloc((void**)&dev_pos_array, RR * sizeof(int));
hipMalloc((void**)&dev_c, sizeof(int));
hipMalloc((void**)&dev_judge1, sizeof(float));
hipMalloc((void**)&dev_judge2, sizeof(float));
for (i = 0;i < RR;i++)
zero[i] = 0;
printf("OMP start\n");
int col;
int times;
for (i = 0; i < width; i++)
{
hipMemcpy(dev_column, (dev_Compress + i * RR*sizeof(float)), RR*sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpy(dev_r_n, dev_column, RR * sizeof(float), hipMemcpyDeviceToDevice);
for (times = 1; times <= RR; times++)
{
for (col = 0; col < N; col++)
{
hipMemcpy(dev_Simp_column, &dev_Simpling[col * RR], RR*sizeof(float), hipMemcpyDeviceToDevice);
vector_dot_product << <blocksPerGrid, threadsPerBlock >> >(dev_product_temp, dev_Simp_column, dev_r_n, RR);
vector_dot_sum << <1, 1 >> >(dev_product_temp);
hipMemcpy(&dev_product[col], dev_product_temp, sizeof(float), hipMemcpyDeviceToDevice);
}
d_find_max << <1, 1 >> >(dev_pos, dev_product, N);
int pos = 0;
hipMemcpy(&pos, dev_pos, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&dev_Aug_tt[(times - 1)*RR], &dev_Simpling[pos * RR], RR*sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpy(&dev_Simpling[pos*RR], dev_zero, RR*sizeof(float), hipMemcpyDeviceToDevice);
int ax = (times + BLOCK_SIZE - 1) / BLOCK_SIZE;
int bx = (RR + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 blocks(bx, ax);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
//hipMemcpy(showmemans, dev_Aug_tt, N*N * sizeof(float), hipMemcpyDeviceToHost);
matrix_transpose << <blocks, threads >> >(dev_Aug_t, dev_Aug_tt, N, RR);
matMult_gpu << < N, N, sizeof(float)*RR >> >(dev_Aug_tt, dev_Aug_t, dev_temp1, N, RR, N);
cholesky << <1, THREAD_NUM, sizeof(float)*times >> >(dev_temp1, dev_d, dev_d_transpose, times);
matrix_inversion << <(times + THREAD_NUM - 1) / THREAD_NUM, THREAD_NUM >> >(dev_a, dev_E, times);
matrix_trans << <((times + THREAD_NUM - 1) / THREAD_NUM)*times, THREAD_NUM >> >(dev_E, dev_a_transpose, times);
matrix_multiply << <times, THREAD_NUM, sizeof(float)*times >> >(dev_a_transpose, dev_d_transpose, dev_A_t, times);
matrix_multiply << <times, THREAD_NUM, sizeof(float)*times >> >(dev_A_t, dev_E, dev_temp2, times);
matMult_gpu << < N, RR, sizeof(float)*N >> >(dev_temp2, dev_Aug_tt, dev_temp3, N, N, RR);
hipMemcpy(showmemans, dev_temp2, N*N * sizeof(float), hipMemcpyDeviceToHost);
matrix_x_vector << < (N + THREAD_NUM - 1 / THREAD_NUM), THREAD_NUM >> >(dev_aug_y, dev_temp3, dev_column, RR);
matrix_x_vector << < (RR + THREAD_NUM - 1 / THREAD_NUM), THREAD_NUM >> >(r_n_temp, dev_Aug_t, dev_aug_y,N);
vector_sub_vector << <1, RR >> >(dev_r_n, dev_column, r_n_temp, RR);
hipMemcpy(&dev_pos_array[times - 1], dev_pos, sizeof(int), hipMemcpyDeviceToDevice);
valueMul << <1, 1 >> >(dev_judge1, dev_aug_y, (times - 1));
vector_dot_product << <blocksPerGrid, threadsPerBlock >> >(dev_product_temp, dev_aug_y, dev_aug_y, N);
vector_dot_sum << <1, 1 >> >(dev_product_temp);
valueSqrt << <1, 1 >> >(dev_product_temp, dev_judge1, dev_judge2);
hipMemcpy(judge2, dev_judge2, sizeof(float), hipMemcpyDeviceToHost);
if ((*judge2) < 0.05)
{
c = times;
break;
}
}
result << <1, 1 >> >(dev_final_result, dev_aug_y, dev_pos_array, c);
hipMemcpy((dev_finishomp + i*N*sizeof(float)), dev_final_result, N*sizeof(float), hipMemcpyDeviceToDevice);
}
printf("OMP end\n");
int ax2 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE;
int bx2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 blocks2(bx2, ax2);
dim3 threads2(BLOCK_SIZE, BLOCK_SIZE);
matrix_transpose << < blocks2, threads2 >> >(dev_finish, dev_finishomp, M, N);
hipMemcpy(ansx, dev_finish, M*N*sizeof(float), hipMemcpyDeviceToHost);
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
anss[i][j] = ansx[i*N + j];
//anss[i][j] = ans[i][j];
printf("preapare to out.txt\n");
testoutx(anss, height, width, "ans.txt");
////
//matrixmultiplicxx(anss, width, wwturn1, height, width, tempx);
//matrixmultiplicxx(ww1, width, tempx, height, width, anss);
//printf("\n");
//for (i = 0; i < M; i++)
//for (j = 0; j < N; j++)
// ans[i][j] = anss[i][j];
//printf("ans \n");
//for (i = 0; i < M; i++) {
// for (j = 0; j < N; j++) {
// if (anss[i][j] > 255) {
// ans1[i][j] = 255;
// }
// else if (anss[i][j] < 0) {
// ans1[i][j] = 0;
// }
// else {
// ans1[i][j] = anss[i][j];
// }
// }
//}
//printf("\n");
//writebmp(fo, ans1, height, width);
return 0;
}
void testout(double** x, int height, int width, char*str) {
int i, j;
FILE *fto = fopen(str, "wb");
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
fprintf(fto, "%f\t", x[i][j]);
}
fprintf(fto, "\n");
}
fclose(fto);
}
void testoutx(double x[][N], int height, int width, char*str) {
int i, j;
FILE *fto = fopen(str, "wb");
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
fprintf(fto, "%f\t", x[i][j]);
}
fprintf(fto, "\n");
}
fclose(fto);
}
void readbmp(FILE* fp, double pixel[][N]) {
int i, j;
for (i = N - 1; i >= 0; i--) {
for (j = 0; j < N; j++) {
pixel[i][j] = (double)fgetc(fp);
}
}
fclose(fp);
}
void createsimplingmatrix(int height, int wid, double phi[][N]) {
int i, j;
double x, p[RR][N];
for (i = 0; i < height; i++)
for (j = 0; j < wid; j++) {
phi[i][j] = 0;
p[i][j] = 0;
}
srand((int)time(NULL));
for (i = 0; i < height; i++) {
for (j = 0; j < wid; j++) {
x = gaussrand();
p[i][j] = x;
}
}
for (i = 0; i < height; i++)
for (j = 0; j < wid; j++) {
phi[i][j] = p[i][j];
}
}
double gaussrand() {
static double U, V;
static int phase = 0;
double z;
if (phase == 0)
{
U = (rand() + 1.1) / (RAND_MAX + 2.);
V = rand() / (RAND_MAX + 1.);
z = sqrt(-1 * log(U))* sin(2 * 3.141592654 * V);
}
else
{
z = sqrt(-2 * log(U)) * cos(2 * 3.141592654 * V);
}
phase = 1 - phase;
return z;
}
void writebmp(FILE* fo, unsigned char pixel[][N], int height, int width) {
int i, j;
for (i = height - 1; i >= 0; i--) {
for (j = 0; j < width; j++) {
fwrite(&pixel[j][i], sizeof(unsigned char), 1, fo);
}
}
fclose(fo);
}
double error(double** piexl, double** ans, int height, int width){
double sum = 0, psnr;
int i, j;
for (i = 0; i < height; i++)
for (j = 0; j < width; j++)
sum += pow(fabs(ans[i][j] - piexl[i][j]), 2);
psnr = 10 * log10(255 * 255 / (sum / height / width));
return psnr;
}
| ca189ed23070eeb8e32ab1e86e8b7c456fdfa306.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <time.h>
#include <math.h>
#include <memory.h>
#include "matrixop.cuh"
#define RR 190
#define M 256
#define N 256
#define BLOCK_SIZE 16
#define THREAD_NUM 256
#define imin(a, b) (a<b ? a : b)
const int threadsPerBlock = 16;
const int blocksPerGrid = imin(32, (RR + threadsPerBlock - 1) / threadsPerBlock);
//CPU函数
//double** matrixturn(double** matrix, int height, int width) {
// int i = 0, j;
// double** turn = (double**)malloc(width * sizeof(double*));
// if (!turn)
// return NULL;
// for (i = 0; i < width; i++)
// turn[i] = (double*)malloc(height * sizeof(double));
// for (i = 0; i < width; i++)
// for (j = 0; j < height; j++)
// turn[i][j] = matrix[j][i];
// return turn;
//}
//double** matrixmultiplic(double** matrix1, int width, double** matrix2,
// int height, int wid2) {
// int i, j, k;
// double** ans = NULL;
// //double** ans = creatmatrix(height,width); 这种写法会报错 conflicting types for 在声明前使用
// ans = (double**)malloc(height * sizeof(double*));
// if (ans == NULL)
// return NULL;
// for (i = 0; i < height; i++) {
// ans[i] = (double*)malloc(width * sizeof(double));
// if (ans[i] == NULL)
// return NULL;
// }
// // 矩阵乘法
// for (i = 0; i < height; i++) {
// for (j = 0; j < width; j++) {
// ans[i][j] = 0;
// for (k = 0; k < wid2; k++) {
// ans[i][j] += matrix2[i][k] * matrix1[k][j];
// }
// }
// }
// return ans;
//}
//double** creatmatrix(int height, int width) {
// double **pixel = (double**)malloc(height * sizeof(double*));
// if (pixel == NULL) {
// printf("It is out of memory1!\n");
// return NULL;
// }
// int i, j;
// for (i = 0; i < height; i++) {
// pixel[i] = (double*)malloc(width * sizeof(double));
// if (pixel[i] == NULL) {
// printf("It is out of memory2! %d\n", i);
// return NULL;
// }
// }
// for (i = 0; i < height; i++) {
// for (j = 0; j < width; j++) {
// pixel[i][j] = 0;
// }
// }
// return pixel;
//}
//GPU核函数
/*
赋值
*/
__global__ void result(float *result, float *aug_y, int *posarray, int i)
{
int j;
for (j = 0; j < N; j++)
{
result[j] = 0;
}
for (j = 0; j < i; j++)
{
result[posarray[j]] = aug_y[j];
}
}
/*
数值相乘
*/
__global__ void valueMul(float *c, float *a, int i)
{
(*c) = a[i] * a[i];
}
/*
开根号
*/
__global__ void valueSqrt(float *a, float *b, float *c)
{
(*c) = (*b) / sqrt((*a));
}
/*
找最大值下标
*/
__global__ void d_find_max(int *idx, float *P, int n){
float output[N];
for (int i = 0;i < N;i++)
output[i] = P[i];
for (int i = 1; i<n; i++){
if (P[i]>P[0]){
P[0] = P[i];
(*idx) = i;
}
}
return;
}
/*
向量相减
*/
__global__ void vector_sub_vector(float *C, float *A, float *B, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
{
C[tid] = A[tid] - B[tid];
}
}
/*
矩阵相乘
A:M*P,B:P*N
*/
__global__ static void matMult_gpu(float *A, float *B, float *C, int m, int p, int n)
{
extern __shared__ float data[];
int tid = threadIdx.x;
int row = blockIdx.x; //一个Row只能由同一个block的threads来进行计算
int i, j;
for (i = tid; i<p; i += blockDim.x){
data[i] = A[row*p + i];
}
__syncthreads();
for (j = tid; j<n; j += blockDim.x){
float t = 0;
float y = 0;
for (i = 0; i<p; i++){
float r;
y -= data[i] * B[i*n + j];
r = t - y;
y = (r - t) + y;
t = r;
}
C[row*n + j] = t;
}
}
/*
矩阵转置
*/
__global__ static void matrix_transpose(float *A_T, float *A, int hA, int wA)
{
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE + 1];
unsigned int xIndex = blockIdx.x * BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_SIZE + threadIdx.y;
if ((xIndex < wA) && (yIndex < hA))
{
unsigned int aIndex = yIndex * wA + xIndex;
temp[threadIdx.y][threadIdx.x] = A[aIndex];
}
__syncthreads();
xIndex = blockIdx.y * BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * BLOCK_SIZE + threadIdx.y;
if ((xIndex < hA) && (yIndex < wA))
{
unsigned int a_tIndex = yIndex * hA + xIndex;
A_T[a_tIndex] = temp[threadIdx.x][threadIdx.y];
}
}
/*
矩阵与向量相乘
A(aH, aW); B(aW, 1); C(aH, 1)
*/
__global__ static void matrix_x_vector(float *C, float *A, float *B, int wA)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = bid*blockDim.x + tid;
float temp = 0.0;
__syncthreads();
if (offset<wA)
{
for (int i = 0; i < wA; i++)
{
temp += A[offset*wA + i] * B[i];
}
__syncthreads();
C[offset] = temp;
}
}
/*
向量点积
先求出两两相乘的乘积,再归约求所有乘积的和
*/
__global__ void vector_dot_product(float *C, float *A, float *B, int n)
{
__shared__ float temp[BLOCK_SIZE];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int tempIndex = threadIdx.x;
double result = 0.0;
while (tid < n)
{
result += A[tid] * B[tid];
tid += blockDim.x * gridDim.x;
}
temp[tempIndex] = result;
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (tempIndex < i)
{
temp[tempIndex] += temp[tempIndex + i];
}
__syncthreads();
i /= 2;
}
if (tempIndex == 0)
{
C[blockIdx.x] = temp[0];
}
}
//__global__ void vector_dot_sum(float *C)
//{
// extern __shared__ float temp[];
// temp[threadIdx.x] = C[threadIdx.x];
// for (int s = 1; s < blockDim.x; s *= 2)
// {
// if (threadIdx.x % (2 * s) == 0)
// {
// temp[threadIdx.x] += temp[threadIdx.x + s];
// }
// __syncthreads();
// }
// if (threadIdx.x == 0)
// {
// C[0] = temp[0];
// }
//}
__global__ void vector_dot_sum(float *C)
{
float temp=0;
for (int s = 0; s < RR; s ++)
{
temp += C[s];
}
C[0] = temp;
}
/********************************************************************
* 矩阵求逆
*********************************************************************/
/*cholesky分解 A = LDL^T
最后a中存放的是分解后的单位下三角矩阵L,
d中存放的是对角均为正数的对角矩阵
d_inversion中存放的是对角均为正数的对角矩阵的逆矩阵
*/
__global__ void cholesky(float *a, float *d, float *d_inversion, int n)
{
extern __shared__ float result_sum[];
int tid = threadIdx.x;
float sum = 0.0;
float sum_t;
for (int i = 0; i<n; i++)
{
for (int j = 0; j<n; j++)
{
sum = a[j*n + i]; //第一项
for (int k = tid; k<i; k += blockDim.x){
result_sum[k] = a[i*n + k] * a[j*n + k] * d[k*n + k];
}
__syncthreads();
sum_t = 0;
for (int k = 0; k<i; k++){
sum_t += result_sum[k];
}
sum -= sum_t;
if (i == j)
{
d[i*n + i] = sum;
d_inversion[i*n + i] = 1 / sum;
a[i*n + j] = 1;
}
else if (j<i)
{
a[j*n + i] = 0;
}
else
{
a[j*n + i] = sum / d[i*n + i];
}
}
}
}
/*单位下三角矩阵求逆
最后E中是A的逆矩阵
*/
__global__ void matrix_inversion(float *a, float *E, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = tid + blockDim.x*bid;
for (int i = 0; i<n; i++)
{
for (int k = i + 1; k<n; k++)
{
if (offset<n){
E[k*n + offset] = E[k*n + offset] - E[i*n + offset] * a[k*n + i];
}
}
}
}
/*矩阵转置
最后dev_a_transpose中是dev_E矩阵的转置矩阵
*/
__global__ static void matrix_trans(float *dev_E, float *dev_a_transpose, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = tid + bid*blockDim.x;
int row = offset / n;
int column = offset % n;
if (row<n && column<n){
dev_a_transpose[row*n + column] = dev_E[column*n + row];
}
}
/*矩阵求积
最后A_t中是A矩阵和B矩阵相乘的结果
*/
__global__ static void matrix_multiply(float *A, float *B, float *A_t, int n){
extern __shared__ float data[];
int tid = threadIdx.x;
int row = blockIdx.x; //一个Row只能由同一个block的threads来进行计算
int i, j;
for (i = tid; i<n; i += blockDim.x){
data[i] = A[row*n + i];
}
__syncthreads();
for (j = tid; j<n; j += blockDim.x){
float t = 0;
float y = 0;
for (i = 0; i<n; i++){
float r;
y -= data[i] * B[i*n + j];
r = t - y;
y = (r - t) + y;
t = r;
}
A_t[row*n + j] = t;
}
}
/*定义WORD为两个字节的类型*/
typedef unsigned short WORD;
/*定义DWORD为e四个字节的类型*/
typedef unsigned long DWORD;
/*位图文件头*/
typedef struct BMP_FILE_HEADER {
WORD bType; /* 文件标识符 */
DWORD bSize; /* 文件的大小 */
WORD bReserved1; /* 保留值,必须设置为0 */
WORD bReserved2; /* 保留值,必须设置为0 */
DWORD bOffset; /* 文件头的最后到图像数据位开始的偏移量 */
} BMPFILEHEADER;
/*位图信息头*/
typedef struct BMP_INFO {
DWORD bInfoSize; /* 信息头的大小 */
DWORD bWidth; /* 图像的宽度 */
DWORD bHeight; /* 图像的高度 */
WORD bPlanes; /* 图像的位面数 */
WORD bBitCount; /* 每个像素的位数 */
DWORD bCompression; /* 压缩类型 */
DWORD bmpImageSize; /* 图像的大小,以字节为单位 */
DWORD bXPelsPerMeter; /* 水平分辨率 */
DWORD bYPelsPerMeter; /* 垂直分辨率 */
DWORD bClrUsed; /* 使用的色彩数 */
DWORD bClrImportant; /* 重要的颜色数 */
} BMPINF;
/*彩色表*/
typedef struct RGB_QUAD {
WORD rgbBlue; /* 蓝色强度 */
WORD rgbGreen; /* 绿色强度 */
WORD rgbRed; /* 红色强度 */
WORD rgbReversed; /* 保留值 */
} RGBQUAD;
double anss[M][256];
unsigned char ans1[M][256];
double ww1[M][256];
double wwturn1[M][256];
double tempx[M][256];
double pixel[M][N];
double simplingmatrix[RR][N];
double compressed_matrix[RR][N];
double ww[N][N], wwturn[N][N];
double temp1[RR][N];
float Simplingmatrix[RR * N], Compressed_matrix[RR * N];
float showmeans[256] = { -1 };
float showmemans[256 * 256] = { -1 };
float showmemans2[190 * 256] = { -1 };
int givemethevans[190] = { -1 };
int givemetheans=-1;
void testout(double** x, int height, int width, char*str); //将指定矩阵输出至选中的文件中
void testoutx(double x[][N], int height, int width, char*str); //将指定矩阵输出至选中的文件中
extern "C" void readbmp(FILE* fp, double pixel[][N]); //读取图像
extern "C" void writebmp(FILE* fo, unsigned char pixel[][N], int height, int width);
void createsimplingmatrix(int wid, int height, double phi[][N]); //生成观测矩阵
double error(double** piexl, double** ans, int height, int width);
void matrixmultiplicxx(double matrix1[][N], int width, double matrix2[][N],
int height, int wid2, double ans[][N]) {
int i, j, k;
// 矩阵乘法
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
ans[i][j] = 0;
for (k = 0; k < wid2; k++) {
ans[i][j] += matrix2[i][k] * matrix1[k][j];
}
}
}
}
void testoutcol(double* x, int height, char*str) {
int i;
FILE *fto = fopen(str, "wb");
for (i = 0; i < height; i++) {
fprintf(fto, "%f\t", x[i]);
}
fclose(fto);
}
double gaussrand(); //生成高斯矩阵
int main() {
FILE *fp, *fo, *fp1;
FILE *fp2, *fp3;
BMPFILEHEADER fileHeader;
BMPINF infoHeader;
long width = 256, height = 256;
int i, j;
int temp[256 * 4];
//double **pixel = NULL;
//double **simplingmatrix = NULL;
//double **compressed_matrix = NULL;
for (i = 0;i < 256;i++)
showmeans[i] = 0;
double **ans = NULL;
float ansx[M*N];
//float* ansx;
//ansx = (float*)malloc(M*N * sizeof(float));
//printf("OK\n");
//if ((fp = fopen("lena256.bmp", "rb")) == NULL) {
// printf("Cann't open lena256!\n");
// exit(0);
//}
//if ((fp1 = fopen("DWT.txt", "rb")) == NULL) {
// printf("Cann't open DWT.txt!\n");
// exit(0);
//}
//if ((fo = fopen("OK.bmp", "wb")) == NULL) {
// printf("Cann't open OK.bmp!\n");
// exit(0);
//}
////printf("OK\n");
//fseek(fp, 0, 0);
//fread(&fileHeader, sizeof(fileHeader), 1, fp);
//fwrite(&fileHeader, sizeof(fileHeader), 1, fo);
//fread(&infoHeader, sizeof(infoHeader), 1, fp);
//fwrite(&infoHeader, sizeof(infoHeader), 1, fo);
//fread(&temp, sizeof(unsigned int), 256, fp);
//fwrite(&temp, sizeof(unsigned int), 256, fo);
////printf("OK\n");
//width = infoHeader.bWidth;
//height = infoHeader.bHeight;
////printf("OK\n");
////creatmatrix(pixel,height, width);
////if (pixel == NULL)
//// return 0;
//readbmp(fp, pixel);
//printf("read OK");
//createsimplingmatrix(RR, width, simplingmatrix);
//matrixmultiplicxx(pixel, width, simplingmatrix, RR, width, compressed_matrix);
//printf("OK\n");
//creatmatrix(ww,M, N);
//for (i = 0; i<M; i++)
//for (j = 0; j<N; j++)
// fscanf(fp1, "%lf", &ww[i][j]);
//matrixturn(ww, height, height, wwturn);
//for (i = 0; i<M; i++)
//for (j = 0; j < N; j++) {
// ww1[i][j] = ww[i][j];
// wwturn1[i][j] = wwturn[i][j];
//}
printf("OK\n");
//matrixmultiplicxx(wwturn, width, compressed_matrix,
// RR, width,temp1);
//for (i = 0;i < RR;i++) {
// for (j = 0;j < N;j++)
// compressed_matrix[i][j] = temp1[i][j];
//}
//matrixmultiplicxx(wwturn, width, simplingmatrix, RR, width, temp1);
//for(i=0;i<RR;i++)
// for(j=0;j<N;j++)
//simplingmatrix[i][j] = temp1[i][j];
//ans = creatmatrix(height, width);
if ((fp2 = fopen("s.txt", "rb")) == NULL) {
printf("Cann't open DWT.txt!\n");
exit(0);
}
for (i = 0; i<190; i++)
for (j = 0; j<256; j++)
fscanf(fp2, "%lf", &simplingmatrix[i][j]);
if ((fp3 = fopen("c.txt", "rb")) == NULL) {
printf("Cann't open DWT.txt!\n");
exit(0);
}
for (i = 0; i<190; i++)
for (j = 0; j<256; j++)
fscanf(fp3, "%lf", &compressed_matrix[i][j]);
//二维数组转一维数组
//Simplingmatrix = (float*)malloc(RR * width * sizeof(float));
//Compressed_matrix = (float*)malloc(RR * width * sizeof(float));
for (i = 0; i < N; i++)
{
for (j = 0; j < RR; j++)
{
Simplingmatrix[i * RR + j] = simplingmatrix[j][i];
}
}
for (i = 0; i < N; i++)
{
for (j = 0; j < RR; j++)
{
Compressed_matrix[i * RR+ j] = compressed_matrix[j][i];
}
}
//显存分配并将已知数据传到GPU
float *dev_Simpling, *dev_Compress;
cudaMalloc((void**)&dev_Simpling, RR * width * sizeof(float));
cudaMalloc((void**)&dev_Compress, RR * width * sizeof(float));
cudaMemcpy(dev_Simpling, Simplingmatrix, RR * width * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Compress, Compressed_matrix, RR * width * sizeof(float), cudaMemcpyHostToDevice);
float *dev_final_result;
cudaMalloc((void**)&dev_final_result, N* sizeof(float));
//中间变量显存分配,dev_Aug_t和dev_temp3为RR*256,其余256*256
float *dev_Aug_t, *dev_Aug_tt, *dev_temp3, *dev_temp1, *dev_temp2;
float *r_n_temp;
cudaMalloc((void**)&r_n_temp, RR* sizeof(float));
cudaMalloc((void**)&dev_Aug_t, RR * N * sizeof(float));
cudaMalloc((void**)&dev_Aug_tt, N* N * sizeof(float));
cudaMalloc((void**)&dev_temp3, RR * N * sizeof(float));
cudaMalloc((void**)&dev_temp1, N* N * sizeof(float));
cudaMalloc((void**)&dev_temp2, N* N * sizeof(float));
float *judge2;
judge2 = (float*)malloc(sizeof(float));
//向量times*1
float *dev_aug_y;
//矩阵求逆中间变量声明与赋值
float *a_transpose, *E, *A_t, *d_transpose, *a, *a_a, *a_t, *d;
a_transpose = (float*)malloc(sizeof(float)*N*N);
E = (float*)malloc(sizeof(float)*N*N);
A_t = (float*)malloc(sizeof(float)*N*N);
d_transpose = (float*)malloc(sizeof(float)*N*N);
//a = (float*)malloc(sizeof(float)*N*N);
//a_a = (float*)malloc(sizeof(float)*N*N);
//a_t = (float*)malloc(sizeof(float)*N*N);
d = (float*)malloc(sizeof(float)*N*N);
float *dev_a, *dev_d, *dev_a_transpose, *dev_E, *dev_A_t, *dev_A_t_t, *dev_d_transpose;
cudaMalloc((void**)&dev_a, N * N * sizeof(float));
cudaMalloc((void**)&dev_d, N * N * sizeof(float));
cudaMalloc((void**)&dev_a_transpose, N * N * sizeof(float));
cudaMalloc((void**)&dev_E, N * N * sizeof(float));
cudaMalloc((void**)&dev_A_t, N * N * sizeof(float));
cudaMalloc((void**)&dev_A_t_t, N * N * sizeof(float));
cudaMalloc((void**)&dev_d_transpose, N * N * sizeof(float));
cudaMalloc((void**)&dev_aug_y, N * sizeof(float));
int c = 0;
//初始化对角矩阵
for (int i = 0; i<N; i++)
{
for (int j = 0; j<N; j++)
{
d[i*N + j] = 0;
}
}
//初始化单位矩阵
for (int i = 0; i<N; i++)
{
for (int j = 0; j<N; j++)
{
if (i == j)
{
E[i*N + j] = 1;
}
else
{
E[i*N + j] = 0;
}
}
}
//初始化临时储存矩阵
for (int i = 0; i<N; i++){
for (int j = 0; j<N; j++)
{
A_t[i*N + j] = 0;
}
}
cudaMemcpy(dev_d, d, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_d_transpose, d, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_A_t, A_t, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_E, E, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_A_t_t, A_t, N * N * sizeof(float), cudaMemcpyHostToDevice);
//最大值位置
int *dev_pos;
//最大位置对应数组
int *dev_pos_array;
//判断和标记结束
int *dev_c;
float *dev_judge1, *dev_judge2;
//范数
//float *dev_norm;
//取列
float *dev_column;
float *dev_finishomp;
float *dev_finish;
float *dev_column_temp;
float *dev_product_temp;
float *dev_product;
float *dev_r_n;
float *dev_Simp_column;
float zero[RR];
float *dev_zero;
for (i = 0; i < RR; i++)
{
zero[i] = 0;
}
cudaMalloc((void**)&dev_zero, RR * sizeof(float));
cudaMalloc((void**)&dev_finishomp, M * N *sizeof(float));
cudaMalloc((void**)&dev_finish, M * N *sizeof(float));
cudaMemcpy(dev_zero, zero, RR*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_r_n, RR * sizeof(float));
cudaMalloc((void**)&dev_Simp_column, RR * sizeof(float));
cudaMalloc((void**)&dev_product_temp, RR * sizeof(float));
cudaMalloc((void**)&dev_product, N * sizeof(float));
cudaMalloc((void**)&dev_column, RR * sizeof(float));
cudaMalloc((void**)&dev_column_temp, RR * sizeof(float));
//cudaMalloc((void**)&dev_norm, sizeof(float));
cudaMalloc((void**)&dev_pos, sizeof(int));
cudaMalloc((void**)&dev_pos_array, RR * sizeof(int));
cudaMalloc((void**)&dev_c, sizeof(int));
cudaMalloc((void**)&dev_judge1, sizeof(float));
cudaMalloc((void**)&dev_judge2, sizeof(float));
for (i = 0;i < RR;i++)
zero[i] = 0;
printf("OMP start\n");
int col;
int times;
for (i = 0; i < width; i++)
{
cudaMemcpy(dev_column, (dev_Compress + i * RR*sizeof(float)), RR*sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_r_n, dev_column, RR * sizeof(float), cudaMemcpyDeviceToDevice);
for (times = 1; times <= RR; times++)
{
for (col = 0; col < N; col++)
{
cudaMemcpy(dev_Simp_column, &dev_Simpling[col * RR], RR*sizeof(float), cudaMemcpyDeviceToDevice);
vector_dot_product << <blocksPerGrid, threadsPerBlock >> >(dev_product_temp, dev_Simp_column, dev_r_n, RR);
vector_dot_sum << <1, 1 >> >(dev_product_temp);
cudaMemcpy(&dev_product[col], dev_product_temp, sizeof(float), cudaMemcpyDeviceToDevice);
}
d_find_max << <1, 1 >> >(dev_pos, dev_product, N);
int pos = 0;
cudaMemcpy(&pos, dev_pos, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&dev_Aug_tt[(times - 1)*RR], &dev_Simpling[pos * RR], RR*sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(&dev_Simpling[pos*RR], dev_zero, RR*sizeof(float), cudaMemcpyDeviceToDevice);
int ax = (times + BLOCK_SIZE - 1) / BLOCK_SIZE;
int bx = (RR + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 blocks(bx, ax);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
//cudaMemcpy(showmemans, dev_Aug_tt, N*N * sizeof(float), cudaMemcpyDeviceToHost);
matrix_transpose << <blocks, threads >> >(dev_Aug_t, dev_Aug_tt, N, RR);
matMult_gpu << < N, N, sizeof(float)*RR >> >(dev_Aug_tt, dev_Aug_t, dev_temp1, N, RR, N);
cholesky << <1, THREAD_NUM, sizeof(float)*times >> >(dev_temp1, dev_d, dev_d_transpose, times);
matrix_inversion << <(times + THREAD_NUM - 1) / THREAD_NUM, THREAD_NUM >> >(dev_a, dev_E, times);
matrix_trans << <((times + THREAD_NUM - 1) / THREAD_NUM)*times, THREAD_NUM >> >(dev_E, dev_a_transpose, times);
matrix_multiply << <times, THREAD_NUM, sizeof(float)*times >> >(dev_a_transpose, dev_d_transpose, dev_A_t, times);
matrix_multiply << <times, THREAD_NUM, sizeof(float)*times >> >(dev_A_t, dev_E, dev_temp2, times);
matMult_gpu << < N, RR, sizeof(float)*N >> >(dev_temp2, dev_Aug_tt, dev_temp3, N, N, RR);
cudaMemcpy(showmemans, dev_temp2, N*N * sizeof(float), cudaMemcpyDeviceToHost);
matrix_x_vector << < (N + THREAD_NUM - 1 / THREAD_NUM), THREAD_NUM >> >(dev_aug_y, dev_temp3, dev_column, RR);
matrix_x_vector << < (RR + THREAD_NUM - 1 / THREAD_NUM), THREAD_NUM >> >(r_n_temp, dev_Aug_t, dev_aug_y,N);
vector_sub_vector << <1, RR >> >(dev_r_n, dev_column, r_n_temp, RR);
cudaMemcpy(&dev_pos_array[times - 1], dev_pos, sizeof(int), cudaMemcpyDeviceToDevice);
valueMul << <1, 1 >> >(dev_judge1, dev_aug_y, (times - 1));
vector_dot_product << <blocksPerGrid, threadsPerBlock >> >(dev_product_temp, dev_aug_y, dev_aug_y, N);
vector_dot_sum << <1, 1 >> >(dev_product_temp);
valueSqrt << <1, 1 >> >(dev_product_temp, dev_judge1, dev_judge2);
cudaMemcpy(judge2, dev_judge2, sizeof(float), cudaMemcpyDeviceToHost);
if ((*judge2) < 0.05)
{
c = times;
break;
}
}
result << <1, 1 >> >(dev_final_result, dev_aug_y, dev_pos_array, c);
cudaMemcpy((dev_finishomp + i*N*sizeof(float)), dev_final_result, N*sizeof(float), cudaMemcpyDeviceToDevice);
}
printf("OMP end\n");
int ax2 = (M + BLOCK_SIZE - 1) / BLOCK_SIZE;
int bx2 = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 blocks2(bx2, ax2);
dim3 threads2(BLOCK_SIZE, BLOCK_SIZE);
matrix_transpose << < blocks2, threads2 >> >(dev_finish, dev_finishomp, M, N);
cudaMemcpy(ansx, dev_finish, M*N*sizeof(float), cudaMemcpyDeviceToHost);
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
anss[i][j] = ansx[i*N + j];
//anss[i][j] = ans[i][j];
printf("preapare to out.txt\n");
testoutx(anss, height, width, "ans.txt");
////小波反变换
//matrixmultiplicxx(anss, width, wwturn1, height, width, tempx);
//matrixmultiplicxx(ww1, width, tempx, height, width, anss);
//printf("小波变换完成\n");
//for (i = 0; i < M; i++)
//for (j = 0; j < N; j++)
// ans[i][j] = anss[i][j];
//printf("ans 输出完成\n");
//for (i = 0; i < M; i++) {
// for (j = 0; j < N; j++) {
// if (anss[i][j] > 255) {
// ans1[i][j] = 255;
// }
// else if (anss[i][j] < 0) {
// ans1[i][j] = 0;
// }
// else {
// ans1[i][j] = anss[i][j];
// }
// }
//}
//printf("准备输出图像\n");
//writebmp(fo, ans1, height, width);
return 0;
}
void testout(double** x, int height, int width, char*str) {
int i, j;
FILE *fto = fopen(str, "wb");
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
fprintf(fto, "%f\t", x[i][j]);
}
fprintf(fto, "\n");
}
fclose(fto);
}
void testoutx(double x[][N], int height, int width, char*str) {
int i, j;
FILE *fto = fopen(str, "wb");
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
fprintf(fto, "%f\t", x[i][j]);
}
fprintf(fto, "\n");
}
fclose(fto);
}
void readbmp(FILE* fp, double pixel[][N]) {
int i, j;
for (i = N - 1; i >= 0; i--) {
for (j = 0; j < N; j++) {
pixel[i][j] = (double)fgetc(fp);
}
}
fclose(fp);
}
void createsimplingmatrix(int height, int wid, double phi[][N]) {
int i, j;
double x, p[RR][N];
for (i = 0; i < height; i++)
for (j = 0; j < wid; j++) {
phi[i][j] = 0;
p[i][j] = 0;
}
srand((int)time(NULL));
for (i = 0; i < height; i++) {
for (j = 0; j < wid; j++) {
x = gaussrand();
p[i][j] = x;
}
}
for (i = 0; i < height; i++)
for (j = 0; j < wid; j++) {
phi[i][j] = p[i][j];
}
}
double gaussrand() {
static double U, V;
static int phase = 0;
double z;
if (phase == 0)
{
U = (rand() + 1.1) / (RAND_MAX + 2.);
V = rand() / (RAND_MAX + 1.);
z = sqrt(-1 * log(U))* sin(2 * 3.141592654 * V);
}
else
{
z = sqrt(-2 * log(U)) * cos(2 * 3.141592654 * V);
}
phase = 1 - phase;
return z;
}
void writebmp(FILE* fo, unsigned char pixel[][N], int height, int width) {
int i, j;
for (i = height - 1; i >= 0; i--) {
for (j = 0; j < width; j++) {
fwrite(&pixel[j][i], sizeof(unsigned char), 1, fo);
}
}
fclose(fo);
}
double error(double** piexl, double** ans, int height, int width){
double sum = 0, psnr;
int i, j;
for (i = 0; i < height; i++)
for (j = 0; j < width; j++)
sum += pow(fabs(ans[i][j] - piexl[i][j]), 2);
psnr = 10 * log10(255 * 255 / (sum / height / width));
return psnr;
}
|
84f29f8576415a835cecf94f9bd4964f2527baa6.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolygon
template hipError_t gpu_hpmc_free_volume<ShapeSpheropolygon>(const hpmc_free_volume_args_t &args,
const typename ShapeSpheropolygon::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeSpheropolygon>(const hpmc_args_t& args,
const typename ShapeSpheropolygon::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeSpheropolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolygon::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolygon::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 84f29f8576415a835cecf94f9bd4964f2527baa6.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolygon
template cudaError_t gpu_hpmc_free_volume<ShapeSpheropolygon>(const hpmc_free_volume_args_t &args,
const typename ShapeSpheropolygon::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeSpheropolygon>(const hpmc_args_t& args,
const typename ShapeSpheropolygon::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeSpheropolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolygon::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolygon>(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolygon::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
d6713db675c899a2364d67583b5eea30fc5bfd26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "student.hpp"
#include "chronoGPU.hpp"
namespace IMAC {
#define MaxKernelSize 20
__constant__ float matConv_cu_const[MaxKernelSize * MaxKernelSize];
texture<uchar4, 2> tex2DRef;
__global__ void naiveConv(const uchar4* input, const uint imgWidth, const uint imgHeight, const int matSize, uchar4* output) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
const int idOut = idy * imgWidth + idx;
if(idx < imgWidth && idy < imgHeight) {
float3 sum = make_float3(0.f, 0.f, 0.f);
for (uint j = 0; j < matSize; ++j ){
for (uint i = 0; i < matSize; ++i ) {
int dX = idx + i - matSize / 2;
int dY = idy + j - matSize / 2;
// Handle borders
dX = min(max(dX, 0), imgWidth-1);
dY = min(max(dY, 0), imgHeight-1);
const int idMat = j * matSize + i;
const uchar4 pixel = tex2D(tex2DRef, dX, dY);
sum.x += (float)(pixel.x) * matConv_cu_const[idMat];
sum.y += (float)(pixel.y) * matConv_cu_const[idMat];
sum.z += (float)(pixel.z) * matConv_cu_const[idMat];
}
}
output[idOut].x = (uchar)min(max(sum.x, 0.f), 255.f);
output[idOut].y = (uchar)min(max(sum.y, 0.f), 255.f);
output[idOut].z = (uchar)min(max(sum.z, 0.f), 255.f);
output[idOut].w = 255;
}
}
// Runs the naive GPU convolution: uploads the image into pitched device
// memory (read through the 2D texture reference tex2DRef), copies the
// square convolution matrix into __constant__ memory, launches naiveConv
// and downloads the result into `output`.
// Signature unchanged; `resultCPU` is unused here (kept for the caller's
// comparison step).
void studentJob(const std::vector<uchar4> &inputImg, // Input image
const uint imgWidth, const uint imgHeight, // Image size
const std::vector<float> &matConv, // Convolution matrix (square)
const uint matSize, // Matrix size (width or height)
const std::vector<uchar4> &resultCPU, // Just for comparison
std::vector<uchar4> &output // Output image
) {
ChronoGPU chrGPU;
uchar4* input_cu = nullptr;
uchar4* output_cu = nullptr;
size_t pitch;
chrGPU.start();
std::cout << "Allocating:" << std::endl;
hipMalloc((void**)&output_cu, inputImg.size() * sizeof(uchar4));
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
// Pitched rows keep every scanline aligned for 2D texture fetches.
hipMallocPitch((void**)&input_cu, &pitch, imgWidth * sizeof(uchar4), imgHeight);
hipMemcpy2D(input_cu, pitch, inputImg.data(), imgWidth * sizeof(uchar4), imgWidth * sizeof(uchar4), imgHeight, hipMemcpyHostToDevice);
// The convolution matrix lives in __constant__ memory (broadcast reads).
hipMemcpyToSymbol(matConv_cu_const, matConv.data(), matConv.size() * sizeof(float));
hipBindTexture2D(NULL, tex2DRef, input_cu, tex2DRef.channelDesc, imgWidth, imgHeight, pitch);
// GPU compute
std::cout << "Process:" << std::endl;
chrGPU.start();
const dim3 dimThreads(32, 32);
// Rounded-up grid so sizes not divisible by 32 are covered; naiveConv
// bounds-checks the tail threads.
const dim3 dimBlock(imgWidth/dimThreads.x+1, imgHeight/dimThreads.y+1);
hipLaunchKernelGGL(( naiveConv), dim3(dimBlock), dim3(dimThreads) , 0, 0, input_cu, imgWidth, imgHeight, matSize, output_cu);
hipDeviceSynchronize();
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
// copy back to CPU (assumes output was pre-sized to inputImg.size() — TODO confirm caller)
hipMemcpy(output.data(), output_cu, inputImg.size() * sizeof(uchar4), hipMemcpyDeviceToHost);
// Unbind the texture BEFORE freeing the memory it references.
hipUnbindTexture(tex2DRef);
hipFree(input_cu);
hipFree(output_cu);
// NOTE: matConv_cu_const is a __constant__ symbol, not a hipMalloc
// allocation — it must not (and cannot) be freed. The original
// hipFree(matConv_cu_const) call was invalid and has been removed.
}
}
| d6713db675c899a2364d67583b5eea30fc5bfd26.cu | #include "student.hpp"
#include "chronoGPU.hpp"
namespace IMAC {
#define MaxKernelSize 20
__constant__ float matConv_cu_const[MaxKernelSize * MaxKernelSize];
texture<uchar4, 2> tex2DRef;
// Naive convolution: one thread per output pixel. The image is sampled
// through the 2D texture reference tex2DRef (coordinates clamped manually
// at the borders) and the kernel weights come from __constant__ memory;
// the `input` pointer is not dereferenced here.
__global__ void naiveConv(const uchar4* input, const uint imgWidth, const uint imgHeight, const int matSize, uchar4* output) {
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= imgWidth || row >= imgHeight)
        return; // thread falls outside the image (grid is rounded up)
    const int half = matSize / 2;
    float3 acc = make_float3(0.f, 0.f, 0.f);
    for (uint ky = 0; ky < matSize; ++ky) {
        for (uint kx = 0; kx < matSize; ++kx) {
            // Tap position centered on (col, row), clamped to the image.
            int sx = col + kx - half;
            int sy = row + ky - half;
            sx = min(max(sx, 0), imgWidth - 1);
            sy = min(max(sy, 0), imgHeight - 1);
            const float w = matConv_cu_const[ky * matSize + kx];
            const uchar4 p = tex2D(tex2DRef, sx, sy);
            acc.x += (float)(p.x) * w;
            acc.y += (float)(p.y) * w;
            acc.z += (float)(p.z) * w;
        }
    }
    // Clamp to [0, 255]; alpha is forced fully opaque.
    const int outIdx = row * imgWidth + col;
    output[outIdx].x = (uchar)min(max(acc.x, 0.f), 255.f);
    output[outIdx].y = (uchar)min(max(acc.y, 0.f), 255.f);
    output[outIdx].z = (uchar)min(max(acc.z, 0.f), 255.f);
    output[outIdx].w = 255;
}
// Runs the naive GPU convolution: uploads the image into pitched device
// memory (read through the 2D texture reference tex2DRef), copies the
// square convolution matrix into __constant__ memory, launches naiveConv
// and downloads the result into `output`.
// Signature unchanged; `resultCPU` is unused here (kept for the caller's
// comparison step).
void studentJob(const std::vector<uchar4> &inputImg, // Input image
const uint imgWidth, const uint imgHeight, // Image size
const std::vector<float> &matConv, // Convolution matrix (square)
const uint matSize, // Matrix size (width or height)
const std::vector<uchar4> &resultCPU, // Just for comparison
std::vector<uchar4> &output // Output image
) {
ChronoGPU chrGPU;
uchar4* input_cu = nullptr;
uchar4* output_cu = nullptr;
size_t pitch;
chrGPU.start();
std::cout << "Allocating:" << std::endl;
cudaMalloc((void**)&output_cu, inputImg.size() * sizeof(uchar4));
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
// Pitched rows keep every scanline aligned for 2D texture fetches.
cudaMallocPitch((void**)&input_cu, &pitch, imgWidth * sizeof(uchar4), imgHeight);
cudaMemcpy2D(input_cu, pitch, inputImg.data(), imgWidth * sizeof(uchar4), imgWidth * sizeof(uchar4), imgHeight, cudaMemcpyHostToDevice);
// The convolution matrix lives in __constant__ memory (broadcast reads).
cudaMemcpyToSymbol(matConv_cu_const, matConv.data(), matConv.size() * sizeof(float));
cudaBindTexture2D(NULL, tex2DRef, input_cu, tex2DRef.channelDesc, imgWidth, imgHeight, pitch);
// GPU compute
std::cout << "Process:" << std::endl;
chrGPU.start();
const dim3 dimThreads(32, 32);
// Rounded-up grid so sizes not divisible by 32 are covered; naiveConv
// bounds-checks the tail threads.
const dim3 dimBlock(imgWidth/dimThreads.x+1, imgHeight/dimThreads.y+1);
naiveConv<<<dimBlock, dimThreads >>>(input_cu, imgWidth, imgHeight, matSize, output_cu);
cudaDeviceSynchronize();
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl;
// copy back to CPU (assumes output was pre-sized to inputImg.size() — TODO confirm caller)
cudaMemcpy(output.data(), output_cu, inputImg.size() * sizeof(uchar4), cudaMemcpyDeviceToHost);
// Unbind the texture BEFORE freeing the memory it references.
cudaUnbindTexture(tex2DRef);
cudaFree(input_cu);
cudaFree(output_cu);
// NOTE: matConv_cu_const is a __constant__ symbol, not a cudaMalloc
// allocation — it must not (and cannot) be freed. The original
// cudaFree(matConv_cu_const) call was invalid and has been removed.
}
}
|
1e0601157485920e527a88155ea7e1addb9b7bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
surface<void, 3> voxelGrid;
__constant__ int voxelGridSize;
// Classifies every voxel of the 3D surface `voxelGrid` and accumulates the
// counts into result[0..2]: result[0] = intersection (color 3),
// result[1] = only-in-first (color 2), result[2] = only-in-second (color 4).
// Each thread scans a contiguous slab of z-slices.
// NOTE(review): blockIdx is never read, so this kernel is only correct for
// a single-block 1-D launch — launching more than one block would multiply
// every count. Confirm against the host launch configuration.
extern "C" __global__
void compare(int32_t* result)
{
// This is not how you should code a GPU....
// But downloading GB of data to the CPU RAM
// gave me segfaults...
// Slab height per thread (last thread's slab is clipped by the z bound below).
int offsetDim = ceil(voxelGridSize / (float)blockDim.x);
int index = threadIdx.x * offsetDim;
// Per-thread partial counts; folded into the global totals with one
// atomic each at the end to keep contention low.
int intersection = 0;
int inFirst = 0;
int inSecond = 0;
for(int z = index;z<index+offsetDim && z<voxelGridSize;z++)
{
for(int y = 0;y<voxelGridSize;y++)
{
for(int x = 0;x<voxelGridSize;x++)
{
unsigned char color;
surf3Dread(&color, voxelGrid, x, y, z);
if(color == 3)
intersection++;
else if(color == 2)
inFirst++;
else if(color == 4)
inSecond++;
else if(color != 0)
printf("Unmatched %d!\n", color);
}
}
}
atomicAdd(&result[0], intersection);
atomicAdd(&result[1], inFirst);
atomicAdd(&result[2], inSecond);
}
| 1e0601157485920e527a88155ea7e1addb9b7bb1.cu | surface<void, 3> voxelGrid;
__constant__ int voxelGridSize;
// Classifies every voxel of the 3D surface `voxelGrid` and accumulates the
// totals into result[0..2] (intersection / only-first / only-second).
// Each thread scans a contiguous slab of z-slices; intended for a
// single-block 1-D launch (blockIdx is never consulted).
extern "C" __global__
void compare(int32_t* result)
{
    // Slab height per thread; the last thread's slab is clipped by the z bound.
    const int slab = ceil(voxelGridSize / (float)blockDim.x);
    const int zBegin = threadIdx.x * slab;
    // Per-thread partial counts, folded into the global totals at the end
    // with one atomic each to keep contention low.
    int nIntersection = 0;
    int nOnlyFirst = 0;
    int nOnlySecond = 0;
    for (int z = zBegin; z < zBegin + slab && z < voxelGridSize; z++)
    {
        for (int y = 0; y < voxelGridSize; y++)
        {
            for (int x = 0; x < voxelGridSize; x++)
            {
                unsigned char color;
                surf3Dread(&color, voxelGrid, x, y, z);
                switch (color)
                {
                case 3: nIntersection++; break;
                case 2: nOnlyFirst++;    break;
                case 4: nOnlySecond++;   break;
                case 0: break; // empty voxel
                default: printf("Unmatched %d!\n", color); break;
                }
            }
        }
    }
    atomicAdd(&result[0], nIntersection);
    atomicAdd(&result[1], nOnlyFirst);
    atomicAdd(&result[2], nOnlySecond);
}
|
36211be1cbd49b1cc22dded45287191aa46eeff0.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cassert>
#include <hipcub/hipcub.hpp> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
SlicePlugin *CreateSlicePluginDeserialize(const void *buffer, size_t length) {
return new SlicePlugin(buffer, length);
}
REGISTER_TRT_PLUGIN("slice_plugin", CreateSlicePluginDeserialize);
// Generic gather kernel for the Slice plugin: each thread produces one
// output element. `offsets_info` packs, for each of the `dims` dimensions,
// the triple [start offset, output extent, input stride (elements)].
// The table is first staged into dynamic shared memory (3 * dims ints,
// supplied as the launch's shared-memory size) so all threads read it
// cheaply and repeatedly without hitting global memory.
template <typename T>
__global__ void SliceKernel(int num, int dims, const T *input,
const int *offsets_info, T *output) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int shared_data[];
// Cooperative block-strided copy of the offset table into shared memory.
for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) {
shared_data[i] = offsets_info[i];
}
__syncthreads();
if (idx < num) {
// Decompose the linear output index dimension by dimension (innermost
// first) and rebuild the corresponding linear input index.
int t_idx = idx;
int in_idx = 0;
for (int i = dims - 1; i >= 0; i--) {
// output_shape
auto t = t_idx % shared_data[i * 3 + 1];
// out offset
auto s = t + shared_data[i * 3];
// input_seg_offset
in_idx = in_idx + shared_data[i * 3 + 2] * s;
t_idx = t_idx / shared_data[i * 3 + 1];
}
output[idx] = input[in_idx];
}
}
SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) {
deserializeBase(serial_data, serial_length);
DeserializeValue(&serial_data, &serial_length, &starts_);
DeserializeValue(&serial_data, &serial_length, &ends_);
DeserializeValue(&serial_data, &serial_length, &axes_);
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
SlicePlugin::~SlicePlugin() {
hipStreamDestroy(copy_stream_);
hipEventDestroy(copy_event_);
hipFree(offset_temp_data_);
}
SlicePlugin *SlicePlugin::clone() const {
return new SlicePlugin(starts_, ends_, axes_, with_fp16_);
}
bool SlicePlugin::supportsFormat(nvinfer1::DataType type,
nvinfer1::PluginFormat format) const {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kNCHW));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kNCHW));
}
}
nvinfer1::Dims SlicePlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputs,
int nb_input_dims) {
auto in_dims = inputs[0];
nvinfer1::Dims out_dims = in_dims;
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
out_dims.d[axes_[i] - 1] = end - start;
}
return out_dims;
}
// Launches the Slice gather kernel for the static (implicit-batch) plugin.
// Rebuilds full [N, ...] dims from the implicit batch, derives per-dim
// [offset, extent, stride] triples, uploads them asynchronously on
// copy_stream_, and runs SliceKernel on `stream` once the upload event has
// fired. Returns non-zero on failure (TensorRT convention).
int SlicePlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace, hipStream_t stream) {
#else
void *const *outputs, void *workspace,
hipStream_t stream) {
#endif
auto input_dims = getInputDims(0);
// notice input dims is [C, H, W], add input batch dim here
auto out_dims = getOutputDimensions(0, &input_dims, 1);
input_dims.nbDims += 1;
out_dims.nbDims += 1;
// Shift every dim up by one to make room for the batch dim at slot 0.
// NOTE(review): the first iteration writes d[nbDims] — one slot past the
// used range. Fine while nbDims < Dims::MAX_DIMS, but worth confirming
// for maximum-rank inputs.
for (auto i = input_dims.nbDims; i > 0; --i) {
input_dims.d[i] = input_dims.d[i - 1];
out_dims.d[i] = out_dims.d[i - 1];
}
input_dims.d[0] = batch_size;
out_dims.d[0] = batch_size;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
// seg_offsets[i] = element stride of dimension i in the (row-major) input.
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
// Default: no offset, full extent; then apply the configured starts on
// the sliced axes.
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
// Flatten to the [offset, extent, stride] triples SliceKernel expects.
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
// Device buffer for the table: allocated once, reused across enqueues
// (released in the destructor).
if (offset_temp_data_ == nullptr) {
hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
hipMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice,
copy_stream_);
// Make the compute stream wait for the async upload on copy_stream_.
hipEventRecord(copy_event_, copy_stream_);
hipStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
// Dispatch on the configured plugin data type; the third launch argument
// is the dynamic shared-memory size for the offset table.
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
size_t SlicePlugin::getSerializationSize() {
return getBaseSerializationSize() + SerializedSize(getPluginType()) +
SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_);
}
void SlicePlugin::serialize(void *buffer) {
SerializeValue(&buffer, getPluginType());
serializeBase(buffer);
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts,
std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
SlicePluginDynamic::SlicePluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &starts_);
DeserializeValue(&serialData, &serialLength, &ends_);
DeserializeValue(&serialData, &serialLength, &axes_);
DeserializeValue(&serialData, &serialLength, &with_fp16_);
hipEventCreate(©_event_);
hipStreamCreate(©_stream_);
}
void SlicePluginDynamic::destroy() {
hipStreamDestroy(copy_stream_);
hipEventDestroy(copy_event_);
hipFree(offset_temp_data_);
delete this;
}
int SlicePluginDynamic::initialize() { return 0; }
size_t SlicePluginDynamic::getSerializationSize() const {
size_t size = SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_) + SerializedSize(with_fp16_);
return size;
}
void SlicePluginDynamic::serialize(void *buffer) const {
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
auto in_dims = inputs[0];
nvinfer1::DimsExprs ret = in_dims;
// start, ends should greater 0
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
ret.d[axes_[i]] = expr_builder.constant(end - start);
}
return ret;
}
bool SlicePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SlicePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Slice Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace, hipStream_t stream) {
auto input_dims = input_desc[0].dims;
auto out_dims = output_desc[0].dims;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
if (offset_temp_data_ == nullptr) {
hipMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
hipMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice,
copy_stream_);
hipEventRecord(copy_event_, copy_stream_);
hipStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 36211be1cbd49b1cc22dded45287191aa46eeff0.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <stdio.h>
#include <cassert>
#include <cub/cub.cuh> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
SlicePlugin *CreateSlicePluginDeserialize(const void *buffer, size_t length) {
return new SlicePlugin(buffer, length);
}
REGISTER_TRT_PLUGIN("slice_plugin", CreateSlicePluginDeserialize);
// Generic gather kernel for the Slice plugin: each thread produces one
// output element. `offsets_info` packs, for each of the `dims` dimensions,
// the triple [start offset, output extent, input stride (elements)].
// The table is first staged into dynamic shared memory (3 * dims ints,
// supplied as the launch's shared-memory size) so all threads read it
// cheaply and repeatedly without hitting global memory.
template <typename T>
__global__ void SliceKernel(int num, int dims, const T *input,
const int *offsets_info, T *output) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int shared_data[];
// Cooperative block-strided copy of the offset table into shared memory.
for (int i = threadIdx.x; i < dims * 3; i += blockDim.x) {
shared_data[i] = offsets_info[i];
}
__syncthreads();
if (idx < num) {
// Decompose the linear output index dimension by dimension (innermost
// first) and rebuild the corresponding linear input index.
int t_idx = idx;
int in_idx = 0;
for (int i = dims - 1; i >= 0; i--) {
// output_shape
auto t = t_idx % shared_data[i * 3 + 1];
// out offset
auto s = t + shared_data[i * 3];
// input_seg_offset
in_idx = in_idx + shared_data[i * 3 + 2] * s;
t_idx = t_idx / shared_data[i * 3 + 1];
}
output[idx] = input[in_idx];
}
}
SlicePlugin::SlicePlugin(std::vector<int> starts, std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
SlicePlugin::SlicePlugin(void const *serial_data, size_t serial_length) {
deserializeBase(serial_data, serial_length);
DeserializeValue(&serial_data, &serial_length, &starts_);
DeserializeValue(&serial_data, &serial_length, &ends_);
DeserializeValue(&serial_data, &serial_length, &axes_);
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
SlicePlugin::~SlicePlugin() {
cudaStreamDestroy(copy_stream_);
cudaEventDestroy(copy_event_);
cudaFree(offset_temp_data_);
}
SlicePlugin *SlicePlugin::clone() const {
return new SlicePlugin(starts_, ends_, axes_, with_fp16_);
}
bool SlicePlugin::supportsFormat(nvinfer1::DataType type,
nvinfer1::PluginFormat format) const {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kFLOAT ||
type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kNCHW));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kNCHW));
}
}
nvinfer1::Dims SlicePlugin::getOutputDimensions(int index,
const nvinfer1::Dims *inputs,
int nb_input_dims) {
auto in_dims = inputs[0];
nvinfer1::Dims out_dims = in_dims;
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
out_dims.d[axes_[i] - 1] = end - start;
}
return out_dims;
}
int SlicePlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace, cudaStream_t stream) {
#else
void *const *outputs, void *workspace,
cudaStream_t stream) {
#endif
auto input_dims = getInputDims(0);
// notice input dims is [C, H, W], add input batch dim here
auto out_dims = getOutputDimensions(0, &input_dims, 1);
input_dims.nbDims += 1;
out_dims.nbDims += 1;
for (auto i = input_dims.nbDims; i > 0; --i) {
input_dims.d[i] = input_dims.d[i - 1];
out_dims.d[i] = out_dims.d[i - 1];
}
input_dims.d[0] = batch_size;
out_dims.d[0] = batch_size;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
if (offset_temp_data_ == nullptr) {
cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
cudaMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice,
copy_stream_);
cudaEventRecord(copy_event_, copy_stream_);
cudaStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
size_t SlicePlugin::getSerializationSize() {
return getBaseSerializationSize() + SerializedSize(getPluginType()) +
SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_);
}
void SlicePlugin::serialize(void *buffer) {
SerializeValue(&buffer, getPluginType());
serializeBase(buffer);
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
SlicePluginDynamic::SlicePluginDynamic(std::vector<int> starts,
std::vector<int> ends,
std::vector<int> axes, bool with_fp16)
: starts_(starts), ends_(ends), axes_(axes) {
with_fp16_ = with_fp16;
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
SlicePluginDynamic::SlicePluginDynamic(void const *serialData,
size_t serialLength) {
DeserializeValue(&serialData, &serialLength, &starts_);
DeserializeValue(&serialData, &serialLength, &ends_);
DeserializeValue(&serialData, &serialLength, &axes_);
DeserializeValue(&serialData, &serialLength, &with_fp16_);
cudaEventCreate(©_event_);
cudaStreamCreate(©_stream_);
}
void SlicePluginDynamic::destroy() {
cudaStreamDestroy(copy_stream_);
cudaEventDestroy(copy_event_);
cudaFree(offset_temp_data_);
delete this;
}
int SlicePluginDynamic::initialize() { return 0; }
size_t SlicePluginDynamic::getSerializationSize() const {
size_t size = SerializedSize(starts_) + SerializedSize(ends_) +
SerializedSize(axes_) + SerializedSize(with_fp16_);
return size;
}
void SlicePluginDynamic::serialize(void *buffer) const {
SerializeValue(&buffer, starts_);
SerializeValue(&buffer, ends_);
SerializeValue(&buffer, axes_);
SerializeValue(&buffer, with_fp16_);
}
nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
auto in_dims = inputs[0];
nvinfer1::DimsExprs ret = in_dims;
// start, ends should greater 0
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
ret.d[axes_[i]] = expr_builder.constant(end - start);
}
return ret;
}
bool SlicePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SlicePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Slice Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace, cudaStream_t stream) {
auto input_dims = input_desc[0].dims;
auto out_dims = output_desc[0].dims;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
if (offset_temp_data_ == nullptr) {
cudaMalloc(&offset_temp_data_, 3 * num_dims * sizeof(int));
}
cudaMemcpyAsync(offset_temp_data_, offset_info.data(),
sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice,
copy_stream_);
cudaEventRecord(copy_event_, copy_stream_);
cudaStreamWaitEvent(stream, copy_event_, 0);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp32";
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. Slice-->fp16";
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data_, output);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
bb6450e07c4dc021ece5c8914eed4acf95237219.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Debug kernel for a 1-D launch: every thread prints its launch
// coordinates and the input element at its flattened global id.
// Device printf serializes execution — debugging only.
__global__ void print1D(int* input) {
int axis_x = blockIdx.x * blockDim.x + threadIdx.x;
// printf("%d\n", axis_x);
// For a 1-D grid the global id is just the x coordinate.
int gid = axis_x;
// input[gid] = gid;
printf(
"gridDim(%d), blockDim(%d), blockIdx(%d), threadIdx(%d), input(%d), "
"gid(%d)\n",
gridDim.x, blockDim.x, blockIdx.x, threadIdx.x, input[gid], gid);
}
// Debug kernel for a 2-D launch: every thread prints its launch
// coordinates and the input element at its row-major flattened global id.
// Device printf serializes execution — debugging only.
__global__ void print2D(int* input) {
int axis_x = blockIdx.x * blockDim.x + threadIdx.x; // col
int axis_y = blockIdx.y * blockDim.y + threadIdx.y; // row
// printf("%d, %d\n", axis_x, axis_y);
// Row-major flatten: row * total_width + col.
int gid = axis_y * gridDim.x * blockDim.x + axis_x;
// input[gid] = gid;
printf(
"gridDim(%d,%d), blockDim(%d,%d), blockIdx(%d,%d), threadIdx(%d,%d), "
"input(%d), gid(%d)\n",
gridDim.x, gridDim.y, blockDim.x, blockDim.y, blockIdx.x, blockIdx.y,
threadIdx.x, threadIdx.y, input[gid], gid);
}
// Debug kernel for a 3-D launch: every thread prints its launch
// coordinates and the input element at its row-major flattened global id
// (z-plane * plane_size + row * width + col).
// Device printf serializes execution — debugging only.
__global__ void print3D(int* input) {
int axis_x = blockIdx.x * blockDim.x + threadIdx.x;
int axis_y = blockIdx.y * blockDim.y + threadIdx.y;
int axis_z = blockIdx.z * blockDim.z + threadIdx.z;
// printf("%d, %d, %d\n", axis_x, axis_y, axis_z);
// Flatten x/y/z into one linear index over the whole launch volume.
int gid = axis_z * blockDim.x * gridDim.x * blockDim.y * gridDim.y +
axis_y * blockDim.x * gridDim.x + axis_x;
// input[gid] = gid;
printf(
"gridDim(%d,%d,%d), blockDim(%d,%d,%d), blockIdx(%d,%d,%d), "
"threadIdx(%d,%d,%d), input(%d), gid(%d)\n",
gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z,
blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z,
input[gid], gid);
}
// Fills input[0..size-1] with the identity sequence: input[i] == i.
// A non-positive size leaves the buffer untouched.
void initInput(int* input, int size) {
    int i = 0;
    while (i < size) {
        input[i] = i;
        ++i;
    }
}
// Demo driver: runs the three print kernels on 1-D (2 elems), 2-D (2x4)
// and 3-D (2x4x8) launches. Each section allocates a host buffer, fills
// it with 0..n-1, uploads it, launches the kernel and frees everything.
// NOTE(review): block_dim/grid_dim are reused across sections — dim3(0)
// presumably initializes to (0,1,1) with unspecified fields defaulting to
// 1, so each section only overwrites the new axis; confirm this is the
// intended accumulation (1-D sets .x, 2-D adds .y, 3-D adds .z).
// NOTE(review): hip* return codes are never checked here.
int main(void) {
dim3 size(2, 4, 8);
dim3 block_dim(0);
dim3 grid_dim(0);
int* h_input = NULL;
int* d_input = NULL;
//// 1D
printf("\nprint 1D:\n");
h_input = (int*)calloc(size.x, sizeof(int));
initInput(h_input, size.x);
hipMalloc((void**)&d_input, size.x * sizeof(int));
hipMemcpy(d_input, h_input, size.x * sizeof(int), hipMemcpyHostToDevice);
block_dim.x = 2;
grid_dim.x = size.x / block_dim.x;
hipLaunchKernelGGL(( print1D), dim3(grid_dim), dim3(block_dim), 0, 0, d_input);
hipDeviceSynchronize();
hipMemcpy(h_input, d_input, size.x * sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_input);
free(h_input);
//// 2D
printf("\nprint 2D:\n");
h_input = (int*)calloc(size.x * size.y, sizeof(int));
initInput(h_input, size.x * size.y);
hipMalloc((void**)&d_input, size.x * size.y * sizeof(int));
hipMemcpy(d_input, h_input, size.x * size.y * sizeof(int),
hipMemcpyHostToDevice);
block_dim.y = 4;
grid_dim.y = size.y / block_dim.y;
hipLaunchKernelGGL(( print2D), dim3(grid_dim), dim3(block_dim), 0, 0, d_input);
hipDeviceSynchronize();
hipMemcpy(h_input, d_input, size.x * size.y * sizeof(int),
hipMemcpyDeviceToHost);
hipFree(d_input);
free(h_input);
//// 3D
printf("\nprint 3D:\n");
h_input = (int*)calloc(size.x * size.y * size.z, sizeof(int));
initInput(h_input, size.x * size.y * size.z);
hipMalloc((void**)&d_input, size.x * size.y * size.z * sizeof(int));
hipMemcpy(d_input, h_input, size.x * size.y * size.z * sizeof(int),
hipMemcpyHostToDevice);
block_dim.z = 8;
grid_dim.z = size.z / block_dim.z;
hipLaunchKernelGGL(( print3D), dim3(grid_dim), dim3(block_dim), 0, 0, d_input);
hipDeviceSynchronize();
hipMemcpy(h_input, d_input, size.x * size.y * size.z * sizeof(int),
hipMemcpyDeviceToHost);
hipFree(d_input);
free(h_input);
//// reset
hipDeviceReset();
}
| bb6450e07c4dc021ece5c8914eed4acf95237219.cu | #include <stdio.h>
__global__ void print1D(int* input) {
int axis_x = blockIdx.x * blockDim.x + threadIdx.x;
// printf("%d\n", axis_x);
int gid = axis_x;
// input[gid] = gid;
printf(
"gridDim(%d), blockDim(%d), blockIdx(%d), threadIdx(%d), input(%d), "
"gid(%d)\n",
gridDim.x, blockDim.x, blockIdx.x, threadIdx.x, input[gid], gid);
}
__global__ void print2D(int* input) {
int axis_x = blockIdx.x * blockDim.x + threadIdx.x; // col
int axis_y = blockIdx.y * blockDim.y + threadIdx.y; // row
// printf("%d, %d\n", axis_x, axis_y);
int gid = axis_y * gridDim.x * blockDim.x + axis_x;
// input[gid] = gid;
printf(
"gridDim(%d,%d), blockDim(%d,%d), blockIdx(%d,%d), threadIdx(%d,%d), "
"input(%d), gid(%d)\n",
gridDim.x, gridDim.y, blockDim.x, blockDim.y, blockIdx.x, blockIdx.y,
threadIdx.x, threadIdx.y, input[gid], gid);
}
__global__ void print3D(int* input) {
int axis_x = blockIdx.x * blockDim.x + threadIdx.x;
int axis_y = blockIdx.y * blockDim.y + threadIdx.y;
int axis_z = blockIdx.z * blockDim.z + threadIdx.z;
// printf("%d, %d, %d\n", axis_x, axis_y, axis_z);
int gid = axis_z * blockDim.x * gridDim.x * blockDim.y * gridDim.y +
axis_y * blockDim.x * gridDim.x + axis_x;
// input[gid] = gid;
printf(
"gridDim(%d,%d,%d), blockDim(%d,%d,%d), blockIdx(%d,%d,%d), "
"threadIdx(%d,%d,%d), input(%d), gid(%d)\n",
gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z,
blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z,
input[gid], gid);
}
void initInput(int* input, int size) {
for (int index = 0; index < size; index++) {
input[index] = index;
}
}
int main(void) {
dim3 size(2, 4, 8);
dim3 block_dim(0);
dim3 grid_dim(0);
int* h_input = NULL;
int* d_input = NULL;
//// 1D
printf("\nprint 1D:\n");
h_input = (int*)calloc(size.x, sizeof(int));
initInput(h_input, size.x);
cudaMalloc((void**)&d_input, size.x * sizeof(int));
cudaMemcpy(d_input, h_input, size.x * sizeof(int), cudaMemcpyHostToDevice);
block_dim.x = 2;
grid_dim.x = size.x / block_dim.x;
print1D<<<grid_dim, block_dim>>>(d_input);
cudaDeviceSynchronize();
cudaMemcpy(h_input, d_input, size.x * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_input);
free(h_input);
//// 2D
printf("\nprint 2D:\n");
h_input = (int*)calloc(size.x * size.y, sizeof(int));
initInput(h_input, size.x * size.y);
cudaMalloc((void**)&d_input, size.x * size.y * sizeof(int));
cudaMemcpy(d_input, h_input, size.x * size.y * sizeof(int),
cudaMemcpyHostToDevice);
block_dim.y = 4;
grid_dim.y = size.y / block_dim.y;
print2D<<<grid_dim, block_dim>>>(d_input);
cudaDeviceSynchronize();
cudaMemcpy(h_input, d_input, size.x * size.y * sizeof(int),
cudaMemcpyDeviceToHost);
cudaFree(d_input);
free(h_input);
//// 3D
printf("\nprint 3D:\n");
h_input = (int*)calloc(size.x * size.y * size.z, sizeof(int));
initInput(h_input, size.x * size.y * size.z);
cudaMalloc((void**)&d_input, size.x * size.y * size.z * sizeof(int));
cudaMemcpy(d_input, h_input, size.x * size.y * size.z * sizeof(int),
cudaMemcpyHostToDevice);
block_dim.z = 8;
grid_dim.z = size.z / block_dim.z;
print3D<<<grid_dim, block_dim>>>(d_input);
cudaDeviceSynchronize();
cudaMemcpy(h_input, d_input, size.x * size.y * size.z * sizeof(int),
cudaMemcpyDeviceToHost);
cudaFree(d_input);
free(h_input);
//// reset
cudaDeviceReset();
}
|
9ca3a62feb732bdcbeb66d582dabb62e3c24adaf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<iostream>
#include <sys/time.h>
#include<bits/stdc++.h>
using namespace std;
struct edgepairs{
int x;
int y;
};
bool compareTwoEdgePairs(edgepairs a, edgepairs b)
{
if (a.x != b.x)
return a.x < b.x;
if (a.y != b.y)
return a.y < b.y;
return true;
}
// complete the following kernel...
__global__ void dkernel_Adds(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
}
// complete the following kernel...
__global__ void dkernel_Mins(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
}
// complete the following kernel...
__global__ void dkernel_Maxs(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
}
int main(int argc,char **argv){
//variable declarations
int m,n;
int number;
int numofquery;
int op;
struct timeval t1, t2;
vector <double> kerneltime;
//File pointer declaration
FILE *filePointer;
//File Opening for read
char *filename = argv[1];
filePointer = fopen( filename , "r") ;
//checking if file ptr is NULL
if ( filePointer == NULL )
{
printf( "input.txt file failed to open." ) ;
return 0;
}
fscanf(filePointer, "%d", &n ); //scaning the number of vertices
fscanf(filePointer, "%d", &m ); //scaning the number of edges
//D.S to store the input graph in COO format
vector <edgepairs> COO(m);
//Reading from file and populate the COO
for(int i=0 ; i<m ; i++ )
{
for(int j=0;j<2;j++){
if ( fscanf(filePointer, "%d", &number) != 1)
break;
if( j%2 == 0)
{
if(number >= 1 && number <= 10000)
COO[i].x = number;
}
else
{
if(number >= 1 && number <= 10000)
COO[i].y = number;
}
}
}
// COO done...
// sort the COO
sort(COO.begin(),COO.end(),compareTwoEdgePairs);
//sorting COO done..
// Converting the graph in COO format to CSR format..
// create the CSR
int *OA = (int *)malloc( (n+1)*sizeof(int)); //Offsets Array
for(int i=0;i<n+1;i++){
OA[i] = 0;
}
int *CA = (int *)malloc(m*sizeof(int)); //Coordinates Array
OA[0]=0;
//initialize the Coordinates Array
for(int i=0;i<m;i++){
if(COO[i].y >= 1 && COO[i].y <= 10000)
CA[i] = COO[i].y - 1;
}
//initialize the Offsets Array
for(int i=0;i<m;i++){
if(COO[i].x >= 1 && COO[i].x <= 10000)
OA[COO[i].x]++; //store the frequency..
}
for(int i=0;i<n;i++){
OA[i+1] += OA[i]; // do cumulative sum..
}
// Converting the graph to CSR done..
// copy initial local values to the array from the file
int *initlocalvals = (int *)malloc(n*sizeof(int));;
for(int i=0 ; i<n ; i++ )
{
if ( fscanf(filePointer, "%d", &number) != 1)
break;
initlocalvals[i] = number;
}
// copying local vals end..
// get number of queries from the file
fscanf(filePointer, "%d", &numofquery);
//copy OA,CA and initlocalvals to the GPU Memory
int *gpuOA, *gpuCA, *gpulocals;
hipMalloc( &gpuOA, sizeof(int) * (1+n) );
hipMalloc( &gpuCA, sizeof(int) * m );
hipMalloc( &gpulocals, sizeof(int) * n );
hipMemcpy(gpuOA, OA, sizeof(int) * (1+n), hipMemcpyHostToDevice);
hipMemcpy(gpuCA, CA, sizeof(int) * m, hipMemcpyHostToDevice);
hipMemcpy(gpulocals, initlocalvals, sizeof(int) * n, hipMemcpyHostToDevice);
int *currentupdate = (int *)malloc(n*sizeof(int)); // array to store the updates that are pushed by each vertex to there neighbors
int *gpucurrentupdate; // same as above but on GPU
hipMalloc( &gpucurrentupdate, sizeof(int) * n );
int *results = (int *)malloc(n*sizeof(int)); // storing the results from GPU to CPU for the enumerate query
// open the output.txt to write the query results
char *fname = argv[2];
FILE *fptr;
fptr = fopen(fname,"w");
for(int i=0;i<numofquery;i++){
//read the operator
fscanf(filePointer, "%d", &op);
if(op != 3){ // if operator is other then enumerate (i.e. +,min,max)
// read the current updates in the array
for(int j=0 ; j<n ; j++ )
{
if ( fscanf(filePointer, "%d", &number) != 1)
break;
currentupdate[j] = number;
}
// copy current updates to gpu
hipMemcpy(gpucurrentupdate, currentupdate, sizeof(int) * n, hipMemcpyHostToDevice);
//kernel launches
if(op == 0) {
gettimeofday(&t1, 0);
hipLaunchKernelGGL(( dkernel_Adds), dim3(n),dim3(1), 0, 0, gpuOA,gpuCA,gpulocals,gpucurrentupdate);
hipDeviceSynchronize();
gettimeofday(&t2, 0);
}
if(op == 1) {
gettimeofday(&t1, 0);
hipLaunchKernelGGL(( dkernel_Mins), dim3(n),dim3(1), 0, 0, gpuOA,gpuCA,gpulocals,gpucurrentupdate);
hipDeviceSynchronize();
gettimeofday(&t2, 0);
}
if(op == 2) {
gettimeofday(&t1, 0);
hipLaunchKernelGGL(( dkernel_Maxs), dim3(n),dim3(1), 0, 0, gpuOA,gpuCA,gpulocals,gpucurrentupdate);
hipDeviceSynchronize();
gettimeofday(&t2, 0);
}
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; // Time taken by kernel in seconds
kerneltime.push_back(time);
printf("Time taken by kernel to execute is: %.6f ms\n", time);
}
else{ // if operator is enumnerate then store the results to file
//print local values of each vertices.
hipMemcpy(results, gpulocals, n * sizeof(int), hipMemcpyDeviceToHost); // get each locals from GPU
for(int j=0;j<n;j++){
fprintf(fptr ,"%d ", results[j] );
}
fprintf(fptr,"\n");
}
}
int nall = kerneltime.size();
double sumtime=0;
for(int i=0;i<nall;i++){
sumtime += kerneltime[i];
}
// print the time taken by all the kernels of the current test-case
cout << "\ntotal time taken by the current test-case is " << sumtime << " ms\n";
fclose(fptr);
fclose(filePointer);
return 0;
}
| 9ca3a62feb732bdcbeb66d582dabb62e3c24adaf.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<iostream>
#include <sys/time.h>
#include<bits/stdc++.h>
using namespace std;
struct edgepairs{
int x;
int y;
};
bool compareTwoEdgePairs(edgepairs a, edgepairs b)
{
if (a.x != b.x)
return a.x < b.x;
if (a.y != b.y)
return a.y < b.y;
return true;
}
// complete the following kernel...
__global__ void dkernel_Adds(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
}
// complete the following kernel...
__global__ void dkernel_Mins(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
}
// complete the following kernel...
__global__ void dkernel_Maxs(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
}
int main(int argc,char **argv){
//variable declarations
int m,n;
int number;
int numofquery;
int op;
struct timeval t1, t2;
vector <double> kerneltime;
//File pointer declaration
FILE *filePointer;
//File Opening for read
char *filename = argv[1];
filePointer = fopen( filename , "r") ;
//checking if file ptr is NULL
if ( filePointer == NULL )
{
printf( "input.txt file failed to open." ) ;
return 0;
}
fscanf(filePointer, "%d", &n ); //scaning the number of vertices
fscanf(filePointer, "%d", &m ); //scaning the number of edges
//D.S to store the input graph in COO format
vector <edgepairs> COO(m);
//Reading from file and populate the COO
for(int i=0 ; i<m ; i++ )
{
for(int j=0;j<2;j++){
if ( fscanf(filePointer, "%d", &number) != 1)
break;
if( j%2 == 0)
{
if(number >= 1 && number <= 10000)
COO[i].x = number;
}
else
{
if(number >= 1 && number <= 10000)
COO[i].y = number;
}
}
}
// COO done...
// sort the COO
sort(COO.begin(),COO.end(),compareTwoEdgePairs);
//sorting COO done..
// Converting the graph in COO format to CSR format..
// create the CSR
int *OA = (int *)malloc( (n+1)*sizeof(int)); //Offsets Array
for(int i=0;i<n+1;i++){
OA[i] = 0;
}
int *CA = (int *)malloc(m*sizeof(int)); //Coordinates Array
OA[0]=0;
//initialize the Coordinates Array
for(int i=0;i<m;i++){
if(COO[i].y >= 1 && COO[i].y <= 10000)
CA[i] = COO[i].y - 1;
}
//initialize the Offsets Array
for(int i=0;i<m;i++){
if(COO[i].x >= 1 && COO[i].x <= 10000)
OA[COO[i].x]++; //store the frequency..
}
for(int i=0;i<n;i++){
OA[i+1] += OA[i]; // do cumulative sum..
}
// Converting the graph to CSR done..
// copy initial local values to the array from the file
int *initlocalvals = (int *)malloc(n*sizeof(int));;
for(int i=0 ; i<n ; i++ )
{
if ( fscanf(filePointer, "%d", &number) != 1)
break;
initlocalvals[i] = number;
}
// copying local vals end..
// get number of queries from the file
fscanf(filePointer, "%d", &numofquery);
//copy OA,CA and initlocalvals to the GPU Memory
int *gpuOA, *gpuCA, *gpulocals;
cudaMalloc( &gpuOA, sizeof(int) * (1+n) );
cudaMalloc( &gpuCA, sizeof(int) * m );
cudaMalloc( &gpulocals, sizeof(int) * n );
cudaMemcpy(gpuOA, OA, sizeof(int) * (1+n), cudaMemcpyHostToDevice);
cudaMemcpy(gpuCA, CA, sizeof(int) * m, cudaMemcpyHostToDevice);
cudaMemcpy(gpulocals, initlocalvals, sizeof(int) * n, cudaMemcpyHostToDevice);
int *currentupdate = (int *)malloc(n*sizeof(int)); // array to store the updates that are pushed by each vertex to there neighbors
int *gpucurrentupdate; // same as above but on GPU
cudaMalloc( &gpucurrentupdate, sizeof(int) * n );
int *results = (int *)malloc(n*sizeof(int)); // storing the results from GPU to CPU for the enumerate query
// open the output.txt to write the query results
char *fname = argv[2];
FILE *fptr;
fptr = fopen(fname,"w");
for(int i=0;i<numofquery;i++){
//read the operator
fscanf(filePointer, "%d", &op);
if(op != 3){ // if operator is other then enumerate (i.e. +,min,max)
// read the current updates in the array
for(int j=0 ; j<n ; j++ )
{
if ( fscanf(filePointer, "%d", &number) != 1)
break;
currentupdate[j] = number;
}
// copy current updates to gpu
cudaMemcpy(gpucurrentupdate, currentupdate, sizeof(int) * n, cudaMemcpyHostToDevice);
//kernel launches
if(op == 0) {
gettimeofday(&t1, 0);
dkernel_Adds<<<n,1>>>(gpuOA,gpuCA,gpulocals,gpucurrentupdate);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
}
if(op == 1) {
gettimeofday(&t1, 0);
dkernel_Mins<<<n,1>>>(gpuOA,gpuCA,gpulocals,gpucurrentupdate);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
}
if(op == 2) {
gettimeofday(&t1, 0);
dkernel_Maxs<<<n,1>>>(gpuOA,gpuCA,gpulocals,gpucurrentupdate);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
}
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; // Time taken by kernel in seconds
kerneltime.push_back(time);
printf("Time taken by kernel to execute is: %.6f ms\n", time);
}
else{ // if operator is enumnerate then store the results to file
//print local values of each vertices.
cudaMemcpy(results, gpulocals, n * sizeof(int), cudaMemcpyDeviceToHost); // get each locals from GPU
for(int j=0;j<n;j++){
fprintf(fptr ,"%d ", results[j] );
}
fprintf(fptr,"\n");
}
}
int nall = kerneltime.size();
double sumtime=0;
for(int i=0;i<nall;i++){
sumtime += kerneltime[i];
}
// print the time taken by all the kernels of the current test-case
cout << "\ntotal time taken by the current test-case is " << sumtime << " ms\n";
fclose(fptr);
fclose(filePointer);
return 0;
}
|
25304e44bfb1e6ee61ae64bd62308ba4e684112f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void standard_kernel(float a, float *out, int iters)
{
int i;
int tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if(tid == 0)
{
float tmp;
for (i = 0; i < iters; i++)
{
tmp = powf(a, 2.0f);
}
*out = tmp;
}
} | 25304e44bfb1e6ee61ae64bd62308ba4e684112f.cu | #include "includes.h"
__global__ void standard_kernel(float a, float *out, int iters)
{
int i;
int tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if(tid == 0)
{
float tmp;
for (i = 0; i < iters; i++)
{
tmp = powf(a, 2.0f);
}
*out = tmp;
}
} |
9094581c1fb778bd382521d388fd1a6df5677e83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/tuple.h>
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.hpp"
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md
static const __device__ __constant__ uint8_t g_list2struct[16] = {0,
1,
2,
ST_FLD_BYTE,
ST_FLD_DOUBLE,
5,
ST_FLD_I16,
7,
ST_FLD_I32,
9,
ST_FLD_I64,
ST_FLD_BINARY,
ST_FLD_STRUCT,
ST_FLD_MAP,
ST_FLD_SET,
ST_FLD_LIST};
struct byte_stream_s {
const uint8_t* cur;
const uint8_t* end;
const uint8_t* base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
/**
* @brief Get current byte from the byte stream
*
* @param[in] bs Byte stream
*
* @return Current byte pointed to by the byte stream
*/
inline __device__ unsigned int getb(byte_stream_s* bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s* bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
/**
* @brief Decode unsigned integer from a byte stream using VarInt encoding
*
* Concatenate least significant 7 bits of each byte to form a 32 bit
* integer. Most significant bit of each byte indicates if more bytes
* are to be used to form the number.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
__device__ uint32_t get_u32(byte_stream_s* bs)
{
uint32_t v = 0, l = 0, c;
do {
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
/**
* @brief Decode signed integer from a byte stream using zigzag encoding
*
* The number n encountered in a byte stream translates to
* -1^(n%2) * ceil(n/2), with the exception of 0 which remains the same.
* i.e. 0, 1, 2, 3, 4, 5 etc convert to 0, -1, 1, -2, 2 respectively.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
inline __device__ int32_t get_i32(byte_stream_s* bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
__device__ void skip_struct_field(byte_stream_s* bs, int field_type)
{
int struct_depth = 0;
int rep_cnt = 0;
do {
if (rep_cnt != 0) {
rep_cnt--;
} else if (struct_depth != 0) {
unsigned int c;
do {
c = getb(bs);
if (!c) --struct_depth;
} while (!c && struct_depth);
if (!struct_depth) break;
field_type = c & 0xf;
if (!(c & 0xf0)) get_i32(bs);
}
switch (field_type) {
case ST_FLD_TRUE:
case ST_FLD_FALSE: break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64: get_u32(bs); break;
case ST_FLD_BYTE: skip_bytes(bs, 1); break;
case ST_FLD_DOUBLE: skip_bytes(bs, 8); break;
case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break;
case ST_FLD_LIST:
case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled
auto const c = getb(bs);
int n = c >> 4;
if (n == 0xf) n = get_u32(bs);
field_type = g_list2struct[c & 0xf];
if (field_type == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
} break;
case ST_FLD_STRUCT: struct_depth++; break;
}
} while (rep_cnt || struct_depth);
}
/**
* @brief Functor to set value to 32 bit integer read from byte stream
*
* @return True if field type is not int32
*/
struct ParquetFieldInt32 {
int field;
int32_t& val;
__device__ ParquetFieldInt32(int f, int32_t& v) : field(f), val(v) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
val = get_i32(bs);
return (field_type != ST_FLD_I32);
}
};
/**
* @brief Functor to set value to enum read from byte stream
*
* @return True if field type is not int32
*/
template <typename Enum>
struct ParquetFieldEnum {
int field;
Enum& val;
__device__ ParquetFieldEnum(int f, Enum& v) : field(f), val(v) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
val = static_cast<Enum>(get_i32(bs));
return (field_type != ST_FLD_I32);
}
};
/**
* @brief Functor to run operator on byte stream
*
* @return True if field type is not struct type or if the calling operator
* fails
*/
template <typename Operator>
struct ParquetFieldStruct {
int field;
Operator op;
__device__ ParquetFieldStruct(int f) : field(f) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
return ((field_type != ST_FLD_STRUCT) || !op(bs));
}
};
/**
* @brief Functor to run an operator
*
* The purpose of this functor is to replace a switch case. If the field in
* the argument is equal to the field specified in any element of the tuple
* of operators then it is run with the byte stream and field type arguments.
*
* If the field does not match any of the functors then skip_struct_field is
* called over the byte stream.
*
* @return Return value of the selected operator or false if no operator
* matched the field value
*/
template <int index>
struct FunctionSwitchImpl {
template <typename... Operator>
static inline __device__ bool run(byte_stream_s* bs,
int field_type,
const int& field,
thrust::tuple<Operator...>& ops)
{
if (field == thrust::get<index>(ops).field) {
return thrust::get<index>(ops)(bs, field_type);
} else {
return FunctionSwitchImpl<index - 1>::run(bs, field_type, field, ops);
}
}
};
template <>
struct FunctionSwitchImpl<0> {
template <typename... Operator>
static inline __device__ bool run(byte_stream_s* bs,
int field_type,
const int& field,
thrust::tuple<Operator...>& ops)
{
if (field == thrust::get<0>(ops).field) {
return thrust::get<0>(ops)(bs, field_type);
} else {
skip_struct_field(bs, field_type);
return false;
}
}
};
/**
* @brief Function to parse page header based on the tuple of functors provided
*
* Bytes are read from the byte stream and the field delta and field type are
* matched up against user supplied reading functors. If they match then the
* corresponding values are written to references pointed to by the functors.
*
* @return Returns false if an unexpected field is encountered while reading
* byte stream. Otherwise true is returned.
*/
template <typename... Operator>
inline __device__ bool parse_header(thrust::tuple<Operator...>& op, byte_stream_s* bs)
{
constexpr int index = thrust::tuple_size<thrust::tuple<Operator...>>::value - 1;
int field = 0;
while (true) {
auto const current_byte = getb(bs);
if (!current_byte) break;
int const field_delta = current_byte >> 4;
int const field_type = current_byte & 0xf;
field = field_delta ? field + field_delta : get_i32(bs);
bool exit_function = FunctionSwitchImpl<index>::run(bs, field_type, field, op);
if (exit_function) { return false; }
}
return true;
}
struct gpuParseDataPageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldEnum<Encoding>(2, bs->page.encoding),
ParquetFieldEnum<Encoding>(3, bs->page.definition_level_encoding),
ParquetFieldEnum<Encoding>(4, bs->page.repetition_level_encoding));
return parse_header(op, bs);
}
};
struct gpuParseDictionaryPageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldEnum<Encoding>(2, bs->page.encoding));
return parse_header(op, bs);
}
};
struct gpuParseDataPageHeaderV2 {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldInt32(3, bs->page.num_rows),
ParquetFieldEnum<Encoding>(4, bs->page.encoding),
ParquetFieldEnum<Encoding>(5, bs->page.definition_level_encoding),
ParquetFieldEnum<Encoding>(6, bs->page.repetition_level_encoding));
return parse_header(op, bs);
}
};
struct gpuParsePageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldEnum<PageType>(1, bs->page_type),
ParquetFieldInt32(2, bs->page.uncompressed_page_size),
ParquetFieldInt32(3, bs->page.compressed_page_size),
ParquetFieldStruct<gpuParseDataPageHeader>(5),
ParquetFieldStruct<gpuParseDictionaryPageHeader>(7),
ParquetFieldStruct<gpuParseDataPageHeaderV2>(8));
return parse_header(op, bs);
}
};
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc* chunks, int32_t num_chunks)
{
gpuParsePageHeader parse_page_header;
__shared__ byte_stream_s bs_g[4];
int lane_id = threadIdx.x % 32;
int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);
byte_stream_s* const bs = &bs_g[threadIdx.x / 32];
if (chunk < num_chunks and lane_id == 0) bs->ck = chunks[chunk];
__syncthreads();
if (chunk < num_chunks) {
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo* page_info;
if (!lane_id) {
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.src_col_schema = bs->ck.src_col_schema;
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
__syncwarp();
while (values_found < num_values && bs->cur < bs->end) {
int index_out = -1;
if (lane_id == 0) {
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) {
switch (bs->page_type) {
case PageType::DATA_PAGE:
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.num_rows = bs->page.num_input_values;
case PageType::DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags = 0;
values_found += bs->page.num_input_values;
break;
case PageType::DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
break;
default: index_out = -1; break;
}
bs->page.page_data = const_cast<uint8_t*>(bs->cur);
bs->cur += bs->page.compressed_page_size;
} else {
bs->cur = bs->end;
}
}
index_out = shuffle(index_out);
if (index_out >= 0 && index_out < max_num_pages && lane_id == 0)
page_info[index_out] = bs->page;
num_values = shuffle(num_values);
__syncwarp();
}
if (lane_id == 0) {
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc* chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int lane_id = threadIdx.x % 32;
int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);
ColumnChunkDesc* const ck = &chunk_g[threadIdx.x / 32];
if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk];
__syncthreads();
if (chunk >= num_chunks) { return; }
if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) {
// Data type to describe a string
string_index_pair* dict_index = ck->str_dict_index;
const uint8_t* dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_input_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++) {
int len = 0;
if (cur + 4 <= dict_size) {
len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size) {
pos = cur;
cur = cur + 4 + len;
} else {
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].first = reinterpret_cast<const char*>(dict + pos + 4);
dict_index[i].second = len;
}
}
}
void __host__ DecodePageHeaders(ColumnChunkDesc* chunks,
int32_t num_chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, num_chunks);
}
void __host__ BuildStringDictionaryIndex(ColumnChunkDesc* chunks,
int32_t num_chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, num_chunks);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| 9094581c1fb778bd382521d388fd1a6df5677e83.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/tuple.h>
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.hpp"
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md
static const __device__ __constant__ uint8_t g_list2struct[16] = {0,
1,
2,
ST_FLD_BYTE,
ST_FLD_DOUBLE,
5,
ST_FLD_I16,
7,
ST_FLD_I32,
9,
ST_FLD_I64,
ST_FLD_BINARY,
ST_FLD_STRUCT,
ST_FLD_MAP,
ST_FLD_SET,
ST_FLD_LIST};
struct byte_stream_s {
const uint8_t* cur;
const uint8_t* end;
const uint8_t* base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
/**
* @brief Get current byte from the byte stream
*
* @param[in] bs Byte stream
*
* @return Current byte pointed to by the byte stream
*/
inline __device__ unsigned int getb(byte_stream_s* bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s* bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
/**
* @brief Decode unsigned integer from a byte stream using VarInt encoding
*
* Concatenate least significant 7 bits of each byte to form a 32 bit
* integer. Most significant bit of each byte indicates if more bytes
* are to be used to form the number.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
__device__ uint32_t get_u32(byte_stream_s* bs)
{
uint32_t v = 0, l = 0, c;
do {
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
/**
* @brief Decode signed integer from a byte stream using zigzag encoding
*
* The number n encountered in a byte stream translates to
* -1^(n%2) * ceil(n/2), with the exception of 0 which remains the same.
* i.e. 0, 1, 2, 3, 4, 5 etc convert to 0, -1, 1, -2, 2 respectively.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
inline __device__ int32_t get_i32(byte_stream_s* bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
/**
 * @brief Skip over a thrift struct field without decoding its value
 *
 * Scalar fields are consumed directly. Struct/list/set fields are tracked
 * with a nesting depth and a repetition count so the whole (possibly nested)
 * value is skipped. Skipping a list of lists is not handled (see NOTE below).
 *
 * @param[in] bs Byte stream
 * @param[in] field_type Thrift compact-protocol type code of the field
 */
__device__ void skip_struct_field(byte_stream_s* bs, int field_type)
{
  int struct_depth = 0;  // open nested structs still to be terminated
  int rep_cnt = 0;       // remaining scalar list/set elements to skip
  do {
    if (rep_cnt != 0) {
      // consuming one more scalar element of a list/set
      rep_cnt--;
    } else if (struct_depth != 0) {
      // inside a nested struct: read field headers; a zero byte closes one level
      unsigned int c;
      do {
        c = getb(bs);
        if (!c) --struct_depth;
      } while (!c && struct_depth);
      if (!struct_depth) break;
      field_type = c & 0xf;
      // zero field-id delta: absolute field id follows as a zigzag varint
      if (!(c & 0xf0)) get_i32(bs);
    }
    switch (field_type) {
      case ST_FLD_TRUE:
      case ST_FLD_FALSE: break;  // value is encoded in the type nibble itself
      case ST_FLD_I16:
      case ST_FLD_I32:
      case ST_FLD_I64: get_u32(bs); break;  // varint: consuming it skips it
      case ST_FLD_BYTE: skip_bytes(bs, 1); break;
      case ST_FLD_DOUBLE: skip_bytes(bs, 8); break;
      case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break;  // length-prefixed
      case ST_FLD_LIST:
      case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled
        auto const c = getb(bs);
        // short form: element count in upper nibble; 0xf means varint follows
        int n = c >> 4;
        if (n == 0xf) n = get_u32(bs);
        field_type = g_list2struct[c & 0xf];
        if (field_type == ST_FLD_STRUCT)
          struct_depth += n;
        else
          rep_cnt = n;
      } break;
      case ST_FLD_STRUCT: struct_depth++; break;
    }
  } while (rep_cnt || struct_depth);
}
/**
* @brief Functor to set value to 32 bit integer read from byte stream
*
* @return True if field type is not int32
*/
struct ParquetFieldInt32 {
  int field;     // thrift field id this functor responds to
  int32_t& val;  // destination for the decoded value
  __device__ ParquetFieldInt32(int f, int32_t& v) : field(f), val(v) {}
  // Decodes a zigzag varint into val; returns true (error) on wire-type mismatch.
  inline __device__ bool operator()(byte_stream_s* bs, int field_type)
  {
    val = get_i32(bs);
    return (field_type != ST_FLD_I32);
  }
};
/**
* @brief Functor to set value to enum read from byte stream
*
* @return True if field type is not int32
*/
template <typename Enum>
struct ParquetFieldEnum {
  int field;  // thrift field id this functor responds to
  Enum& val;  // destination for the decoded enum value
  __device__ ParquetFieldEnum(int f, Enum& v) : field(f), val(v) {}
  // Decodes the enum's underlying int32; returns true (error) on type mismatch.
  inline __device__ bool operator()(byte_stream_s* bs, int field_type)
  {
    val = static_cast<Enum>(get_i32(bs));
    return (field_type != ST_FLD_I32);
  }
};
/**
* @brief Functor to run operator on byte stream
*
* @return True if field type is not struct type or if the calling operator
* fails
*/
template <typename Operator>
struct ParquetFieldStruct {
  int field;    // thrift field id this functor responds to
  Operator op;  // default-constructed parser for the nested struct
  __device__ ParquetFieldStruct(int f) : field(f) {}
  // Delegates to op to parse the nested struct; returns true (error) when the
  // wire type is not a struct or the nested parse fails.
  inline __device__ bool operator()(byte_stream_s* bs, int field_type)
  {
    return ((field_type != ST_FLD_STRUCT) || !op(bs));
  }
};
/**
* @brief Functor to run an operator
*
* The purpose of this functor is to replace a switch case. If the field in
* the argument is equal to the field specified in any element of the tuple
* of operators then it is run with the byte stream and field type arguments.
*
* If the field does not match any of the functors then skip_struct_field is
* called over the byte stream.
*
* @return Return value of the selected operator or false if no operator
* matched the field value
*/
template <int index>
struct FunctionSwitchImpl {
  // Recursive case: try the functor at tuple position `index`; if its field id
  // does not match, fall through to the previous position.
  template <typename... Operator>
  static inline __device__ bool run(byte_stream_s* bs,
                                    int field_type,
                                    const int& field,
                                    thrust::tuple<Operator...>& ops)
  {
    if (field == thrust::get<index>(ops).field) {
      return thrust::get<index>(ops)(bs, field_type);
    } else {
      return FunctionSwitchImpl<index - 1>::run(bs, field_type, field, ops);
    }
  }
};
template <>
struct FunctionSwitchImpl<0> {
  // Base case: last candidate functor. If the field id matches none of the
  // operators, the unknown field's value is skipped and parsing continues.
  template <typename... Operator>
  static inline __device__ bool run(byte_stream_s* bs,
                                    int field_type,
                                    const int& field,
                                    thrust::tuple<Operator...>& ops)
  {
    if (field == thrust::get<0>(ops).field) {
      return thrust::get<0>(ops)(bs, field_type);
    } else {
      skip_struct_field(bs, field_type);
      return false;
    }
  }
};
/**
* @brief Function to parse page header based on the tuple of functors provided
*
* Bytes are read from the byte stream and the field delta and field type are
* matched up against user supplied reading functors. If they match then the
* corresponding values are written to references pointed to by the functors.
*
* @return Returns false if an unexpected field is encountered while reading
* byte stream. Otherwise true is returned.
*/
template <typename... Operator>
inline __device__ bool parse_header(thrust::tuple<Operator...>& op, byte_stream_s* bs)
{
  constexpr int index = thrust::tuple_size<thrust::tuple<Operator...>>::value - 1;
  int field = 0;
  while (true) {
    auto const current_byte = getb(bs);
    if (!current_byte) break;  // zero byte terminates the struct
    int const field_delta = current_byte >> 4;   // delta to previous field id
    int const field_type = current_byte & 0xf;   // compact-protocol type code
    // zero delta: the absolute field id follows as a zigzag varint
    field = field_delta ? field + field_delta : get_i32(bs);
    bool exit_function = FunctionSwitchImpl<index>::run(bs, field_type, field, op);
    if (exit_function) { return false; }  // a functor reported a mismatch/error
  }
  return true;
}
// Parses a thrift DataPageHeader (V1) into bs->page; field ids follow the
// parquet.thrift DataPageHeader definition.
struct gpuParseDataPageHeader {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
                                 ParquetFieldEnum<Encoding>(2, bs->page.encoding),
                                 ParquetFieldEnum<Encoding>(3, bs->page.definition_level_encoding),
                                 ParquetFieldEnum<Encoding>(4, bs->page.repetition_level_encoding));
    return parse_header(op, bs);
  }
};
// Parses a thrift DictionaryPageHeader into bs->page.
struct gpuParseDictionaryPageHeader {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
                                 ParquetFieldEnum<Encoding>(2, bs->page.encoding));
    return parse_header(op, bs);
  }
};
// Parses a thrift DataPageHeaderV2 into bs->page; unlike V1, the row count is
// carried explicitly (field 3).
struct gpuParseDataPageHeaderV2 {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
                                 ParquetFieldInt32(3, bs->page.num_rows),
                                 ParquetFieldEnum<Encoding>(4, bs->page.encoding),
                                 ParquetFieldEnum<Encoding>(5, bs->page.definition_level_encoding),
                                 ParquetFieldEnum<Encoding>(6, bs->page.repetition_level_encoding));
    return parse_header(op, bs);
  }
};
// Parses the outer thrift PageHeader; the per-type headers (fields 5/7/8) are
// nested structs handled by the dedicated functors above.
struct gpuParsePageHeader {
  __device__ bool operator()(byte_stream_s* bs)
  {
    auto op = thrust::make_tuple(ParquetFieldEnum<PageType>(1, bs->page_type),
                                 ParquetFieldInt32(2, bs->page.uncompressed_page_size),
                                 ParquetFieldInt32(3, bs->page.compressed_page_size),
                                 ParquetFieldStruct<gpuParseDataPageHeader>(5),
                                 ParquetFieldStruct<gpuParseDictionaryPageHeader>(7),
                                 ParquetFieldStruct<gpuParseDataPageHeaderV2>(8));
    return parse_header(op, bs);
  }
};
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
  gpuDecodePageHeaders(ColumnChunkDesc* chunks, int32_t num_chunks)
{
  gpuParsePageHeader parse_page_header;
  __shared__ byte_stream_s bs_g[4];  // one byte stream per warp
  int lane_id = threadIdx.x % 32;
  int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);  // one chunk per warp
  byte_stream_s* const bs = &bs_g[threadIdx.x / 32];
  if (chunk < num_chunks and lane_id == 0) bs->ck = chunks[chunk];
  __syncthreads();
  if (chunk < num_chunks) {
    size_t num_values, values_found;
    uint32_t data_page_count = 0;
    uint32_t dictionary_page_count = 0;
    int32_t max_num_pages;
    int32_t num_dict_pages = bs->ck.num_dict_pages;
    PageInfo* page_info;
    // lane 0 owns the stream state; other lanes only read the shared struct
    if (!lane_id) {
      bs->base = bs->cur = bs->ck.compressed_data;
      bs->end = bs->base + bs->ck.compressed_size;
      bs->page.chunk_idx = chunk;
      bs->page.src_col_schema = bs->ck.src_col_schema;
      // this computation is only valid for flat schemas. for nested schemas,
      // they will be recomputed in the preprocess step by examining repetition and
      // definition levels
      bs->page.chunk_row = 0;
      bs->page.num_rows = 0;
    }
    num_values = bs->ck.num_values;
    page_info = bs->ck.page_info;
    num_dict_pages = bs->ck.num_dict_pages;
    max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
    values_found = 0;
    __syncwarp();
    // Walk the chunk page by page; header parsing is serial, done by lane 0
    while (values_found < num_values && bs->cur < bs->end) {
      int index_out = -1;
      if (lane_id == 0) {
        // this computation is only valid for flat schemas. for nested schemas,
        // they will be recomputed in the preprocess step by examining repetition and
        // definition levels
        bs->page.chunk_row += bs->page.num_rows;
        bs->page.num_rows = 0;
        if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) {
          switch (bs->page_type) {
            case PageType::DATA_PAGE:
              // this computation is only valid for flat schemas. for nested schemas,
              // they will be recomputed in the preprocess step by examining repetition and
              // definition levels
              bs->page.num_rows = bs->page.num_input_values;
              // intentional fall-through: V1 data pages share the bookkeeping below
            case PageType::DATA_PAGE_V2:
              // data pages are stored after all dictionary pages in page_info
              index_out = num_dict_pages + data_page_count;
              data_page_count++;
              bs->page.flags = 0;
              values_found += bs->page.num_input_values;
              break;
            case PageType::DICTIONARY_PAGE:
              index_out = dictionary_page_count;
              dictionary_page_count++;
              bs->page.flags = PAGEINFO_FLAGS_DICTIONARY;
              break;
            default: index_out = -1; break;
          }
          bs->page.page_data = const_cast<uint8_t*>(bs->cur);
          bs->cur += bs->page.compressed_page_size;  // jump to next page header
        } else {
          bs->cur = bs->end;  // parse failure: abandon the rest of the chunk
        }
      }
      // broadcast lane 0's result so the whole warp agrees on loop state
      index_out = shuffle(index_out);
      if (index_out >= 0 && index_out < max_num_pages && lane_id == 0)
        page_info[index_out] = bs->page;
      num_values = shuffle(num_values);
      __syncwarp();
    }
    if (lane_id == 0) {
      chunks[chunk].num_data_pages = data_page_count;
      chunks[chunk].num_dict_pages = dictionary_page_count;
    }
  }
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
extern "C" __global__ void __launch_bounds__(128)
  gpuBuildStringDictionaryIndex(ColumnChunkDesc* chunks, int32_t num_chunks)
{
  __shared__ ColumnChunkDesc chunk_g[4];  // one chunk descriptor per warp
  int lane_id = threadIdx.x % 32;
  int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);  // one chunk per warp
  ColumnChunkDesc* const ck = &chunk_g[threadIdx.x / 32];
  if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk];
  __syncthreads();
  if (chunk >= num_chunks) { return; }
  // Serial scan by lane 0 only: entries are variable-length, so each offset
  // depends on all previous entries
  if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) {
    // Data type to describe a string
    string_index_pair* dict_index = ck->str_dict_index;
    const uint8_t* dict = ck->page_info[0].page_data;
    int dict_size = ck->page_info[0].uncompressed_page_size;
    int num_entries = ck->page_info[0].num_input_values;
    int pos = 0, cur = 0;
    for (int i = 0; i < num_entries; i++) {
      int len = 0;
      if (cur + 4 <= dict_size) {
        // 4-byte little-endian length prefix precedes each string
        len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
        if (len >= 0 && cur + 4 + len <= dict_size) {
          pos = cur;
          cur = cur + 4 + len;
        } else {
          // truncated/corrupt entry: stop consuming; remaining entries get len 0
          cur = dict_size;
        }
      }
      // TODO: Could store 8 entries in shared mem, then do a single warp-wide store
      dict_index[i].first = reinterpret_cast<const char*>(dict + pos + 4);
      dict_index[i].second = len;
    }
  }
}
/**
 * @brief Launches gpuDecodePageHeaders, one warp per column chunk
 *
 * @param[in,out] chunks List of column chunks
 * @param[in] num_chunks Number of column chunks
 * @param[in] stream Stream to launch the kernel on
 */
void __host__ DecodePageHeaders(ColumnChunkDesc* chunks,
                                int32_t num_chunks,
                                rmm::cuda_stream_view stream)
{
  // 1 chunk per warp, 4 warps (128 threads) per block
  dim3 const dim_block(128, 1);
  dim3 const dim_grid((num_chunks + 3) / 4, 1);
  gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks);
}
/**
 * @brief Launches gpuBuildStringDictionaryIndex, one warp per column chunk
 *
 * @param[in,out] chunks List of column chunks
 * @param[in] num_chunks Number of column chunks
 * @param[in] stream Stream to launch the kernel on
 */
void __host__ BuildStringDictionaryIndex(ColumnChunkDesc* chunks,
                                         int32_t num_chunks,
                                         rmm::cuda_stream_view stream)
{
  // 1 chunk per warp, 4 warps (128 threads) per block
  dim3 const dim_block(128, 1);
  dim3 const dim_grid((num_chunks + 3) / 4, 1);
  gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
5c5227e6964d2895ba2c0ebb765f2ffebfe2d14c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
#include "caffe2/operators/softmax_with_loss_op.h"
#include "caffe2/operators/spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
// Per-example NLL from log-probabilities and integer class labels, with an
// optional per-example weight.
__global__ void LabelCrossEntropyKernel(
    const int N,
    const int D,
    const float* logPdata,
    const int* labeldata,
    const float* weights,
    float* Ydata) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const int label = labeldata[i];
    CUDA_KERNEL_ASSERT(label >= 0 && label < D);
    const float w = (weights != NULL) ? weights[i] : 1.0f;
    Ydata[i] = -logPdata[i * D + label] * w;
  }
}
// Softmax-CE gradient is p - 1 at the labeled class and p elsewhere; the
// caller pre-fills dXdata with p, so only one element per row is adjusted.
__global__ void LabelCrossEntropyGradientKernel(
    const int N,
    const int D,
    const float* Pdata,
    const int* labeldata,
    float* dXdata) {
  CUDA_1D_KERNEL_LOOP(row, N) {
    const int offset = row * D + labeldata[row];
    dXdata[offset] = Pdata[offset] - 1.f;
  }
}
// Weighted softmax-CE gradient: (p - [class == label]) scaled by the
// per-example (per-row) weight; writes every element of dXdata.
__global__ void LabelCrossEntropyGradientKernelWeighted(
    const int N,
    const int D,
    const float* Pdata,
    const int* labeldata,
    float* dXdata,
    const float* weights) {
  CUDA_1D_KERNEL_LOOP(i, N * D) {
    const int row = i / D;
    const int col = i % D;
    const float grad = Pdata[i] - ((col == labeldata[row]) ? 1.f : 0.f);
    dXdata[i] = grad * weights[row];
  }
}
// Soft-label cross entropy: one block strides over rows, threads cooperate
// via hipcub::BlockReduce to sum per-class contributions of each row. Also
// asserts the label distribution of each row sums to ~1.
__global__ void ProbCrossEntropyKernel(
    const int N,
    const int D,
    const float* Pdata,
    const float* labeldata,
    const float* weights,
    float* Ydata) {
  typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int i = blockIdx.x; i < N; i += gridDim.x) {
    float weight = weights ? weights[i] : 1.0;
    float sum = 0.0;
    float total_prob = 0.0;
    for (int j = threadIdx.x; j < D; j += blockDim.x) {
      int idx = i * D + j;
      CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
      total_prob += labeldata[idx];
      // clamp to FLT_MIN to avoid log(0)
      sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
    }
    float tot = BlockReduce(temp_storage).Sum(sum);
    // temp_storage is reused by the second reduction: sync before reuse
    __syncthreads();
    float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
    if (threadIdx.x == 0) {
      // reduction results are only valid on thread 0
      Ydata[i] = tot;
      // Sanity check
      CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f);
    }
    // keep temp_storage safe across row iterations
    __syncthreads();
  }
}
// Gradient of soft-label cross entropy w.r.t. the logits: p - t, optionally
// scaled by a per-example (per-row) weight.
__global__ void ProbCrossEntropyGradientKernel(
    const int N,
    const int D,
    const float* Pdata,
    const float* labeldata,
    float* dXdata,
    const float* weights) {
  if (weights == NULL) {
    CUDA_1D_KERNEL_LOOP(i, N * D) {
      dXdata[i] = Pdata[i] - labeldata[i];
    }
  } else {
    CUDA_1D_KERNEL_LOOP(i, N * D) {
      const float w = weights[i / D];
      dXdata[i] = (Pdata[i] - labeldata[i]) * w;
    }
  }
}
// Softmax over the D channels at every (image, y, x) cell of an NCHW tensor;
// one thread handles one cell.
__global__ void SpatialSoftmaxKernel(
    const int num,
    const int D,
    const int W,
    const int H,
    const float* Xdata,
    float* Pdata) {
  CUDA_1D_KERNEL_LOOP(index, num * W * H) {
    const int x = index % W;
    const int y = (index / W) % H;
    const int n = index / (W * H);
    // offset of channel 0 for this cell; channels are H*W apart
    const int base = n * (H * W * D) + y * W + x;
    // Channel-wise max for numerical stability
    float max_val = -FLT_MAX;
    for (int c = 0; c < D; ++c) {
      max_val = fmaxf(max_val, Xdata[base + c * (H * W)]);
    }
    // exp(x - max) with running sum
    float expsum = 0.0f;
    for (int c = 0; c < D; ++c) {
      const float e = expf(Xdata[base + c * (H * W)] - max_val);
      Pdata[base + c * (H * W)] = e;
      expsum += e;
    }
    // Normalize to probabilities
    for (int c = 0; c < D; ++c) {
      Pdata[base + c * (H * W)] /= expsum;
    }
  }
}
#define DONTCARE (-1)
// Per-pixel cross entropy for spatial softmax: writes the (optionally
// weighted) NLL of the labeled channel at each (n, y, x) position plus the
// weight actually applied, so the caller can normalize by total weight.
// Positions labeled DONTCARE contribute zero loss and zero weight.
__global__ void SpatialCrossEntropyLossKernel(
    const int N,
    const int D,
    const int W,
    const int H,
    const float* Pdata,
    const int* label_data,
    const float* weights,
    float* loss_data,
    float* weight_data) {
  CUDA_1D_KERNEL_LOOP(index, N * W * H) {
    int x = index % W;
    int y = (index / W) % H;
    int i = index / W / H;
    const int label = static_cast<int>(label_data[index]);
    if (label != DONTCARE) {
      CUDA_KERNEL_ASSERT(label >= 0 && label < D);
      float weight = (weights == NULL ? 1.0 : weights[index]);
      // clamp probability to avoid log(0)
      loss_data[index] = -logf(fmaxf(
          Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
      weight_data[index] = weight;
    } else {
      loss_data[index] = 0;
      weight_data[index] = 0;
    }
  }
}
// Gradient of spatial softmax-CE. Precondition: dX_data already holds the
// softmax probabilities (copied by the caller); this kernel subtracts 1 at
// the labeled channel, applies optional per-pixel weights, and zeroes all
// channels of DONTCARE positions. weights_ records the applied weight per
// pixel for later normalization.
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
    const int W, const int H, const int* label_data, const float* weights,
    float* dX_data, float* weights_) {
  CUDA_1D_KERNEL_LOOP(index, N * W * H) {
    int x = index % W;
    int y = (index / W) % H;
    int i = index / W / H;
    const int label = static_cast<int>(label_data[index]);
    if (label != DONTCARE) {
      int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
      dX_data[data_idx] -= 1.0;
      if (weights != NULL) {
        float weight = weights[index];
        // scale all D channels of this pixel by its weight
        for (int c = 0; c < D; ++c) {
          int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
          dX_data[data_idx] *= weight;
        }
        weights_[index] = weight;
      } else {
        weights_[index] = 1.0;
      }
    } else {
      // Ignore-label, so set all gradients for this positions
      // tp zero
      for (int c = 0; c < D; ++c) {
        int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
        dX_data[data_idx] = 0.0;
      }
      weights_[index] = 0.0;
    }
  }
}
// log-softmax finalization: x - max(x) - log(sum(exp(x - max(x)))) per row.
__global__ void SoftmaxNormalizeLogsKernel(
    const int nthreads,
    const int D,
    const float* logits,
    const float* rowmax,
    const float* scales,
    float* out_log) {
  CUDA_1D_KERNEL_LOOP(i, nthreads) {
    const int row = i / D;
    out_log[i] = logits[i] - rowmax[row] - logf(fmaxf(scales[row], FLT_MIN));
  }
}
// Softmax finalization: divide each row's exponentiated values by the row sum.
__global__ void SoftmaxNormalizeKernel(
    const int nthreads,
    const int D,
    const float* probs,
    const float* scales,
    float* out) {
  CUDA_1D_KERNEL_LOOP(i, nthreads) {
    const int row = i / D;
    out[i] = probs[i] / scales[row];
  }
}
/**
 * @brief Row-wise (log-)softmax of an N x D matrix of logits
 *
 * Uses the max-subtraction trick for numerical stability. The row maxima are
 * broadcast-subtracted via a rank-1 GEMM update with a ones vector
 * (sum_multiplier), and the row sums of exp are computed with a GEMV.
 *
 * @param N number of rows
 * @param D number of columns (classes)
 * @param logits input logits
 * @param sum_multiplier device vector of D ones used for broadcasting
 * @param scales scratch buffer (length N) receiving row sums of exp
 * @param rowmax scratch buffer (length N) receiving row maxima
 * @param probs output buffer (N x D): probabilities, or log-probabilities
 *        when log_softmax is true
 * @param log_softmax when true, produce log-softmax instead of softmax
 * @param context CUDA context providing the stream
 */
void Softmax(
    const int N,
    const int D,
    const float* logits,
    const float* sum_multiplier,
    float* scales,
    float* rowmax,
    float* probs,
    bool log_softmax,
    CUDAContext* context) {
  const int size = N * D;
  math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
  // Put the intermediate result X - max(X) into Y
  context->CopySameDevice<float>(size, logits, probs);
  // Subtract the scale
  math::Gemm<float, CUDAContext>(
      CblasNoTrans,
      CblasNoTrans,
      N,
      D,
      1,
      -1,
      rowmax,
      sum_multiplier,
      1,
      probs,
      context);
  // Exponentiation
  math::Exp<float, CUDAContext>(size, probs, probs, context);
  // Sum exponentiated values
  math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
      0, scales, context);
  // Normalize
  if (!log_softmax) {
    hipLaunchKernelGGL(( SoftmaxNormalizeKernel),
        dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(), size, D, probs, scales, probs);
  } else {
    // log-softmax is recomputed from the original logits for accuracy
    hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel),
        dim3(CAFFE_GET_BLOCKS(size)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context->cuda_stream(), size, D, logits, rowmax, scales, probs);
  }
}
} // namespace
// Forward pass: computes softmax probabilities (output 0) and the scaled,
// weight-averaged cross-entropy loss (output 1). Labels are either integer
// class ids, or a full N x D distribution when label_prob_mode_ is set.
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0); // Logits
  auto& T = Input(1); // Labels / targets
  auto* P = Output(0); // Probabilities from softmax
  auto* avg_loss = Output(1); // Average loss
  const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
  const auto canonical_axis = X.canonical_axis_index(axis_);
  int N, D;
  N = X.size_to_dim(canonical_axis); // batch size
  D = X.size_from_dim(canonical_axis);
  P->ResizeLike(X);
  total_weight_ptr_.Resize(1);
  // Validate label shape for the chosen mode
  if (label_prob_mode_) {
    CAFFE_ENFORCE_GE(T.ndim(), 2);
    CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
    CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
  } else {
    if (T.ndim() == canonical_axis) {
      CAFFE_ENFORCE_EQ(T.size(), N);
    } else {
      CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
      CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
    }
  }
  avg_loss->Resize(vector<int64_t>());
  if (losses_.size() != N) {
    losses_.Resize(N);
  }
  if (rowmax_.size() != N) {
    rowmax_.Resize(N);
  }
  if (sum_multiplier_.size() != D) {
    sum_multiplier_.Resize(D);
    math::Set<float, CUDAContext>(
        D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
  }
  // NOTE: losses_ is borrowed here as the row-sum ("scales") scratch buffer
  // of Softmax; it is overwritten with actual losses below.
  Softmax(
      N,
      D,
      X.data<float>(),
      sum_multiplier_.data<float>(),
      losses_.mutable_data<float>(),
      rowmax_.mutable_data<float>(),
      P->template mutable_data<float>(),
      !label_prob_mode_, // logarithmic output
      &context_);
  // Compute label xent loss per example
  if (!label_prob_mode_) {
    hipLaunchKernelGGL(( LabelCrossEntropyKernel),
        dim3(CAFFE_GET_BLOCKS(N)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        N,
        D,
        P->data<float>(),
        T.data<int>(),
        weights,
        losses_.mutable_data<float>());
    // Since we had logarithmic output, we need to exponentiate
    // them again.
    math::Exp<float, CUDAContext>(
        N * D, P->data<float>(), P->template mutable_data<float>(), &context_);
  } else {
    hipLaunchKernelGGL(( ProbCrossEntropyKernel),
        dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        N,
        D,
        P->data<float>(),
        T.data<float>(),
        weights,
        losses_.mutable_data<float>());
  }
  float total_weight = N;
  if (weights) {
    // Sum weights
    math::Sum<float, CUDAContext>(
        N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
    // NOTE(review): copying into pageable host memory makes this memcpy
    // effectively synchronous, which is what makes reading total_weight on the
    // host below safe — confirm if this destination is ever made pinned.
    CUDA_CHECK(hipMemcpyAsync(
        &total_weight,
        total_weight_ptr_.data<float>(),
        sizeof(float),
        hipMemcpyDeviceToHost,
        context_.cuda_stream()));
  }
  // Sum of all losses
  float* avg_loss_data = avg_loss->template mutable_data<float>();
  math::Sum<float, CUDAContext>(
      losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
  // Average of input batch size
  if (total_weight > 0) {
    math::Scale<float, float, CUDAContext>(
        1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
  }
  return true;
}
// Forward pass of spatial softmax + cross entropy on NCHW input: softmax over
// channels at every (n, y, x) cell, then weighted per-pixel NLL averaged by
// the total applied weight (DONTCARE pixels excluded).
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0); // Logits
  auto& T = Input(1); // Labels / targets
  auto* P = Output(0); // Probabilities from softmax
  auto* avg_loss = Output(1); // Average loss
  const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
  int N, D;
  N = X.dim32(0);
  D = X.dim32(1);
  P->ResizeLike(X);
  total_weight_ptr_.Resize(1);
  CAFFE_ENFORCE_EQ(X.ndim(), 4);
  CAFFE_ENFORCE_EQ(T.ndim(), 3);
  CAFFE_ENFORCE_EQ(T.dim32(0), N);
  int H = X.dim32(2);
  int W = X.dim32(3);
  if (losses_.size() != N * W * H) {
    losses_.Resize(N * W * H);
  }
  if (weights_.size() != N * W * H) {
    weights_.Resize(N * W * H);
  }
  const float* Xdata = X.data<float>();
  float* Pdata = P->template mutable_data<float>();
  // Softmax for each x,y location
  // NOTE(review): grid is sized for N, but the kernel's grid-stride loop
  // covers all N*W*H cells, so this is correct (if under-parallelized).
  hipLaunchKernelGGL(( SpatialSoftmaxKernel),
      dim3(CAFFE_GET_BLOCKS(N)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(), N, D, W, H, Xdata, Pdata);
  // Cross entropy
  avg_loss->Resize(vector<int64_t>());
  float* avg_loss_data = avg_loss->template mutable_data<float>();
  math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
  const int* label_data = T.data<int>();
  math::Set<float, CUDAContext>(
      1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
  hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel),
      dim3(CAFFE_GET_BLOCKS(N * W * H)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      N,
      D,
      W,
      H,
      P->data<float>(),
      label_data,
      weights,
      losses_.mutable_data<float>(),
      weights_.mutable_data<float>());
  // Somewhat awkward scalar passing from device to host
  float h_total_weight;
  math::Sum<float, CUDAContext>(
      weights_.size(),
      weights_.data<float>(),
      total_weight_ptr_.mutable_data<float>(),
      &context_,
      &scratch_);
  // NOTE(review): pageable-destination memcpy is effectively synchronous,
  // making the host read of h_total_weight below safe — verify if changed.
  CUDA_CHECK(hipMemcpyAsync(
      &h_total_weight,
      total_weight_ptr_.data<float>(),
      sizeof(float),
      hipMemcpyDeviceToHost,
      context_.cuda_stream()));
  math::Sum<float, CUDAContext>(
      losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
  // Final scaling
  if (h_total_weight > 0) {
    math::Scale<float, float, CUDAContext>(
        1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
  }
  return true;
}
// Backward pass: dX = (p - t) * weight * scale_ * d_avg_loss / total_weight,
// where t is a one-hot label (hard mode) or a full distribution
// (label_prob_mode_).
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0); // Logits
  auto& T = Input(1); // Labels / targets
  // Input(2) is weights, if given
  auto& P = Input(InputSize() - 2); // Probabilities from softmax
  auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
  const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
  auto* dX = Output(0);
  dX->ResizeLike(X);
  const auto canonical_axis = X.canonical_axis_index(axis_);
  int N, D;
  N = X.size_to_dim(canonical_axis); // batch size
  D = X.size_from_dim(canonical_axis);
  if (only_loss_) {
    // Memory saving trick to share the buffer with the softmax output.
    // Softmax output is thus overwritten.
    dX->ShareData(P);
  }
  total_weight_ptr_.Resize(1);
  // Validate label shape for the chosen mode
  if (label_prob_mode_) {
    CAFFE_ENFORCE_GE(T.ndim(), 2);
    CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
    CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
  } else {
    if (T.ndim() == canonical_axis) {
      CAFFE_ENFORCE_EQ(T.size(), N);
    } else {
      CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
      CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
    }
  }
  // Subtract 1 from labeled positions
  if (!label_prob_mode_) {
    if (weights == nullptr) {
      // Copy softmax probabilities into dX
      if (!only_loss_) {
        context_.CopySameDevice<float>(
            P.size(), P.data<float>(), dX->template mutable_data<float>());
      }
      hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel),
          dim3(CAFFE_GET_BLOCKS(N)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context_.cuda_stream(),
          N,
          D,
          P.data<float>(),
          T.data<int>(),
          dX->template mutable_data<float>());
    } else {
      // Weighted version gets the Pdata values internally
      hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted),
          dim3(CAFFE_GET_BLOCKS(N * D)),
          dim3(CAFFE_CUDA_NUM_THREADS),
          0,
          context_.cuda_stream(),
          N,
          D,
          P.data<float>(),
          T.data<int>(),
          dX->template mutable_data<float>(),
          weights);
    }
  } else {
    hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel),
        dim3(CAFFE_GET_BLOCKS(N * D)),
        dim3(CAFFE_CUDA_NUM_THREADS),
        0,
        context_.cuda_stream(),
        N,
        D,
        P.data<float>(),
        T.data<float>(),
        dX->template mutable_data<float>(),
        weights);
  }
  float total_weight = N;
  if (weights) {
    // Sum weights
    math::Sum<float, CUDAContext>(
        N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
    // NOTE(review): pageable-destination memcpy is effectively synchronous,
    // making the host read of total_weight below safe — verify if changed.
    CUDA_CHECK(hipMemcpyAsync(
        &total_weight,
        total_weight_ptr_.data<float>(),
        sizeof(float),
        hipMemcpyDeviceToHost,
        context_.cuda_stream()));
  }
  // Scale by d_avg_loss / N
  if (total_weight > 0) {
    math::Scale<float, float, CUDAContext>(
        dX->size(),
        scale_ / total_weight,
        dX->data<float>(),
        dX->template mutable_data<float>(),
        &context_);
  }
  math::Scale<float, float, CUDAContext>(
      dX->size(),
      d_avg_loss.data<float>(),
      dX->data<float>(),
      dX->template mutable_data<float>(),
      &context_);
  return true;
}
// Backward pass of spatial softmax-CE: copies p into dX, subtracts 1 at the
// labeled channel of each pixel, applies optional per-pixel weights (and
// zeroes DONTCARE pixels), then scales by scale_ * d_avg_loss / total_weight.
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0); // Logits
  auto& T = Input(1); // Labels / targets
  // Input(2) is weights, if given
  auto& P = Input(InputSize() - 2); // Probabilities from softmax
  auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
  const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
  auto* dX = Output(0);
  dX->ResizeLike(X);
  const auto canonical_axis = X.canonical_axis_index(1);
  int N, D;
  N = X.dim32(0);
  D = X.dim32(1);
  if (only_loss_) {
    // Memory saving trick to share the buffer with the softmax output.
    // Softmax output is thus overwritten.
    dX->ShareData(P);
  }
  total_weight_ptr_.Resize(1);
  // Spatial mode, compute softmax for each x, y location
  CAFFE_ENFORCE_EQ(X.ndim(), 4);
  CAFFE_ENFORCE_EQ(T.ndim(), 3);
  int H = X.dim32(2);
  int W = X.dim32(3);
  // NOTE(review): redundant — dX was already resized above; harmless.
  dX->ResizeLike(X);
  if (weights_.size() != N * W * H) {
    weights_.Resize(N * W * H);
  }
  const float* Pdata = P.data<float>();
  float* dX_data = dX->template mutable_data<float>();
  const int* label_data = T.data<int>();
  const float* d_avg_loss_data = d_avg_loss.data<float>();
  // Copy softmax probabilities into dX. All but the neuron
  // corresponding to the correct label has gradient equaling e(x_j)
  // which is the probability under softmax.
  context_.CopySameDevice<float>(P.size(), Pdata, dX_data);
  math::Set<float, CUDAContext>(
      1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
  hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel),
      dim3(CAFFE_GET_BLOCKS(N * W * H)),
      dim3(CAFFE_CUDA_NUM_THREADS),
      0,
      context_.cuda_stream(),
      N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
  math::Sum<float, CUDAContext>(
      weights_.size(),
      weights_.data<float>(),
      total_weight_ptr_.mutable_data<float>(),
      &context_,
      &scratch_);
  // Somewhat awkward scalar passing from device to host
  float h_total_weight;
  // NOTE(review): pageable-destination memcpy is effectively synchronous,
  // making the host read of h_total_weight below safe — verify if changed.
  CUDA_CHECK(hipMemcpyAsync(
      &h_total_weight,
      total_weight_ptr_.data<float>(),
      sizeof(float),
      hipMemcpyDeviceToHost,
      context_.cuda_stream()));
  // Final scaling
  if (h_total_weight > 0) {
    math::Scale<float, float, CUDAContext>(
        dX->size(),
        scale_ / h_total_weight,
        dX->data<float>(),
        dX->template mutable_data<float>(),
        &context_);
  }
  math::Scale<float, float, CUDAContext>(
      dX->size(),
      d_avg_loss.data<float>(),
      dX->data<float>(),
      dX->template mutable_data<float>(),
      &context_);
  return true;
}
// Implementation for the CUDA context.
// Plain softmax forward pass over the canonical axis; delegates to the
// Softmax() helper with log_softmax disabled.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* P = Output(0);
  const auto canonical_axis = X.canonical_axis_index(axis_);
  const int N = X.size_to_dim(canonical_axis);
  const int D = X.size_from_dim(canonical_axis);
  P->ResizeLike(X);
  auto* P_data = P->mutable_data<float>();
  // Empty batch: nothing to compute
  if (N == 0) {
    return true;
  }
  // Lazily (re)allocate scratch buffers sized to the current input
  if (sum_multiplier_.size() != D) {
    sum_multiplier_.Resize(D);
    math::Set<float, CUDAContext>(
        D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
  }
  if (scale_.size() != N) {
    scale_.Resize(N);
  }
  if (rowmax_.size() != N) {
    rowmax_.Resize(N);
  }
  Softmax(
      N,
      D,
      X.data<float>(),
      sum_multiplier_.data<float>(),
      scale_.mutable_data<float>(),
      rowmax_.mutable_data<float>(),
      P_data,
      false,
      &context_);
  return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
// Softmax gradient: dX = Y * (dY - dot(dY, Y)) for each row. One block per
// row; blockDim.x must not exceed SOFTMAX_NUM_THREADS (shared buffer size).
__global__ void softmax_gradient_kernel(
    const int dim,
    const float* Y,
    const float* dY,
    float* dX) {
  // Advance all pointers to this block's row
  Y += blockIdx.x * dim;
  dY += blockIdx.x * dim;
  dX += blockIdx.x * dim;
  const int idx = threadIdx.x;
  __shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
  float tmp;
  // A two-level reduction to compute the inner products.
  tmp = 0;
  for (int i = idx; i < dim; i += blockDim.x) {
    tmp += dY[i] * Y[i];
  }
  reduction_buffer[idx] = tmp;
  __syncthreads();
  if (idx == 0) {
    // second level: serial sum of per-thread partials by thread 0
    tmp = reduction_buffer[0];
    for (int i = 1; i < blockDim.x; ++i)
      tmp += reduction_buffer[i];
    reduction_buffer[0] = tmp;
  }
  __syncthreads();
  // Compute gradient.
  tmp = reduction_buffer[0];
  for (int i = idx; i < dim; i += blockDim.x) {
    dX[i] = Y[i] * (dY[i] - tmp);
  }
}
} // namespace
// Softmax backward pass: launches one block per row (see kernel above for
// the per-row formula). Handles the empty-batch case without launching.
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
  auto& Y = Input(0);
  auto& dY = Input(1);
  auto* dX = Output(0);
  const auto canonical_axis = Y.canonical_axis_index(axis_);
  const int N = Y.size_to_dim(canonical_axis);
  const int D = Y.size_from_dim(canonical_axis);
  dX->ResizeLike(Y);
  auto* dX_data = dX->mutable_data<float>();
  if (N == 0) {
    return true;
  }
  hipLaunchKernelGGL(( softmax_gradient_kernel),
      dim3(N),
      dim3(SOFTMAX_NUM_THREADS),
      0,
      context_.cuda_stream(), D, Y.data<float>(), dY.data<float>(), dX_data);
  return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
| 5c5227e6964d2895ba2c0ebb765f2ffebfe2d14c.cu | #include <cfloat>
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softmax_op.h"
#include "caffe2/operators/softmax_with_loss_op.h"
#include "caffe2/operators/spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
// Per-example NLL from log-probabilities and integer class labels, with an
// optional per-example weight.
__global__ void LabelCrossEntropyKernel(
    const int N,
    const int D,
    const float* logPdata,
    const int* labeldata,
    const float* weights,
    float* Ydata) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    const int label = labeldata[i];
    CUDA_KERNEL_ASSERT(label >= 0 && label < D);
    const float w = (weights != NULL) ? weights[i] : 1.0f;
    Ydata[i] = -logPdata[i * D + label] * w;
  }
}
// Softmax-CE gradient is p - 1 at the labeled class and p elsewhere; the
// caller pre-fills dXdata with p, so only one element per row is adjusted.
__global__ void LabelCrossEntropyGradientKernel(
    const int N,
    const int D,
    const float* Pdata,
    const int* labeldata,
    float* dXdata) {
  CUDA_1D_KERNEL_LOOP(row, N) {
    const int offset = row * D + labeldata[row];
    dXdata[offset] = Pdata[offset] - 1.f;
  }
}
// Weighted softmax-CE gradient: (p - [class == label]) scaled by the
// per-example (per-row) weight; writes every element of dXdata.
__global__ void LabelCrossEntropyGradientKernelWeighted(
    const int N,
    const int D,
    const float* Pdata,
    const int* labeldata,
    float* dXdata,
    const float* weights) {
  CUDA_1D_KERNEL_LOOP(i, N * D) {
    const int row = i / D;
    const int col = i % D;
    const float grad = Pdata[i] - ((col == labeldata[row]) ? 1.f : 0.f);
    dXdata[i] = grad * weights[row];
  }
}
// Soft-label cross entropy: one block strides over rows, threads cooperate
// via cub::BlockReduce to sum per-class contributions of each row. Also
// asserts the label distribution of each row sums to ~1.
__global__ void ProbCrossEntropyKernel(
    const int N,
    const int D,
    const float* Pdata,
    const float* labeldata,
    const float* weights,
    float* Ydata) {
  typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int i = blockIdx.x; i < N; i += gridDim.x) {
    float weight = weights ? weights[i] : 1.0;
    float sum = 0.0;
    float total_prob = 0.0;
    for (int j = threadIdx.x; j < D; j += blockDim.x) {
      int idx = i * D + j;
      CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
      total_prob += labeldata[idx];
      // clamp to FLT_MIN to avoid log(0)
      sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
    }
    float tot = BlockReduce(temp_storage).Sum(sum);
    // temp_storage is reused by the second reduction: sync before reuse
    __syncthreads();
    float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
    if (threadIdx.x == 0) {
      // reduction results are only valid on thread 0
      Ydata[i] = tot;
      // Sanity check
      CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f);
    }
    // keep temp_storage safe across row iterations
    __syncthreads();
  }
}
// Gradient of soft-label cross-entropy w.r.t. the softmax input:
//   dX[n, d] = (P[n, d] - label[n, d]) * weight[n]
// with weight[n] == 1 when no weight tensor is supplied. One thread per
// matrix element; the weight check is hoisted out of the loop once.
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
const bool has_weights = (weights != NULL);
CUDA_1D_KERNEL_LOOP(i, N * D) {
const float diff = Pdata[i] - labeldata[i];
dXdata[i] = has_weights ? diff * weights[i / D] : diff;
}
}
// Channel-wise softmax over an NCHW tensor: for every (image, y, x) cell one
// thread normalizes the D channel values at that spatial location.
// Layout index: i * (H*W*D) + c * (H*W) + y * W + x.
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
// Decompose the flat index into (image i, row y, column x).
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = fmaxf(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = expf(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
// Per-pixel cross-entropy for spatial softmax. For each (image, y, x) cell:
// labels equal to DONTCARE (-1) contribute zero loss and zero weight;
// otherwise loss = -log(max(P[label], 1e-20)) * weight, and the weight used
// is also recorded so the caller can normalize by the total weight.
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
// 1e-20 floor keeps the log finite for vanishing probabilities.
loss_data[index] = -logf(fmaxf(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
// Per-pixel gradient for spatial softmax-with-loss. dX_data is pre-filled
// with the softmax probabilities by the caller; this kernel subtracts 1 at
// the labeled channel, optionally scales all D channels by the per-pixel
// weight, and records the weight used. DONTCARE pixels get zero gradient.
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
// Scale the whole channel column at this pixel, not just the label.
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
// Ignore-label, so set all gradients for this positions
// tp zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
// Log-softmax normalization: out_log[i] = logit[i] - rowmax - log(rowsum),
// where `rowmax` and `scales` (the per-row sums of exp(logit - rowmax)) were
// computed beforehand. One thread per matrix element; `nthreads` == N * D.
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int row = i / D;
// FLT_MIN floor guards log() against a zero row sum.
const float log_scale = logf(fmaxf(scales[row], FLT_MIN));
out_log[i] = logits[i] - rowmax[row] - log_scale;
}
}
// Softmax normalization: divide each exponentiated entry by its row sum.
// `probs` holds exp(logit - rowmax); `scales[n]` is the sum of row n.
// One thread per matrix element; `nthreads` == N * D.
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
out[i] = probs[i] / scales[i / D];
}
}
// Host-side softmax driver for an N x D matrix of logits.
// Pipeline: row maxima -> subtract maxima (GEMM broadcast against a ones
// vector) -> exponentiate -> row sums (GEMV) -> normalize.
// If `log_softmax` is true, `probs` receives log-probabilities instead
// (the log path re-reads the original `logits`, not the exp'd buffer).
// `scales` and `rowmax` are caller-provided scratch of length N;
// `sum_multiplier` must be a length-D vector of ones.
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->CopySameDevice<float>(size, logits, probs);
// Subtract the scale
// probs -= rowmax * ones^T, i.e. broadcast-subtract each row's max.
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
SoftmaxNormalizeKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, probs, scales, probs);
} else {
SoftmaxNormalizeLogsKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs);
}
}
} // namespace
// Forward pass of SoftmaxWithLoss: computes softmax probabilities (output 0)
// and the scaled average cross-entropy loss (output 1, a scalar).
// In hard-label mode (label_prob_mode_ == false) labels are int class ids;
// in soft-label mode they are full N x D probability distributions.
// Optional Input(2) provides per-example weights.
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
// Shape validation differs by label mode: soft labels are N x D, hard
// labels are a length-N vector (possibly with trailing singleton dims).
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
avg_loss->Resize(vector<int64_t>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
// losses_ doubles as the `scales` scratch buffer here; it is overwritten
// with the real per-example losses below.
// In hard-label mode log-softmax is requested so the xent kernel can read
// log-probabilities directly; P is exponentiated back afterwards.
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->template mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
LabelCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->template mutable_data<float>(), &context_);
} else {
ProbCrossEntropyKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
// NOTE(review): the async D2H copy below is read on the host without an
// explicit stream synchronize; with pageable host memory cudaMemcpyAsync
// blocks until completion, but this is fragile — confirm intent.
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Sum of all losses
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Average of input batch size
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
// Forward pass of SpatialSoftmaxWithLoss on NCHW input: channel-wise softmax
// at every (y, x) position (output 0) plus the scaled average per-pixel
// cross-entropy loss (output 1). Labels are N x H x W int class ids with
// DONTCARE (-1) pixels excluded; optional Input(2) gives per-pixel weights.
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
// Per-pixel scratch: one loss and one effective weight per (n, y, x).
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->template mutable_data<float>();
// Softmax for each x,y location
SpatialSoftmaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<int64_t>());
float* avg_loss_data = avg_loss->template mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialCrossEntropyLossKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
// NOTE(review): h_total_weight is read below without an explicit stream
// synchronize after the async copy; pageable-destination cudaMemcpyAsync
// blocks until the copy finishes, but this is worth confirming.
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
// Backward pass of SoftmaxWithLoss: dX = (P - target) * weight, scaled by
// scale_ / total_weight and by the incoming d_avg_loss scalar.
// Inputs: logits X, labels T, optional weights, then the forward-pass
// probabilities P and the gradient of the average loss (last two inputs).
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
// Weights are present only when there are 5 inputs (X, T, W, P, d_avg_loss).
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Same label-shape validation as the forward op.
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.CopySameDevice<float>(
P.size(), P.data<float>(), dX->template mutable_data<float>());
}
LabelCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
LabelCrossEntropyGradientKernelWeighted<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->template mutable_data<float>(),
weights);
}
} else {
ProbCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<float>(),
dX->template mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
// NOTE(review): async D2H copy read on host without an explicit sync;
// pageable-destination cudaMemcpyAsync blocks, but confirm intent.
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
// Multiply by the upstream scalar gradient (device-resident).
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
// Backward pass of SpatialSoftmaxWithLoss on NCHW input: per-pixel
// dX = (P - onehot(label)) * weight, zeroed at DONTCARE pixels, then scaled
// by scale_ / total_weight and by the upstream d_avg_loss scalar.
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
// NOTE(review): canonical_axis is unused below; canonical_axis_index may
// validate the axis as a side effect — confirm before removing.
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
// NOTE(review): dX was already resized above; this second ResizeLike
// appears redundant.
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->template mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
// Copy softmax probabilities into dX. All but the neuron
// corresponding to the correct label has gradient equaling e(x_j)
// which is the probability under softmax.
context_.CopySameDevice<float>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialSoftmaxLossGradientKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
// NOTE(review): read below without an explicit stream synchronize;
// pageable-destination cudaMemcpyAsync blocks, but confirm intent.
float h_total_weight;
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
}
math::Scale<float, float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->template mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
// Plain Softmax forward pass over the canonical axis: delegates to the
// shared Softmax() helper with log_softmax disabled. Scratch buffers
// (ones vector, row sums, row maxima) are lazily (re)sized and cached.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
auto* P_data = P->mutable_data<float>();
// Empty batch: nothing to compute.
if (N == 0) {
return true;
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P_data,
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
// Softmax backward: dX = Y * (dY - dot(dY, Y)) per row.
// One block handles one row of length `dim`; blockDim.x must not exceed
// SOFTMAX_NUM_THREADS (the shared reduction buffer size).
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
// Advance all pointers to this block's row.
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
// Level 1: each thread accumulates a strided partial of dot(dY, Y).
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
// Level 2: thread 0 serially folds the partials; result left in slot 0.
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
// Softmax backward pass: launches one block per row (N blocks) of
// softmax_gradient_kernel, which computes dX = Y * (dY - dot(dY, Y)).
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
auto* dX_data = dX->mutable_data<float>();
// Empty batch: nothing to compute.
if (N == 0) {
return true;
}
softmax_gradient_kernel<<<
N,
SOFTMAX_NUM_THREADS,
0,
context_.cuda_stream()>>>(D, Y.data<float>(), dY.data<float>(), dX_data);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
815d871095ae7facea0859eb6996ba5044b8ca77.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "resizer.h"
namespace solids
{
namespace lib
{
namespace video
{
namespace nvidia
{
// Branchless range clamp usable from device code.
template<class T>
__device__ static T clamp(T x, T lower, T upper)
{
	return x < lower ? lower : (x > upper ? upper : x);
}
// NV12 resize kernel. Each thread produces a 2x2 block of destination luma
// samples plus one interleaved chroma (UV) pair. `texY`/`texUV` are
// normalized-float textures over the source NV12 buffer; samples are scaled
// back by MAX = 1 << (8 * sizeof(YuvUnit)) and clamped to 255.
// NOTE(review): the 255.0f clamp assumes 8-bit YuvUnit samples — confirm
// before instantiating `yuv` with a 16-bit vector type.
template<typename yuv>
static __global__ void resize(hipTextureObject_t texY, hipTextureObject_t texUV, unsigned char* dst, unsigned char* dstUV, int pitch, int width, int height, float fxScale, float fyScale)
{
	int ix = blockIdx.x * blockDim.x + threadIdx.x,
		iy = blockIdx.y * blockDim.y + threadIdx.y;
	// Bounds guard in half-resolution units (one thread covers 2x2 pixels).
	if (ix >= width / 2 || iy >= height / 2)
		return;
	int x = ix * 2, y = iy * 2;
	typedef decltype(yuv::x) YuvUnit;
	const int MAX = 1 << (sizeof(YuvUnit) * 8);
	yuv data;
	// Top row of the 2x2 luma block: sample source at the scaled position.
	data.x = (YuvUnit)clamp((float)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	data.y = (YuvUnit)clamp((float)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	*(yuv*)(dst + y * pitch + x * sizeof(YuvUnit)) = data;
	y++;
	// Bottom row of the 2x2 luma block.
	data.x = (YuvUnit)clamp((float)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	data.y = (YuvUnit)clamp((float)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	*(yuv*)(dst + y * pitch + x * sizeof(YuvUnit)) = data;
	// Chroma: texUV spans the whole NV12 buffer (luma rows included), and
	// fyScale == dstHeight / srcHeight, so (height + iy) / fyScale lands at
	// srcHeight + iy / fyScale, i.e. inside the chroma plane.
	float2 uv = tex2D<float2>(texUV, ix / fxScale, (height + iy) / fyScale + 0.5f);
	data.x = (YuvUnit)clamp((float)(uv.x * MAX), 0.0f, 255.0f);
	data.y = (YuvUnit)clamp((float)(uv.y * MAX), 0.0f, 255.0f);
	*(yuv*)(dstUV + iy * pitch + ix * 2 * sizeof(YuvUnit)) = data;
}
// Host wrapper: builds one single-channel (Y) and one two-channel (UV)
// linear-filtered texture over the source NV12 buffer per call, launches the
// kernel, and destroys the textures. Blocks are 16x16 threads and each
// thread covers 2x2 pixels, so the grid divides the destination by 32.
// NOTE(review): hip API return codes are not checked here — consider
// surfacing errors to the caller.
static void resize(unsigned char* dst, unsigned char* dstChroma, int dstPitch, int dstWidth, int dstHeight, unsigned char* src, int srcPitch, int srcWidth, int srcHeight)
{
	hipResourceDesc resDesc = {};
	resDesc.resType = hipResourceTypePitch2D;
	resDesc.res.pitch2D.devPtr = src;
	// Single-channel 8-bit descriptor for the luma plane.
	resDesc.res.pitch2D.desc = hipCreateChannelDesc<decltype(uchar2::x)>();
	resDesc.res.pitch2D.width = srcWidth;
	resDesc.res.pitch2D.height = srcHeight;
	resDesc.res.pitch2D.pitchInBytes = srcPitch;
	hipTextureDesc texDesc = {};
	texDesc.filterMode = hipFilterModeLinear;
	texDesc.readMode = hipReadModeNormalizedFloat;
	texDesc.normalizedCoords = 0;
	hipTextureObject_t texY = 0;
	hipCreateTextureObject(&texY, &resDesc, &texDesc, NULL);
	// UV texture: uchar2 pairs, half width, covering luma + chroma rows.
	resDesc.res.pitch2D.desc = hipCreateChannelDesc<uchar2>();
	resDesc.res.pitch2D.width = srcWidth / 2;
	resDesc.res.pitch2D.height = srcHeight * 3 / 2;
	hipTextureObject_t texUV = 0;
	hipCreateTextureObject(&texUV, &resDesc, &texDesc, NULL);
	resize<uchar2> << <dim3((dstWidth + 31) / 32, (dstHeight + 31) / 32), dim3(16, 16) >> > (texY, texUV, dst, dstChroma, dstPitch, dstWidth, dstHeight, 1.0f * (float)dstWidth / (float)srcWidth, 1.0f * (float)dstHeight / (float)srcHeight);
	hipDestroyTextureObject(texY);
	hipDestroyTextureObject(texUV);
}
// Public entry point: derives the destination chroma-plane pointer (the
// plane starts after dstNV12Height rows of pitch bytes) and delegates.
void decoder::resizer::resize_nv12(unsigned char* dstNV12, int dstNV12Pitch, int dstNV12Width, int dstNV12Height, unsigned char* srcNV12, int srcNV12Pitch, int srcNV12Width, int srcNV12Height)
{
	unsigned char* dstNV12Chroma = dstNV12 + (dstNV12Pitch * dstNV12Height);
	return resize(dstNV12, dstNV12Chroma, dstNV12Pitch, dstNV12Width, dstNV12Height, srcNV12, srcNV12Pitch, srcNV12Width, srcNV12Height);
}
};
};
};
};
| 815d871095ae7facea0859eb6996ba5044b8ca77.cu | #include <cuda_runtime.h>
#include "resizer.h"
namespace solids
{
namespace lib
{
namespace video
{
namespace nvidia
{
// Branchless range clamp usable from device code.
template<class T>
__device__ static T clamp(T x, T lower, T upper)
{
	return x < lower ? lower : (x > upper ? upper : x);
}
// NV12 resize kernel. Each thread produces a 2x2 block of destination luma
// samples plus one interleaved chroma (UV) pair. `texY`/`texUV` are
// normalized-float textures over the source NV12 buffer; samples are scaled
// back by MAX = 1 << (8 * sizeof(YuvUnit)) and clamped to 255.
// NOTE(review): the 255.0f clamp assumes 8-bit YuvUnit samples — confirm
// before instantiating `yuv` with a 16-bit vector type.
template<typename yuv>
static __global__ void resize(cudaTextureObject_t texY, cudaTextureObject_t texUV, unsigned char* dst, unsigned char* dstUV, int pitch, int width, int height, float fxScale, float fyScale)
{
	int ix = blockIdx.x * blockDim.x + threadIdx.x,
		iy = blockIdx.y * blockDim.y + threadIdx.y;
	// Bounds guard in half-resolution units (one thread covers 2x2 pixels).
	if (ix >= width / 2 || iy >= height / 2)
		return;
	int x = ix * 2, y = iy * 2;
	typedef decltype(yuv::x) YuvUnit;
	const int MAX = 1 << (sizeof(YuvUnit) * 8);
	yuv data;
	// Top row of the 2x2 luma block: sample source at the scaled position.
	data.x = (YuvUnit)clamp((float)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	data.y = (YuvUnit)clamp((float)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	*(yuv*)(dst + y * pitch + x * sizeof(YuvUnit)) = data;
	y++;
	// Bottom row of the 2x2 luma block.
	data.x = (YuvUnit)clamp((float)(tex2D<float>(texY, x / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	data.y = (YuvUnit)clamp((float)(tex2D<float>(texY, (x + 1) / fxScale, y / fyScale) * MAX), 0.0f, 255.0f);
	*(yuv*)(dst + y * pitch + x * sizeof(YuvUnit)) = data;
	// Chroma: texUV spans the whole NV12 buffer (luma rows included), and
	// fyScale == dstHeight / srcHeight, so (height + iy) / fyScale lands at
	// srcHeight + iy / fyScale, i.e. inside the chroma plane.
	float2 uv = tex2D<float2>(texUV, ix / fxScale, (height + iy) / fyScale + 0.5f);
	data.x = (YuvUnit)clamp((float)(uv.x * MAX), 0.0f, 255.0f);
	data.y = (YuvUnit)clamp((float)(uv.y * MAX), 0.0f, 255.0f);
	*(yuv*)(dstUV + iy * pitch + ix * 2 * sizeof(YuvUnit)) = data;
}
// Host wrapper: builds one single-channel (Y) and one two-channel (UV)
// linear-filtered texture over the source NV12 buffer per call, launches the
// kernel, and destroys the textures. Blocks are 16x16 threads and each
// thread covers 2x2 pixels, so the grid divides the destination by 32.
// NOTE(review): CUDA API return codes are not checked here — consider
// surfacing errors to the caller.
static void resize(unsigned char* dst, unsigned char* dstChroma, int dstPitch, int dstWidth, int dstHeight, unsigned char* src, int srcPitch, int srcWidth, int srcHeight)
{
	cudaResourceDesc resDesc = {};
	resDesc.resType = cudaResourceTypePitch2D;
	resDesc.res.pitch2D.devPtr = src;
	// Single-channel 8-bit descriptor for the luma plane.
	resDesc.res.pitch2D.desc = cudaCreateChannelDesc<decltype(uchar2::x)>();
	resDesc.res.pitch2D.width = srcWidth;
	resDesc.res.pitch2D.height = srcHeight;
	resDesc.res.pitch2D.pitchInBytes = srcPitch;
	cudaTextureDesc texDesc = {};
	texDesc.filterMode = cudaFilterModeLinear;
	texDesc.readMode = cudaReadModeNormalizedFloat;
	texDesc.normalizedCoords = 0;
	cudaTextureObject_t texY = 0;
	cudaCreateTextureObject(&texY, &resDesc, &texDesc, NULL);
	// UV texture: uchar2 pairs, half width, covering luma + chroma rows.
	resDesc.res.pitch2D.desc = cudaCreateChannelDesc<uchar2>();
	resDesc.res.pitch2D.width = srcWidth / 2;
	resDesc.res.pitch2D.height = srcHeight * 3 / 2;
	cudaTextureObject_t texUV = 0;
	cudaCreateTextureObject(&texUV, &resDesc, &texDesc, NULL);
	resize<uchar2> << <dim3((dstWidth + 31) / 32, (dstHeight + 31) / 32), dim3(16, 16) >> > (texY, texUV, dst, dstChroma, dstPitch, dstWidth, dstHeight, 1.0f * (float)dstWidth / (float)srcWidth, 1.0f * (float)dstHeight / (float)srcHeight);
	cudaDestroyTextureObject(texY);
	cudaDestroyTextureObject(texUV);
}
// Public entry point: derives the destination chroma-plane pointer (the
// plane starts after dstNV12Height rows of pitch bytes) and delegates.
void decoder::resizer::resize_nv12(unsigned char* dstNV12, int dstNV12Pitch, int dstNV12Width, int dstNV12Height, unsigned char* srcNV12, int srcNV12Pitch, int srcNV12Width, int srcNV12Height)
{
	unsigned char* dstNV12Chroma = dstNV12 + (dstNV12Pitch * dstNV12Height);
	return resize(dstNV12, dstNV12Chroma, dstNV12Pitch, dstNV12Width, dstNV12Height, srcNV12, srcNV12Pitch, srcNV12Width, srcNV12Height);
}
};
};
};
};
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.