hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
3fa373dbc9b6e1f2d6f755d0159cb9ee994b0ccc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "momentum_impl.cuh"
template <typename T, typename S, typename G>
__global__ void MomentumUpdateVariableKernel(const size_t size, T *variable, T *accumulation, const S *learning_rate,
const G *gradient, const S *momentum, bool use_nesterov) {
if (use_nesterov) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + gradient[i];
variable[i] -= gradient[i] * learning_rate[0] + accumulation[i] * momentum[0] * learning_rate[0];
}
} else {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + gradient[i];
variable[i] -= learning_rate[0] * accumulation[i];
}
}
return;
}
template <>
__global__ void MomentumUpdateVariableKernel(const size_t size, half *variable, half *accumulation,
const float *learning_rate, const half *gradient, const float *momentum,
bool use_nesterov) {
if (use_nesterov) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = __float2half(momentum[0]) * accumulation[i] + gradient[i];
variable[i] -= gradient[i] * __float2half(learning_rate[0]) +
accumulation[i] * __float2half(momentum[0]) * __float2half(learning_rate[0]);
}
} else {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = __float2half(momentum[0]) * accumulation[i] + gradient[i];
variable[i] -= __float2half(learning_rate[0]) * accumulation[i];
}
}
return;
}
template <>
__global__ void MomentumUpdateVariableKernel(const size_t size, float *variable, float *accumulation,
const float *learning_rate, const half *gradient, const float *momentum,
bool use_nesterov) {
if (use_nesterov) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + __half2float(gradient[i]);
variable[i] -= __half2float(gradient[i]) * learning_rate[0] + accumulation[i] * momentum[0] * learning_rate[0];
}
} else {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + __half2float(gradient[i]);
variable[i] -= learning_rate[0] * accumulation[i];
}
}
return;
}
template <typename T, typename S, typename G>
void MomentumUpdateVariable(const size_t size, T *variable, T *accumulation, const S *learning_rate, const G *gradient,
const S *momentum, bool use_nesterov, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( MomentumUpdateVariableKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream,
size, variable, accumulation, learning_rate, gradient, momentum, use_nesterov);
return;
}
template <typename T, typename S>
__global__ void FusedMomentumWeightDecayScaleKernel(const size_t element_num, T *weight_decay, T *scale, T *variable,
T *accumulation, const T *learning_rate, const S *gradient,
const T *momentum) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) {
T grad = (variable[i] * weight_decay[0] + static_cast<T>(gradient[i])) * scale[0];
accumulation[i] = momentum[0] * accumulation[i] + grad;
variable[i] -= learning_rate[0] * accumulation[i];
}
}
template <typename T, typename S>
void FusedWeightDecayScaleMomentum(const size_t element_num, T *weight_decay, T *scale, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum,
hipStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block;
hipLaunchKernelGGL(( FusedMomentumWeightDecayScaleKernel), dim3(block_per_grid), dim3(thread_per_block), 0, cuda_stream,
element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum);
}
template <typename T, typename S>
__global__ void FusedMomentumScaleKernel(const size_t element_num, T *scale, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + static_cast<T>(gradient[i]) * scale[0];
variable[i] -= learning_rate[0] * accumulation[i];
}
}
template <typename T, typename S>
void FusedScaleMomentum(const size_t element_num, T *scale, T *variable, T *accumulation, const T *learning_rate,
const S *gradient, const T *momentum, hipStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block;
hipLaunchKernelGGL(( FusedMomentumScaleKernel), dim3(block_per_grid), dim3(thread_per_block), 0, cuda_stream,
element_num, scale, variable, accumulation, learning_rate, gradient, momentum);
}
template <typename T, typename S>
__global__ void FusedWeightDecayMomentumKernel(const size_t element_num, T *weight_decay, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) {
T grad = variable[i] * weight_decay[0] + static_cast<T>(gradient[i]);
accumulation[i] = momentum[0] * accumulation[i] + grad;
variable[i] -= learning_rate[0] * accumulation[i];
}
}
template <typename T, typename S>
void FusedWeightDecayMomentum(const size_t element_num, T *weight_decay, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum, hipStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block;
hipLaunchKernelGGL(( FusedWeightDecayMomentumKernel), dim3(block_per_grid), dim3(thread_per_block), 0, cuda_stream,
element_num, weight_decay, variable, accumulation, learning_rate, gradient, momentum);
}
// CombineFusedScaleMomentum
template <typename T, typename S>
__global__ void CombineFusedMomentumScaleKernel(const size_t num, const size_t *element_num, T **scale, T **variable,
T **accumulation, T **learning_rate, S **gradient, T **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + static_cast<T>(gradient[idx][i]) * scale[idx][0];
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
template <typename T, typename S>
void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, T **scale, T **variable,
T **accumulation, T **learning_rate, S **gradient, T **momentum,
hipStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;
hipLaunchKernelGGL(( CombineFusedMomentumScaleKernel), dim3(block_per_grid), dim3(thread_per_block), 0, cuda_stream,
num, elements, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedScaleMomentum
// CombineFusedWeightDecayScaleMomentum
template <typename T, typename S>
__global__ void CombineFusedMomentumWeightDecayScaleKernel(const size_t num, const size_t *element_num,
T **weight_decay, T **scale, T **variable, T **accumulation,
T **learning_rate, S **gradient, T **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
T grad = (variable[idx][i] * weight_decay[idx][0] + static_cast<T>(gradient[idx][i])) * scale[idx][0];
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + grad;
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
template <typename T, typename S>
void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *element_num,
T **weight_decay, T **scale, T **variable, T **accumulation,
T **learning_rate, S **gradient, T **momentum, hipStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;
hipLaunchKernelGGL(( CombineFusedMomentumWeightDecayScaleKernel), dim3(block_per_grid), dim3(thread_per_block), 0, cuda_stream,
num, element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedWeightDecayScaleMomentum
template void MomentumUpdateVariable<float, float, float>(const size_t size, float *variable, float *accumulation,
const float *learning_rate, const float *gradient,
const float *momentum, bool use_nesterov,
hipStream_t cuda_stream);
template void MomentumUpdateVariable<half, half, half>(const size_t size, half *variable, half *accumulation,
const half *learning_rate, const half *gradient,
const half *momentum, bool use_nesterov,
hipStream_t cuda_stream);
template void MomentumUpdateVariable<half, float, half>(const size_t size, half *variable, half *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, bool use_nesterov,
hipStream_t cuda_stream);
template void MomentumUpdateVariable<float, float, half>(const size_t size, float *variable, float *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, bool use_nesterov,
hipStream_t cuda_stream);
template void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation, const float *learning_rate,
const float *gradient, const float *momentum, hipStream_t cuda_stream);
template void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation, const float *learning_rate,
const half *gradient, const float *momentum, hipStream_t cuda_stream);
template void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate, const float *gradient,
const float *momentum, hipStream_t cuda_stream);
template void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate, const half *gradient,
const float *momentum, hipStream_t cuda_stream);
template void FusedScaleMomentum(const size_t element_num, float *scale, float *variable, float *accumulation,
const float *learning_rate, const float *gradient, const float *momentum,
hipStream_t cuda_stream);
template void FusedScaleMomentum(const size_t element_num, float *scale, float *variable, float *accumulation,
const float *learning_rate, const half *gradient, const float *momentum,
hipStream_t cuda_stream);
template void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, float **gradient,
float **momentum, hipStream_t cuda_stream);
template void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, half **gradient,
float **momentum, hipStream_t cuda_stream);
template void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **scale,
float **variable, float **accumulation, float **learning_rate, float **gradient,
float **momentum, hipStream_t cuda_stream);
template void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **scale,
float **variable, float **accumulation, float **learning_rate, half **gradient,
float **momentum, hipStream_t cuda_stream);
| 3fa373dbc9b6e1f2d6f755d0159cb9ee994b0ccc.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "momentum_impl.cuh"
template <typename T, typename S, typename G>
__global__ void MomentumUpdateVariableKernel(const size_t size, T *variable, T *accumulation, const S *learning_rate,
const G *gradient, const S *momentum, bool use_nesterov) {
if (use_nesterov) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + gradient[i];
variable[i] -= gradient[i] * learning_rate[0] + accumulation[i] * momentum[0] * learning_rate[0];
}
} else {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + gradient[i];
variable[i] -= learning_rate[0] * accumulation[i];
}
}
return;
}
template <>
__global__ void MomentumUpdateVariableKernel(const size_t size, half *variable, half *accumulation,
const float *learning_rate, const half *gradient, const float *momentum,
bool use_nesterov) {
if (use_nesterov) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = __float2half(momentum[0]) * accumulation[i] + gradient[i];
variable[i] -= gradient[i] * __float2half(learning_rate[0]) +
accumulation[i] * __float2half(momentum[0]) * __float2half(learning_rate[0]);
}
} else {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = __float2half(momentum[0]) * accumulation[i] + gradient[i];
variable[i] -= __float2half(learning_rate[0]) * accumulation[i];
}
}
return;
}
template <>
__global__ void MomentumUpdateVariableKernel(const size_t size, float *variable, float *accumulation,
const float *learning_rate, const half *gradient, const float *momentum,
bool use_nesterov) {
if (use_nesterov) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + __half2float(gradient[i]);
variable[i] -= __half2float(gradient[i]) * learning_rate[0] + accumulation[i] * momentum[0] * learning_rate[0];
}
} else {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (size); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + __half2float(gradient[i]);
variable[i] -= learning_rate[0] * accumulation[i];
}
}
return;
}
template <typename T, typename S, typename G>
void MomentumUpdateVariable(const size_t size, T *variable, T *accumulation, const S *learning_rate, const G *gradient,
const S *momentum, bool use_nesterov, cudaStream_t cuda_stream) {
MomentumUpdateVariableKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(
size, variable, accumulation, learning_rate, gradient, momentum, use_nesterov);
return;
}
template <typename T, typename S>
__global__ void FusedMomentumWeightDecayScaleKernel(const size_t element_num, T *weight_decay, T *scale, T *variable,
T *accumulation, const T *learning_rate, const S *gradient,
const T *momentum) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) {
T grad = (variable[i] * weight_decay[0] + static_cast<T>(gradient[i])) * scale[0];
accumulation[i] = momentum[0] * accumulation[i] + grad;
variable[i] -= learning_rate[0] * accumulation[i];
}
}
template <typename T, typename S>
void FusedWeightDecayScaleMomentum(const size_t element_num, T *weight_decay, T *scale, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum,
cudaStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block;
FusedMomentumWeightDecayScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>(
element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum);
}
template <typename T, typename S>
__global__ void FusedMomentumScaleKernel(const size_t element_num, T *scale, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) {
accumulation[i] = momentum[0] * accumulation[i] + static_cast<T>(gradient[i]) * scale[0];
variable[i] -= learning_rate[0] * accumulation[i];
}
}
template <typename T, typename S>
void FusedScaleMomentum(const size_t element_num, T *scale, T *variable, T *accumulation, const T *learning_rate,
const S *gradient, const T *momentum, cudaStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block;
FusedMomentumScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>(
element_num, scale, variable, accumulation, learning_rate, gradient, momentum);
}
template <typename T, typename S>
__global__ void FusedWeightDecayMomentumKernel(const size_t element_num, T *weight_decay, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num); i += blockDim.x * gridDim.x) {
T grad = variable[i] * weight_decay[0] + static_cast<T>(gradient[i]);
accumulation[i] = momentum[0] * accumulation[i] + grad;
variable[i] -= learning_rate[0] * accumulation[i];
}
}
template <typename T, typename S>
void FusedWeightDecayMomentum(const size_t element_num, T *weight_decay, T *variable, T *accumulation,
const T *learning_rate, const S *gradient, const T *momentum, cudaStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (element_num + thread_per_block - 1) / thread_per_block;
FusedWeightDecayMomentumKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>(
element_num, weight_decay, variable, accumulation, learning_rate, gradient, momentum);
}
// CombineFusedScaleMomentum
template <typename T, typename S>
__global__ void CombineFusedMomentumScaleKernel(const size_t num, const size_t *element_num, T **scale, T **variable,
T **accumulation, T **learning_rate, S **gradient, T **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + static_cast<T>(gradient[idx][i]) * scale[idx][0];
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
template <typename T, typename S>
void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, T **scale, T **variable,
T **accumulation, T **learning_rate, S **gradient, T **momentum,
cudaStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;
CombineFusedMomentumScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>(
num, elements, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedScaleMomentum
// CombineFusedWeightDecayScaleMomentum
template <typename T, typename S>
__global__ void CombineFusedMomentumWeightDecayScaleKernel(const size_t num, const size_t *element_num,
T **weight_decay, T **scale, T **variable, T **accumulation,
T **learning_rate, S **gradient, T **momentum) {
for (size_t idx = 0; idx < num; idx++) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (element_num[idx]); i += blockDim.x * gridDim.x) {
T grad = (variable[idx][i] * weight_decay[idx][0] + static_cast<T>(gradient[idx][i])) * scale[idx][0];
accumulation[idx][i] = momentum[idx][0] * accumulation[idx][i] + grad;
variable[idx][i] -= learning_rate[idx][0] * accumulation[idx][i];
}
}
}
template <typename T, typename S>
void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *element_num,
T **weight_decay, T **scale, T **variable, T **accumulation,
T **learning_rate, S **gradient, T **momentum, cudaStream_t cuda_stream) {
size_t thread_per_block = 256;
size_t block_per_grid = (max + thread_per_block - 1) / thread_per_block;
CombineFusedMomentumWeightDecayScaleKernel<<<block_per_grid, thread_per_block, 0, cuda_stream>>>(
num, element_num, weight_decay, scale, variable, accumulation, learning_rate, gradient, momentum);
}
// end CombineFusedWeightDecayScaleMomentum
template void MomentumUpdateVariable<float, float, float>(const size_t size, float *variable, float *accumulation,
const float *learning_rate, const float *gradient,
const float *momentum, bool use_nesterov,
cudaStream_t cuda_stream);
template void MomentumUpdateVariable<half, half, half>(const size_t size, half *variable, half *accumulation,
const half *learning_rate, const half *gradient,
const half *momentum, bool use_nesterov,
cudaStream_t cuda_stream);
template void MomentumUpdateVariable<half, float, half>(const size_t size, half *variable, half *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, bool use_nesterov,
cudaStream_t cuda_stream);
template void MomentumUpdateVariable<float, float, half>(const size_t size, float *variable, float *accumulation,
const float *learning_rate, const half *gradient,
const float *momentum, bool use_nesterov,
cudaStream_t cuda_stream);
template void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation, const float *learning_rate,
const float *gradient, const float *momentum, cudaStream_t cuda_stream);
template void FusedWeightDecayScaleMomentum(const size_t element_num, float *weight_decay, float *scale,
float *variable, float *accumulation, const float *learning_rate,
const half *gradient, const float *momentum, cudaStream_t cuda_stream);
template void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate, const float *gradient,
const float *momentum, cudaStream_t cuda_stream);
template void FusedWeightDecayMomentum(const size_t element_num, float *weight_decay, float *variable,
float *accumulation, const float *learning_rate, const half *gradient,
const float *momentum, cudaStream_t cuda_stream);
template void FusedScaleMomentum(const size_t element_num, float *scale, float *variable, float *accumulation,
const float *learning_rate, const float *gradient, const float *momentum,
cudaStream_t cuda_stream);
template void FusedScaleMomentum(const size_t element_num, float *scale, float *variable, float *accumulation,
const float *learning_rate, const half *gradient, const float *momentum,
cudaStream_t cuda_stream);
template void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, float **gradient,
float **momentum, cudaStream_t cuda_stream);
template void CombineFusedWeightDecayScaleMomentum(const size_t max, const size_t num, const size_t *elements,
float **weight_decay, float **scale, float **variable,
float **accumulation, float **learning_rate, half **gradient,
float **momentum, cudaStream_t cuda_stream);
template void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **scale,
float **variable, float **accumulation, float **learning_rate, float **gradient,
float **momentum, cudaStream_t cuda_stream);
template void CombineFusedScaleMomentum(const size_t max, const size_t num, const size_t *elements, float **scale,
float **variable, float **accumulation, float **learning_rate, half **gradient,
float **momentum, cudaStream_t cuda_stream);
|
1c8613da1cee3bb9beafa1ed33d1300e1ad7cab5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N 10000000
__global__ void add(int *a, int *b, int *c)
{
int tx = threadIdx.x;
int bx = blockDim.x * blockIdx.x;
int tid = bx + tx;
while (tid < N)
{
c[tid] = a[tid] + b[tid];
tid += gridDim.x * blockDim.x;
}
}
int main(void)
{
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
a = (int *)malloc(N * sizeof(int));
b = (int *)malloc(N * sizeof(int));
c = (int *)malloc(N * sizeof(int));
if (hipMalloc((void **)&dev_a, N * sizeof(int)) != hipSuccess) return 1;
if (hipMalloc((void **)&dev_b, N * sizeof(int)) != hipSuccess) return 1 ;
if (hipMalloc((void **)&dev_c, N * sizeof(int)) != hipSuccess) return 1 ;
srand(time(NULL));
for (int i = 0; i < N; i++)
{
a[i] = rand() % N;
b[i] = rand() % N;
}
int *d;
d = (int *)malloc(N * sizeof(int));
int cpu = true;
double elapsedTimeCPU;
struct timespec t_start, t_end;
if (cpu) {
clock_gettime( CLOCK_REALTIME, &t_start);
for (int i = 0; i < N; i++) {
d[i] = a[i] + b[i];
}
clock_gettime( CLOCK_REALTIME, &t_end);
elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);
}
if (hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice) != hipSuccess) return 1 ;
if (hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice) != hipSuccess) return 1 ;
int per_threads = 256;
int per_blocks = N / per_threads;
printf("Per threads: %d\n", per_threads);
printf("Per blocks: %d\n", per_blocks);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( add), dim3(per_blocks), dim3(per_threads), 0, 0, dev_a, dev_b, dev_c);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
if (hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost) != hipSuccess) return 1 ;
bool success = true;
for (int i = 0; i < N; i++)
if ((a[i] + b[i]) != c[i])
{
success = false;
break;
}
if (success)
printf("We did it!\n");
else
printf("Failed\n");
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipEventDestroy(start);
hipEventDestroy(stop);
return 0;
} | 1c8613da1cee3bb9beafa1ed33d1300e1ad7cab5.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 10000000
__global__ void add(int *a, int *b, int *c)
{
int tx = threadIdx.x;
int bx = blockDim.x * blockIdx.x;
int tid = bx + tx;
while (tid < N)
{
c[tid] = a[tid] + b[tid];
tid += gridDim.x * blockDim.x;
}
}
int main(void)
{
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
a = (int *)malloc(N * sizeof(int));
b = (int *)malloc(N * sizeof(int));
c = (int *)malloc(N * sizeof(int));
if (cudaMalloc((void **)&dev_a, N * sizeof(int)) != cudaSuccess) return 1;
if (cudaMalloc((void **)&dev_b, N * sizeof(int)) != cudaSuccess) return 1 ;
if (cudaMalloc((void **)&dev_c, N * sizeof(int)) != cudaSuccess) return 1 ;
srand(time(NULL));
for (int i = 0; i < N; i++)
{
a[i] = rand() % N;
b[i] = rand() % N;
}
int *d;
d = (int *)malloc(N * sizeof(int));
int cpu = true;
double elapsedTimeCPU;
struct timespec t_start, t_end;
if (cpu) {
clock_gettime( CLOCK_REALTIME, &t_start);
for (int i = 0; i < N; i++) {
d[i] = a[i] + b[i];
}
clock_gettime( CLOCK_REALTIME, &t_end);
elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);
}
if (cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) return 1 ;
if (cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) return 1 ;
int per_threads = 256;
int per_blocks = N / per_threads;
printf("Per threads: %d\n", per_threads);
printf("Per blocks: %d\n", per_blocks);
cudaEventRecord(start, 0);
add<<<per_blocks, per_threads>>>(dev_a, dev_b, dev_c);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
if (cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) return 1 ;
bool success = true;
for (int i = 0; i < N; i++)
if ((a[i] + b[i]) != c[i])
{
success = false;
break;
}
if (success)
printf("We did it!\n");
else
printf("Failed\n");
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
} |
880f8f5ceaa32881e27a8872322ef27564ca89fd.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2021 Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/cuda/cuda_compact_graph.cu
* \brief Functions to find and eliminate the common isolated nodes across
* all given graphs with the same set of nodes.
*/
#include <dgl/runtime/device_api.h>
#include <dgl/immutable_graph.h>
#include <hip/hip_runtime.h>
#include <utility>
#include <algorithm>
#include <memory>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../heterograph.h"
#include "../compact.h"
#include "cuda_map_edges.cuh"
using namespace dgl::aten;
using namespace dgl::runtime::cuda;
using namespace dgl::transform::cuda;
namespace dgl {
namespace transform {
namespace {
/**
* \brief This function builds node maps for each node type, preserving the
* order of the input nodes. Here it is assumed the nodes are not unique,
* and thus a unique list is generated.
*
* \param input_nodes The set of input nodes.
* \param node_maps The node maps to be constructed.
* \param count_unique_device The number of unique nodes (on the GPU).
* \param unique_nodes_device The unique nodes (on the GPU).
* \param stream The stream to operate on.
*/
template<typename IdType>
void BuildNodeMaps(
const std::vector<IdArray>& input_nodes,
DeviceNodeMap<IdType> * const node_maps,
int64_t * const count_unique_device,
std::vector<IdArray>* const unique_nodes_device,
hipStream_t stream) {
const int64_t num_ntypes = static_cast<int64_t>(input_nodes.size());
CUDA_CALL(hipMemsetAsync(
count_unique_device,
0,
num_ntypes*sizeof(*count_unique_device),
stream));
// possibly duplicated nodes
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
const IdArray& nodes = input_nodes[ntype];
if (nodes->shape[0] > 0) {
CHECK_EQ(nodes->ctx.device_type, kDLGPU);
node_maps->LhsHashTable(ntype).FillWithDuplicates(
nodes.Ptr<IdType>(),
nodes->shape[0],
(*unique_nodes_device)[ntype].Ptr<IdType>(),
count_unique_device+ntype,
stream);
}
}
}
template<typename IdType>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphsGPU(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
hipStream_t stream = 0;
const auto& ctx = graphs[0]->Context();
auto device = runtime::DeviceAPI::Get(ctx);
CHECK_EQ(ctx.device_type, kDLGPU);
// Step 1: Collect the nodes that has connections for each type.
const uint64_t num_ntypes = graphs[0]->NumVertexTypes();
std::vector<std::vector<EdgeArray>> all_edges(graphs.size()); // all_edges[i][etype]
// count the number of nodes per type
std::vector<int64_t> max_vertex_cnt(num_ntypes, 0);
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const int64_t n_edges = curr_graph->NumEdges(etype);
max_vertex_cnt[srctype] += n_edges;
max_vertex_cnt[dsttype] += n_edges;
}
}
for (size_t i = 0; i < always_preserve.size(); ++i) {
max_vertex_cnt[i] += always_preserve[i]->shape[0];
}
// gather all nodes
std::vector<IdArray> all_nodes(num_ntypes);
std::vector<int64_t> node_offsets(num_ntypes, 0);
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
all_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
// copy the nodes in always_preserve
if (ntype < always_preserve.size() && always_preserve[ntype]->shape[0] > 0) {
device->CopyDataFromTo(
always_preserve[ntype].Ptr<IdType>(), 0,
all_nodes[ntype].Ptr<IdType>(),
node_offsets[ntype],
sizeof(IdType)*always_preserve[ntype]->shape[0],
always_preserve[ntype]->ctx,
all_nodes[ntype]->ctx,
always_preserve[ntype]->dtype,
stream);
node_offsets[ntype] += sizeof(IdType)*always_preserve[ntype]->shape[0];
}
}
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
all_edges[i].reserve(num_etypes);
for (int64_t etype = 0; etype < num_etypes; ++etype) {
dgl_type_t srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const EdgeArray edges = curr_graph->Edges(etype, "eid");
if (edges.src.defined()) {
device->CopyDataFromTo(
edges.src.Ptr<IdType>(), 0,
all_nodes[srctype].Ptr<IdType>(),
node_offsets[srctype],
sizeof(IdType)*edges.src->shape[0],
edges.src->ctx,
all_nodes[srctype]->ctx,
edges.src->dtype,
stream);
node_offsets[srctype] += sizeof(IdType)*edges.src->shape[0];
}
if (edges.dst.defined()) {
device->CopyDataFromTo(
edges.dst.Ptr<IdType>(), 0,
all_nodes[dsttype].Ptr<IdType>(),
node_offsets[dsttype],
sizeof(IdType)*edges.dst->shape[0],
edges.dst->ctx,
all_nodes[dsttype]->ctx,
edges.dst->dtype,
stream);
node_offsets[dsttype] += sizeof(IdType)*edges.dst->shape[0];
}
all_edges[i].push_back(edges);
}
}
// Step 2: Relabel the nodes for each type to a smaller ID space
// using BuildNodeMaps
// allocate space for map creation
// the hashmap on GPU
DeviceNodeMap<IdType> node_maps(max_vertex_cnt, 0, ctx, stream);
// number of unique nodes per type on CPU
std::vector<int64_t> num_induced_nodes(num_ntypes);
// number of unique nodes per type on GPU
int64_t * count_unique_device = static_cast<int64_t*>(
device->AllocWorkspace(ctx, sizeof(int64_t)*num_ntypes));
// the set of unique nodes per type
std::vector<IdArray> induced_nodes(num_ntypes);
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
}
BuildNodeMaps(
all_nodes,
&node_maps,
count_unique_device,
&induced_nodes,
stream);
device->CopyDataFromTo(
count_unique_device, 0,
num_induced_nodes.data(), 0,
sizeof(*num_induced_nodes.data())*num_ntypes,
ctx,
DGLContext{kDLCPU, 0},
DGLType{kDLInt, 64, 1},
stream);
device->StreamSync(ctx, stream);
// wait for the node counts to finish transferring
device->FreeWorkspace(ctx, count_unique_device);
// resize induced nodes
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype]->shape[0] = num_induced_nodes[ntype];
}
// Step 3: Remap the edges of each graph using MapEdges
std::vector<HeteroGraphPtr> new_graphs;
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const auto meta_graph = curr_graph->meta_graph();
const int64_t num_etypes = curr_graph->NumEdgeTypes();
std::vector<HeteroGraphPtr> rel_graphs;
rel_graphs.reserve(num_etypes);
std::vector<IdArray> new_src;
std::vector<IdArray> new_dst;
std::tie(new_src, new_dst) = MapEdges(
curr_graph, all_edges[i], node_maps, stream);
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
rel_graphs.push_back(UnitGraph::CreateFromCOO(
srctype == dsttype ? 1 : 2,
induced_nodes[srctype]->shape[0],
induced_nodes[dsttype]->shape[0],
new_src[etype],
new_dst[etype]));
}
new_graphs.push_back(CreateHeteroGraph(meta_graph, rel_graphs, num_induced_nodes));
}
return std::make_pair(new_graphs, induced_nodes);
}
} // namespace
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int32_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int32_t>(graphs, always_preserve);
}
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int64_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int64_t>(graphs, always_preserve);
}
} // namespace transform
} // namespace dgl
| 880f8f5ceaa32881e27a8872322ef27564ca89fd.cu | /*!
* Copyright 2021 Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* \file graph/transform/cuda/cuda_compact_graph.cu
* \brief Functions to find and eliminate the common isolated nodes across
* all given graphs with the same set of nodes.
*/
#include <dgl/runtime/device_api.h>
#include <dgl/immutable_graph.h>
#include <cuda_runtime.h>
#include <utility>
#include <algorithm>
#include <memory>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../heterograph.h"
#include "../compact.h"
#include "cuda_map_edges.cuh"
using namespace dgl::aten;
using namespace dgl::runtime::cuda;
using namespace dgl::transform::cuda;
namespace dgl {
namespace transform {
namespace {
/**
* \brief This function builds node maps for each node type, preserving the
* order of the input nodes. Here it is assumed the nodes are not unique,
* and thus a unique list is generated.
*
* \param input_nodes The set of input nodes.
* \param node_maps The node maps to be constructed.
* \param count_unique_device The number of unique nodes (on the GPU).
* \param unique_nodes_device The unique nodes (on the GPU).
* \param stream The stream to operate on.
*/
template<typename IdType>
void BuildNodeMaps(
const std::vector<IdArray>& input_nodes,
DeviceNodeMap<IdType> * const node_maps,
int64_t * const count_unique_device,
std::vector<IdArray>* const unique_nodes_device,
cudaStream_t stream) {
const int64_t num_ntypes = static_cast<int64_t>(input_nodes.size());
CUDA_CALL(cudaMemsetAsync(
count_unique_device,
0,
num_ntypes*sizeof(*count_unique_device),
stream));
// possibly duplicated nodes
for (int64_t ntype = 0; ntype < num_ntypes; ++ntype) {
const IdArray& nodes = input_nodes[ntype];
if (nodes->shape[0] > 0) {
CHECK_EQ(nodes->ctx.device_type, kDLGPU);
node_maps->LhsHashTable(ntype).FillWithDuplicates(
nodes.Ptr<IdType>(),
nodes->shape[0],
(*unique_nodes_device)[ntype].Ptr<IdType>(),
count_unique_device+ntype,
stream);
}
}
}
template<typename IdType>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphsGPU(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
cudaStream_t stream = 0;
const auto& ctx = graphs[0]->Context();
auto device = runtime::DeviceAPI::Get(ctx);
CHECK_EQ(ctx.device_type, kDLGPU);
// Step 1: Collect the nodes that has connections for each type.
const uint64_t num_ntypes = graphs[0]->NumVertexTypes();
std::vector<std::vector<EdgeArray>> all_edges(graphs.size()); // all_edges[i][etype]
// count the number of nodes per type
std::vector<int64_t> max_vertex_cnt(num_ntypes, 0);
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const int64_t n_edges = curr_graph->NumEdges(etype);
max_vertex_cnt[srctype] += n_edges;
max_vertex_cnt[dsttype] += n_edges;
}
}
for (size_t i = 0; i < always_preserve.size(); ++i) {
max_vertex_cnt[i] += always_preserve[i]->shape[0];
}
// gather all nodes
std::vector<IdArray> all_nodes(num_ntypes);
std::vector<int64_t> node_offsets(num_ntypes, 0);
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
all_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
// copy the nodes in always_preserve
if (ntype < always_preserve.size() && always_preserve[ntype]->shape[0] > 0) {
device->CopyDataFromTo(
always_preserve[ntype].Ptr<IdType>(), 0,
all_nodes[ntype].Ptr<IdType>(),
node_offsets[ntype],
sizeof(IdType)*always_preserve[ntype]->shape[0],
always_preserve[ntype]->ctx,
all_nodes[ntype]->ctx,
always_preserve[ntype]->dtype,
stream);
node_offsets[ntype] += sizeof(IdType)*always_preserve[ntype]->shape[0];
}
}
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const int64_t num_etypes = curr_graph->NumEdgeTypes();
all_edges[i].reserve(num_etypes);
for (int64_t etype = 0; etype < num_etypes; ++etype) {
dgl_type_t srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
const EdgeArray edges = curr_graph->Edges(etype, "eid");
if (edges.src.defined()) {
device->CopyDataFromTo(
edges.src.Ptr<IdType>(), 0,
all_nodes[srctype].Ptr<IdType>(),
node_offsets[srctype],
sizeof(IdType)*edges.src->shape[0],
edges.src->ctx,
all_nodes[srctype]->ctx,
edges.src->dtype,
stream);
node_offsets[srctype] += sizeof(IdType)*edges.src->shape[0];
}
if (edges.dst.defined()) {
device->CopyDataFromTo(
edges.dst.Ptr<IdType>(), 0,
all_nodes[dsttype].Ptr<IdType>(),
node_offsets[dsttype],
sizeof(IdType)*edges.dst->shape[0],
edges.dst->ctx,
all_nodes[dsttype]->ctx,
edges.dst->dtype,
stream);
node_offsets[dsttype] += sizeof(IdType)*edges.dst->shape[0];
}
all_edges[i].push_back(edges);
}
}
// Step 2: Relabel the nodes for each type to a smaller ID space
// using BuildNodeMaps
// allocate space for map creation
// the hashmap on GPU
DeviceNodeMap<IdType> node_maps(max_vertex_cnt, 0, ctx, stream);
// number of unique nodes per type on CPU
std::vector<int64_t> num_induced_nodes(num_ntypes);
// number of unique nodes per type on GPU
int64_t * count_unique_device = static_cast<int64_t*>(
device->AllocWorkspace(ctx, sizeof(int64_t)*num_ntypes));
// the set of unique nodes per type
std::vector<IdArray> induced_nodes(num_ntypes);
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype] = NewIdArray(max_vertex_cnt[ntype], ctx,
sizeof(IdType)*8);
}
BuildNodeMaps(
all_nodes,
&node_maps,
count_unique_device,
&induced_nodes,
stream);
device->CopyDataFromTo(
count_unique_device, 0,
num_induced_nodes.data(), 0,
sizeof(*num_induced_nodes.data())*num_ntypes,
ctx,
DGLContext{kDLCPU, 0},
DGLType{kDLInt, 64, 1},
stream);
device->StreamSync(ctx, stream);
// wait for the node counts to finish transferring
device->FreeWorkspace(ctx, count_unique_device);
// resize induced nodes
for (uint64_t ntype = 0; ntype < num_ntypes; ++ntype) {
induced_nodes[ntype]->shape[0] = num_induced_nodes[ntype];
}
// Step 3: Remap the edges of each graph using MapEdges
std::vector<HeteroGraphPtr> new_graphs;
for (size_t i = 0; i < graphs.size(); ++i) {
const HeteroGraphPtr curr_graph = graphs[i];
const auto meta_graph = curr_graph->meta_graph();
const int64_t num_etypes = curr_graph->NumEdgeTypes();
std::vector<HeteroGraphPtr> rel_graphs;
rel_graphs.reserve(num_etypes);
std::vector<IdArray> new_src;
std::vector<IdArray> new_dst;
std::tie(new_src, new_dst) = MapEdges(
curr_graph, all_edges[i], node_maps, stream);
for (IdType etype = 0; etype < num_etypes; ++etype) {
IdType srctype, dsttype;
std::tie(srctype, dsttype) = curr_graph->GetEndpointTypes(etype);
rel_graphs.push_back(UnitGraph::CreateFromCOO(
srctype == dsttype ? 1 : 2,
induced_nodes[srctype]->shape[0],
induced_nodes[dsttype]->shape[0],
new_src[etype],
new_dst[etype]));
}
new_graphs.push_back(CreateHeteroGraph(meta_graph, rel_graphs, num_induced_nodes));
}
return std::make_pair(new_graphs, induced_nodes);
}
} // namespace
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int32_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int32_t>(graphs, always_preserve);
}
template<>
std::pair<std::vector<HeteroGraphPtr>, std::vector<IdArray>>
CompactGraphs<kDLGPU, int64_t>(
const std::vector<HeteroGraphPtr> &graphs,
const std::vector<IdArray> &always_preserve) {
return CompactGraphsGPU<int64_t>(graphs, always_preserve);
}
} // namespace transform
} // namespace dgl
|
68eb83613e241e59cd828c5d5d425769825428cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mma.h>
#include "../bm_config.hpp"
using namespace nvcuda;
// The only dimensions currently supported by WMMA
const int WMMA_M = 16;
const int WMMA_N = 16;
const int WMMA_K = 16;
// // Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming:
// // 1) Matrices are packed in memory.
// // 2) M, N and K are multiples of 16.
// // 3) Neither A nor B are transposed.
// // Note: This is NOT a high performance example but is for demonstration purposes only
// // For a high performance code please use the GEMM provided in cuBLAS.
__global__ void wmma_example(half* a, half* b, float* c, int M, int N, int K) {
// Leading dimensions. Packed with no transpositions.
int lda = M;
int ldb = K;
int ldc = M;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < K; i += WMMA_K) {
int aRow = warpM * WMMA_M;
int aCol = i;
int bRow = i;
int bCol = warpN * WMMA_N;
// Bounds checking
if (aRow < M && aCol < K && bRow < K && bCol < N) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aRow + aCol * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
int cRow = warpM * WMMA_M;
int cCol = warpN * WMMA_N;
wmma::store_matrix_sync(c + cRow + cCol * ldc, acc_frag, ldc, wmma::mem_col_major);
}
void bm_cuda_wmma_example(benchmark::State& state) {
// Must be multiples of 16 for wmma code to work
const int M = 4096;
const int N = 4096;
const int K = 4096;
typedef dim<128, 4> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
pointi<2> warp_dim = {16, 16};
cuda::tensor<half, 2> cmat_a(pointi<2>{M, K});
cuda::tensor<half, 2> cmat_b(pointi<2>{K, N});
cuda::tensor<float, 2> cmat_c(pointi<2>{M, N});
auto grid_dim = cmat_c.shape() / pointi<2>{64, 64};
while (state.KeepRunning()) {
hipLaunchKernelGGL(( wmma_example), dim3(cuda::internal::pointi_to_dim3(grid_dim)), dim3(dim3(128, 4)), 0, 0,
cmat_a.data(), cmat_b.data(), cmat_c.data(), M, N, K);
hipDeviceSynchronize();
}
state.SetItemsProcessed(state.iterations() * M * N * K);
}
BENCHMARK(bm_cuda_wmma_example);
void bm_cuda_wmma(benchmark::State& state) {
// Must be multiples of 16 for wmma code to work
const int M = 4096;
const int N = 4096;
const int K = 4096;
typedef dim<128, 4> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
pointi<2> warp_dim = {16, 16};
cuda::tensor<half, 2> cmat_a(pointi<2>{M, K});
cuda::tensor<half, 2> cmat_b(pointi<2>{K, N});
cuda::tensor<float, 2> cmat_c(pointi<2>{M, N});
auto grid_dim = cmat_c.shape() / pointi<2>{64, 64};
while (state.KeepRunning()) {
cuda::block_for_index<BLOCK_DIM>(grid_dim, [=] __device__(
cuda::block_index<BLOCK_DIM> block_idx) {
int warpM = block_idx.global[0] / 32;
int warpN = block_idx.global[1];
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
// Loop over k
for (int i = 0; i < K; i += WMMA_K) {
int aRow = warpM * WMMA_M;
int aCol = i;
int bRow = i;
int bCol = warpN * WMMA_N;
// Bounds checking
if (aRow < M && aCol < K && bRow < K && bCol < N) {
// Load the inputs
wmma::load_matrix_sync(a_frag, &cmat_a(aRow, aCol), cmat_a.shape()[0]);
wmma::load_matrix_sync(b_frag, &cmat_b(bRow, bCol), cmat_b.shape()[0]);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
auto cRow = warpM * WMMA_M;
auto cCol = warpN * WMMA_N;
wmma::store_matrix_sync(&cmat_c(cRow, cCol), acc_frag, cmat_c.shape()[0],
wmma::mem_col_major);
});
}
state.SetItemsProcessed(state.iterations() * M * N * K);
}
BENCHMARK(bm_cuda_wmma); | 68eb83613e241e59cd828c5d5d425769825428cd.cu |
#include <mma.h>
#include "../bm_config.hpp"
using namespace nvcuda;
// The only dimensions currently supported by WMMA
const int WMMA_M = 16;
const int WMMA_N = 16;
const int WMMA_K = 16;
// // Performs an MxNxK GEMM (C=alpha*A*B + beta*C) assuming:
// // 1) Matrices are packed in memory.
// // 2) M, N and K are multiples of 16.
// // 3) Neither A nor B are transposed.
// // Note: This is NOT a high performance example but is for demonstration purposes only
// // For a high performance code please use the GEMM provided in cuBLAS.
__global__ void wmma_example(half* a, half* b, float* c, int M, int N, int K) {
// Leading dimensions. Packed with no transpositions.
int lda = M;
int ldb = K;
int ldc = M;
// Tile using a 2D grid
int warpM = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
int warpN = (blockIdx.y * blockDim.y + threadIdx.y);
// Declare the fragments
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> c_frag;
wmma::fill_fragment(acc_frag, 0.0f);
// Loop over k
for (int i = 0; i < K; i += WMMA_K) {
int aRow = warpM * WMMA_M;
int aCol = i;
int bRow = i;
int bCol = warpN * WMMA_N;
// Bounds checking
if (aRow < M && aCol < K && bRow < K && bCol < N) {
// Load the inputs
wmma::load_matrix_sync(a_frag, a + aRow + aCol * lda, lda);
wmma::load_matrix_sync(b_frag, b + bRow + bCol * ldb, ldb);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
int cRow = warpM * WMMA_M;
int cCol = warpN * WMMA_N;
wmma::store_matrix_sync(c + cRow + cCol * ldc, acc_frag, ldc, wmma::mem_col_major);
}
void bm_cuda_wmma_example(benchmark::State& state) {
// Must be multiples of 16 for wmma code to work
const int M = 4096;
const int N = 4096;
const int K = 4096;
typedef dim<128, 4> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
pointi<2> warp_dim = {16, 16};
cuda::tensor<half, 2> cmat_a(pointi<2>{M, K});
cuda::tensor<half, 2> cmat_b(pointi<2>{K, N});
cuda::tensor<float, 2> cmat_c(pointi<2>{M, N});
auto grid_dim = cmat_c.shape() / pointi<2>{64, 64};
while (state.KeepRunning()) {
wmma_example<<<cuda::internal::pointi_to_dim3(grid_dim), dim3(128, 4)>>>(
cmat_a.data(), cmat_b.data(), cmat_c.data(), M, N, K);
cudaDeviceSynchronize();
}
state.SetItemsProcessed(state.iterations() * M * N * K);
}
BENCHMARK(bm_cuda_wmma_example);
void bm_cuda_wmma(benchmark::State& state) {
// Must be multiples of 16 for wmma code to work
const int M = 4096;
const int N = 4096;
const int K = 4096;
typedef dim<128, 4> BLOCK_DIM;
pointi<2> block_dim = BLOCK_DIM::value();
pointi<2> warp_dim = {16, 16};
cuda::tensor<half, 2> cmat_a(pointi<2>{M, K});
cuda::tensor<half, 2> cmat_b(pointi<2>{K, N});
cuda::tensor<float, 2> cmat_c(pointi<2>{M, N});
auto grid_dim = cmat_c.shape() / pointi<2>{64, 64};
while (state.KeepRunning()) {
cuda::block_for_index<BLOCK_DIM>(grid_dim, [=] __device__(
cuda::block_index<BLOCK_DIM> block_idx) {
int warpM = block_idx.global[0] / 32;
int warpN = block_idx.global[1];
wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> b_frag;
wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K, float> acc_frag;
// Loop over k
for (int i = 0; i < K; i += WMMA_K) {
int aRow = warpM * WMMA_M;
int aCol = i;
int bRow = i;
int bCol = warpN * WMMA_N;
// Bounds checking
if (aRow < M && aCol < K && bRow < K && bCol < N) {
// Load the inputs
wmma::load_matrix_sync(a_frag, &cmat_a(aRow, aCol), cmat_a.shape()[0]);
wmma::load_matrix_sync(b_frag, &cmat_b(bRow, bCol), cmat_b.shape()[0]);
// Perform the matrix multiplication
wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
}
}
auto cRow = warpM * WMMA_M;
auto cCol = warpN * WMMA_N;
wmma::store_matrix_sync(&cmat_c(cRow, cCol), acc_frag, cmat_c.shape()[0],
wmma::mem_col_major);
});
}
state.SetItemsProcessed(state.iterations() * M * N * K);
}
BENCHMARK(bm_cuda_wmma); |
9c553ece55bd3ecb2bda8a10707c6862d5cd02eb.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
LIBRARY: NDT-NEW DEVELOPER TOOLS
FILE: ndt_timer.cu
AUTHOR: Zehuan Wang
DATA: 12/20/2012
Timer tools
**************************************************************************
**************************************************************************
ROUTINES:
ndt_gpu_timer_start()
ndt_gpu_timer_end()
ndt_cpu_timer_start()
ndt_cpu_timer_end()
ndt_timer_print()
ndt_error_print()
*************************************************************************/
#include "ndt.h"
extern hipError_t ndt_gpu_timer_start(hipEvent_t *ndt_o_pcudaEvent_start, hipEvent_t *ndt_o_pcudaEvent_end)
{
hipError_t ndt_err;
hipEventCreate(ndt_o_pcudaEvent_start);
hipEventCreate(ndt_o_pcudaEvent_end);
ndt_err = hipEventRecord(*ndt_o_pcudaEvent_start, 0);
return ndt_err;
}
extern hipError_t ndt_gpu_timer_end(hipEvent_t ndt_i_cudaEvent_start, hipEvent_t ndt_i_cudaEvent_end, float* ndt_o_pf_time)
{
hipError_t ndt_err;
hipEventRecord(ndt_i_cudaEvent_end,0);
hipEventSynchronize(ndt_i_cudaEvent_end);
hipEventElapsedTime(ndt_o_pf_time,ndt_i_cudaEvent_start,ndt_i_cudaEvent_end);
hipEventDestroy( ndt_i_cudaEvent_start );
ndt_err = hipEventDestroy( ndt_i_cudaEvent_end );
return ndt_err;
}
extern ndt_error ndt_cpu_timer_start(long long* ndt_o_pf_start)
{
if(ndt_o_pf_start == NULL)
return ndt_input_null_pointer;
struct timeval start;
gettimeofday(&start,NULL);
*ndt_o_pf_start = start.tv_sec*1000000+start.tv_usec;
return ndt_success;
}
extern ndt_error ndt_cpu_timer_end(long long ndt_i_f_start, float* ndt_o_pf_time)
{
if(ndt_o_pf_time == NULL)
return ndt_input_null_pointer;
struct timeval end;
gettimeofday(&end,NULL);
*ndt_o_pf_time = (end.tv_sec*1000000+end.tv_usec - ndt_i_f_start)/1000.0;
return ndt_success;
}
extern void ndt_timer_print(float ndt_i_pf_time)
{
printf("The time elapsed is [%fms]\n",ndt_i_pf_time);
return;
}
extern void ndt_error_print(ndt_error ndt_i_error)
{
switch(ndt_i_error)
{
case ndt_success:
printf("nv zh Success\n");
break;
case ndt_input_null_pointer:
printf("nv zh input null pointer\n");
break;
default:
printf("nv zh unknown error\n");
break;
}
return;
}
| 9c553ece55bd3ecb2bda8a10707c6862d5cd02eb.cu | /*************************************************************************
LIBRARY: NDT-NEW DEVELOPER TOOLS
FILE: ndt_timer.cu
AUTHOR: Zehuan Wang
DATA: 12/20/2012
Timer tools
**************************************************************************
**************************************************************************
ROUTINES:
ndt_gpu_timer_start()
ndt_gpu_timer_end()
ndt_cpu_timer_start()
ndt_cpu_timer_end()
ndt_timer_print()
ndt_error_print()
*************************************************************************/
#include "ndt.h"
extern cudaError_t ndt_gpu_timer_start(cudaEvent_t *ndt_o_pcudaEvent_start, cudaEvent_t *ndt_o_pcudaEvent_end)
{
cudaError_t ndt_err;
cudaEventCreate(ndt_o_pcudaEvent_start);
cudaEventCreate(ndt_o_pcudaEvent_end);
ndt_err = cudaEventRecord(*ndt_o_pcudaEvent_start, 0);
return ndt_err;
}
extern cudaError_t ndt_gpu_timer_end(cudaEvent_t ndt_i_cudaEvent_start, cudaEvent_t ndt_i_cudaEvent_end, float* ndt_o_pf_time)
{
cudaError_t ndt_err;
cudaEventRecord(ndt_i_cudaEvent_end,0);
cudaEventSynchronize(ndt_i_cudaEvent_end);
cudaEventElapsedTime(ndt_o_pf_time,ndt_i_cudaEvent_start,ndt_i_cudaEvent_end);
cudaEventDestroy( ndt_i_cudaEvent_start );
ndt_err = cudaEventDestroy( ndt_i_cudaEvent_end );
return ndt_err;
}
extern ndt_error ndt_cpu_timer_start(long long* ndt_o_pf_start)
{
if(ndt_o_pf_start == NULL)
return ndt_input_null_pointer;
struct timeval start;
gettimeofday(&start,NULL);
*ndt_o_pf_start = start.tv_sec*1000000+start.tv_usec;
return ndt_success;
}
extern ndt_error ndt_cpu_timer_end(long long ndt_i_f_start, float* ndt_o_pf_time)
{
if(ndt_o_pf_time == NULL)
return ndt_input_null_pointer;
struct timeval end;
gettimeofday(&end,NULL);
*ndt_o_pf_time = (end.tv_sec*1000000+end.tv_usec - ndt_i_f_start)/1000.0;
return ndt_success;
}
extern void ndt_timer_print(float ndt_i_pf_time)
{
printf("The time elapsed is [%fms]\n",ndt_i_pf_time);
return;
}
extern void ndt_error_print(ndt_error ndt_i_error)
{
switch(ndt_i_error)
{
case ndt_success:
printf("nv zh Success\n");
break;
case ndt_input_null_pointer:
printf("nv zh input null pointer\n");
break;
default:
printf("nv zh unknown error\n");
break;
}
return;
}
|
8d59762a2056330307ff646ca41dd988ff5a19bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#include <memory>
namespace StreamCompaction
{
namespace Efficient
{
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
//taken from printArray in main
void printArray(int n, int* a, bool abridged = false)
{
printf(" [ ");
for (int i = 0; i < n; i++)
{
if (abridged && i + 2 == 15 && n > 16)
{
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
//pow function
__device__ __host__ int kernPow2(int power_of)
{
int result = 1;
for (int i = 0; i < power_of; i++)
{
result <<= 1;
}
return result;
}
//round to the nearest power of 2 (ceiling)
void round_to_nearest_pow(int& n)
{
//round n to the nearest pow of n
n = ::ceil(std::log2(n));
n = kernPow2(n);
}
//up sweep function
__global__ void kernUpSweep(int N, int* odata, int d)
{
int k = threadIdx.x + (blockIdx.x * blockDim.x);
//k is multiplied by pow to index the correct location
k *= kernPow2(d + 1);
if (k > N)
return;
//formula
odata[k + kernPow2(d + 1) - 1] += odata[k + kernPow2(d) - 1];
}
__global__ void kernDownSweep(int N, int* odata, int d)
{
int k = threadIdx.x + (blockIdx.x * blockDim.x);
k *= kernPow2(d + 1);
if (k > N)
return;
//formula
int t = odata[k + kernPow2(d) - 1];
odata[k + kernPow2(d) - 1] = odata[k + kernPow2(d + 1) - 1];
odata[k + kernPow2(d + 1) - 1] += t;
}
#define BLOCK_SIZE 128
//actual implementation of scan
//because timer().startGpuTimer() is called inside
//scan(...) from scatter(...), causing an abort
void scan_impl(int n, int* odata, const int* idata)
{
//round to nearest pow because n might not be a power of 2
round_to_nearest_pow(n);
dim3 kernelBlock((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
int* kern_odata;
hipMalloc(reinterpret_cast<void**>(&kern_odata), n * sizeof(int));
hipMemcpy(kern_odata, idata, n * sizeof(int), hipMemcpyHostToDevice);
//call upsweep
for (int d = 0; static_cast<float>(d) < ::ceil(std::log2(n)); d++)
{
hipLaunchKernelGGL(( kernUpSweep), dim3(kernelBlock), dim3(BLOCK_SIZE), 0, 0, n, kern_odata, d);
//hipMemcpy(odata, kern_odata, n * sizeof(int), hipMemcpyDeviceToHost);
//printArray(n, odata, true);
//printArray(n/2, odata + n/2, true);
}
//printf("=====================\n");
//copy 0 to the end (from formula)
int zero = 0;
hipMemcpy(kern_odata + n - 1, &zero, sizeof(int), hipMemcpyHostToDevice);
//down sweep
for (int d = static_cast<int>(::ceil(std::log2(n))) - 1; d >= 0; d--)
{
hipLaunchKernelGGL(( kernDownSweep), dim3(kernelBlock), dim3(BLOCK_SIZE), 0, 0, n, kern_odata, d);
//hipMemcpy(odata, kern_odata, n * sizeof(int), hipMemcpyDeviceToHost);
//printArray(n, odata, true);
//printArray(n/2, odata + n/2, true);
}
//copy the result
hipMemcpy(odata, kern_odata, n * sizeof(int), hipMemcpyDeviceToHost);
hipFree(kern_odata);
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int* odata, const int* idata)
{
timer().startGpuTimer();
scan_impl(n, odata, idata);
timer().endGpuTimer();
}
__global__ void kernScatter(int N, int* final_array, const int* bool_array, const int* scan_array,
const int* unfiltered_array)
{
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index > N)
return;
//filter only elements that are not zero in the bool map.
if (bool_array[index])
{
//get the index of where the element is suppose to be in the in the final array
const int index_of_filtered_element = scan_array[index];
final_array[index_of_filtered_element] = unfiltered_array[index];
}
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int* odata, const int* idata)
{
timer().startGpuTimer();
dim3 kernelBlock((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
//make another variable that is the power of n (we need this since counter can't iterate through pow n)
int rounded_n = n;
round_to_nearest_pow(rounded_n);
auto counters = std::make_unique<int[]>(rounded_n);
memset(counters.get(), 0, rounded_n * sizeof(int));
//idata
int* unfiltered_array;
hipMalloc(reinterpret_cast<void**>(&unfiltered_array), n * sizeof(int));
hipMemcpy(unfiltered_array, idata, n * sizeof(int), hipMemcpyHostToDevice);
//bool mapping (1 or 0)
int* bool_array;
hipMalloc(reinterpret_cast<void**>(&bool_array), n * sizeof(int));
hipLaunchKernelGGL(( Common::kernMapToBoolean), dim3(kernelBlock), dim3(BLOCK_SIZE), 0, 0, n, bool_array, unfiltered_array);
hipMemcpy(counters.get(), bool_array, n * sizeof(int), hipMemcpyDeviceToHost);
int count = 0;
//iterate through and count
for (int i = 0; i < n; i++)
{
if (counters[i])
{
count++;
}
}
//now round to nearest pow
round_to_nearest_pow(n);
kernelBlock = dim3((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
auto scan_result = std::make_unique<int[]>(n);
memcpy(scan_result.get(), counters.get(), n);
//scan
scan_impl(n, scan_result.get(), counters.get());
int* final_array;
int* scan_array;
hipMalloc(reinterpret_cast<void**>(&final_array), n * sizeof(int));
hipMalloc(reinterpret_cast<void**>(&scan_array), n * sizeof(int));
hipMemcpy(scan_array, scan_result.get(), n * sizeof(int), hipMemcpyHostToDevice);
//do scatter
hipLaunchKernelGGL(( kernScatter), dim3(kernelBlock), dim3(BLOCK_SIZE), 0, 0, n, final_array, bool_array, scan_array, unfiltered_array);
//copy the result back
hipMemcpy(odata, final_array, count * sizeof(int), hipMemcpyDeviceToHost);
hipFree(final_array);
hipFree(bool_array);
hipFree(scan_array);
hipFree(unfiltered_array);
timer().endGpuTimer();
return count;
}
}
}
| 8d59762a2056330307ff646ca41dd988ff5a19bd.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#include <memory>
namespace StreamCompaction
{
namespace Efficient
{
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
//taken from printArray in main
void printArray(int n, int* a, bool abridged = false)
{
printf(" [ ");
for (int i = 0; i < n; i++)
{
if (abridged && i + 2 == 15 && n > 16)
{
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
//pow function
__device__ __host__ int kernPow2(int power_of)
{
int result = 1;
for (int i = 0; i < power_of; i++)
{
result <<= 1;
}
return result;
}
//round to the nearest power of 2 (ceiling)
void round_to_nearest_pow(int& n)
{
//round n to the nearest pow of n
n = std::ceil(std::log2(n));
n = kernPow2(n);
}
//up sweep function
__global__ void kernUpSweep(int N, int* odata, int d)
{
int k = threadIdx.x + (blockIdx.x * blockDim.x);
//k is multiplied by pow to index the correct location
k *= kernPow2(d + 1);
if (k > N)
return;
//formula
odata[k + kernPow2(d + 1) - 1] += odata[k + kernPow2(d) - 1];
}
__global__ void kernDownSweep(int N, int* odata, int d)
{
int k = threadIdx.x + (blockIdx.x * blockDim.x);
k *= kernPow2(d + 1);
if (k > N)
return;
//formula
int t = odata[k + kernPow2(d) - 1];
odata[k + kernPow2(d) - 1] = odata[k + kernPow2(d + 1) - 1];
odata[k + kernPow2(d + 1) - 1] += t;
}
#define BLOCK_SIZE 128
//actual implementation of scan
//because timer().startGpuTimer() is called inside
//scan(...) from scatter(...), causing an abort
void scan_impl(int n, int* odata, const int* idata)
{
//round to nearest pow because n might not be a power of 2
round_to_nearest_pow(n);
dim3 kernelBlock((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
int* kern_odata;
cudaMalloc(reinterpret_cast<void**>(&kern_odata), n * sizeof(int));
cudaMemcpy(kern_odata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
//call upsweep
for (int d = 0; static_cast<float>(d) < std::ceil(std::log2(n)); d++)
{
kernUpSweep<<<kernelBlock, BLOCK_SIZE>>>(n, kern_odata, d);
//cudaMemcpy(odata, kern_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
//printArray(n, odata, true);
//printArray(n/2, odata + n/2, true);
}
//printf("=====================\n");
//copy 0 to the end (from formula)
int zero = 0;
cudaMemcpy(kern_odata + n - 1, &zero, sizeof(int), cudaMemcpyHostToDevice);
//down sweep
for (int d = static_cast<int>(std::ceil(std::log2(n))) - 1; d >= 0; d--)
{
kernDownSweep<<<kernelBlock, BLOCK_SIZE>>>(n, kern_odata, d);
//cudaMemcpy(odata, kern_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
//printArray(n, odata, true);
//printArray(n/2, odata + n/2, true);
}
//copy the result
cudaMemcpy(odata, kern_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(kern_odata);
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int* odata, const int* idata)
{
timer().startGpuTimer();
scan_impl(n, odata, idata);
timer().endGpuTimer();
}
__global__ void kernScatter(int N, int* final_array, const int* bool_array, const int* scan_array,
const int* unfiltered_array)
{
const int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index > N)
return;
//filter only elements that are not zero in the bool map.
if (bool_array[index])
{
//get the index of where the element is suppose to be in the in the final array
const int index_of_filtered_element = scan_array[index];
final_array[index_of_filtered_element] = unfiltered_array[index];
}
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int* odata, const int* idata)
{
timer().startGpuTimer();
dim3 kernelBlock((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
//make another variable that is the power of n (we need this since counter can't iterate through pow n)
int rounded_n = n;
round_to_nearest_pow(rounded_n);
auto counters = std::make_unique<int[]>(rounded_n);
memset(counters.get(), 0, rounded_n * sizeof(int));
//idata
int* unfiltered_array;
cudaMalloc(reinterpret_cast<void**>(&unfiltered_array), n * sizeof(int));
cudaMemcpy(unfiltered_array, idata, n * sizeof(int), cudaMemcpyHostToDevice);
//bool mapping (1 or 0)
int* bool_array;
cudaMalloc(reinterpret_cast<void**>(&bool_array), n * sizeof(int));
Common::kernMapToBoolean<<<kernelBlock, BLOCK_SIZE>>>(n, bool_array, unfiltered_array);
cudaMemcpy(counters.get(), bool_array, n * sizeof(int), cudaMemcpyDeviceToHost);
int count = 0;
//iterate through and count
for (int i = 0; i < n; i++)
{
if (counters[i])
{
count++;
}
}
//now round to nearest pow
round_to_nearest_pow(n);
kernelBlock = dim3((n + BLOCK_SIZE - 1) / BLOCK_SIZE);
auto scan_result = std::make_unique<int[]>(n);
memcpy(scan_result.get(), counters.get(), n);
//scan
scan_impl(n, scan_result.get(), counters.get());
int* final_array;
int* scan_array;
cudaMalloc(reinterpret_cast<void**>(&final_array), n * sizeof(int));
cudaMalloc(reinterpret_cast<void**>(&scan_array), n * sizeof(int));
cudaMemcpy(scan_array, scan_result.get(), n * sizeof(int), cudaMemcpyHostToDevice);
//do scatter
kernScatter<<<kernelBlock, BLOCK_SIZE>>>(n, final_array, bool_array, scan_array, unfiltered_array);
//copy the result back
cudaMemcpy(odata, final_array, count * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(final_array);
cudaFree(bool_array);
cudaFree(scan_array);
cudaFree(unfiltered_array);
timer().endGpuTimer();
return count;
}
}
}
|
7b1de7ea919d209d45cdf33d6f52409f47c294ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/crop_layer.hpp"
namespace caffe9 {
// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
template <typename Dtype>
__global__ void copy_kernel(const int n, const int height, const int width,
const int src_outer_stride, const int src_inner_stride,
const int dest_outer_stride, const int dest_inner_stride,
const Dtype* src, Dtype* dest) {
CUDA_KERNEL_LOOP(index, n) {
int src_start = index / height * src_outer_stride
+ index % height * src_inner_stride;
int dest_start = index / height * dest_outer_stride
+ index % height * dest_inner_stride;
for (int i = 0; i < width; ++i) {
dest[dest_start + i] = src[src_start + i];
}
}
}
// recursive copy function, this function is similar to crop_copy but loops
// over all but the last two dimensions. It is implemented this way to allow
// for ND cropping while still relying on a CUDA kernel for the innermost
// two dimensions for performance reasons.
// An alternative way to implement ND cropping relying more on the kernel
// would require passing offsets to the kernel, which is a bit problematic
// because it is of variable length. Since in the standard (N,C,W,H) case
// N,C are usually not cropped a speedup could be achieved by not looping
// the application of the copy_kernel around these dimensions.
template <typename Dtype>
void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const vector<int>& offsets,
vector<int> indices,
int cur_dim,
const Dtype* src_data,
Dtype* dest_data,
bool is_forward) {
if (cur_dim + 2 < top[0]->num_axes()) {
// We are not yet at the final dimension, call copy recursivley
for (int i = 0; i < top[0]->shape(cur_dim); ++i) {
indices[cur_dim] = i;
crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1,
src_data, dest_data, is_forward);
}
} else {
// We are at the last two dimensions, which are stored continously in memory
// With (N,C,H,W)
// (0,1,2,3) cur_dim -> H
// cur_dim+1 -> W
const int lines = top[0]->shape(cur_dim);
const int height = top[0]->shape(cur_dim);
const int width = top[0]->shape(cur_dim+1);
std::vector<int> ind_off(cur_dim+2, 0);
for (int j = 0; j < cur_dim; ++j) {
ind_off[j] = indices[j] + offsets[j];
}
ind_off[cur_dim] = offsets[cur_dim];
ind_off[cur_dim+1] = offsets[cur_dim+1];
// Compute copy strides
const int src_outer_stride =
bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1);
const int src_inner_stride = bottom[0]->shape(cur_dim+1);
const int dest_outer_stride =
top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1);
const int dest_inner_stride = top[0]->shape(cur_dim+1);
if (is_forward) {
const Dtype* bottom_data = bottom[0]->gpu_data() +
bottom[0]->offset(ind_off);
Dtype* top_data = top[0]->mutable_gpu_data() +
top[0]->offset(indices);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
lines, height, width,
src_outer_stride, src_inner_stride,
dest_outer_stride, dest_inner_stride,
bottom_data, top_data);
} else {
const Dtype* top_diff = top[0]->gpu_diff() +
top[0]->offset(indices);
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() +
bottom[0]->offset(ind_off);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
lines, height, width,
dest_outer_stride, dest_inner_stride,
src_outer_stride, src_inner_stride,
top_diff, bottom_diff);
}
}
}
template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
std::vector<int> indices(top[0]->num_axes(), 0);
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true);
}
template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (propagate_down[0]) {
caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
std::vector<int> indices(top[0]->num_axes(), 0);
crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff,
false);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe9
| 7b1de7ea919d209d45cdf33d6f52409f47c294ce.cu | #include <vector>
#include "caffe/layers/crop_layer.hpp"
namespace caffe9 {
// Copy (one line per thread) from one array to another, with arbitrary
// strides in the last two dimensions.
template <typename Dtype>
__global__ void copy_kernel(const int n, const int height, const int width,
const int src_outer_stride, const int src_inner_stride,
const int dest_outer_stride, const int dest_inner_stride,
const Dtype* src, Dtype* dest) {
CUDA_KERNEL_LOOP(index, n) {
int src_start = index / height * src_outer_stride
+ index % height * src_inner_stride;
int dest_start = index / height * dest_outer_stride
+ index % height * dest_inner_stride;
for (int i = 0; i < width; ++i) {
dest[dest_start + i] = src[src_start + i];
}
}
}
// recursive copy function, this function is similar to crop_copy but loops
// over all but the last two dimensions. It is implemented this way to allow
// for ND cropping while still relying on a CUDA kernel for the innermost
// two dimensions for performance reasons.
// An alternative way to implement ND cropping relying more on the kernel
// would require passing offsets to the kernel, which is a bit problematic
// because it is of variable length. Since in the standard (N,C,W,H) case
// N,C are usually not cropped a speedup could be achieved by not looping
// the application of the copy_kernel around these dimensions.
template <typename Dtype>
void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const vector<int>& offsets,
vector<int> indices,
int cur_dim,
const Dtype* src_data,
Dtype* dest_data,
bool is_forward) {
if (cur_dim + 2 < top[0]->num_axes()) {
// We are not yet at the final dimension, call copy recursivley
for (int i = 0; i < top[0]->shape(cur_dim); ++i) {
indices[cur_dim] = i;
crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1,
src_data, dest_data, is_forward);
}
} else {
// We are at the last two dimensions, which are stored continously in memory
// With (N,C,H,W)
// (0,1,2,3) cur_dim -> H
// cur_dim+1 -> W
const int lines = top[0]->shape(cur_dim);
const int height = top[0]->shape(cur_dim);
const int width = top[0]->shape(cur_dim+1);
std::vector<int> ind_off(cur_dim+2, 0);
for (int j = 0; j < cur_dim; ++j) {
ind_off[j] = indices[j] + offsets[j];
}
ind_off[cur_dim] = offsets[cur_dim];
ind_off[cur_dim+1] = offsets[cur_dim+1];
// Compute copy strides
const int src_outer_stride =
bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1);
const int src_inner_stride = bottom[0]->shape(cur_dim+1);
const int dest_outer_stride =
top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1);
const int dest_inner_stride = top[0]->shape(cur_dim+1);
if (is_forward) {
const Dtype* bottom_data = bottom[0]->gpu_data() +
bottom[0]->offset(ind_off);
Dtype* top_data = top[0]->mutable_gpu_data() +
top[0]->offset(indices);
// NOLINT_NEXT_LINE(whitespace/operators)
copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
lines, height, width,
src_outer_stride, src_inner_stride,
dest_outer_stride, dest_inner_stride,
bottom_data, top_data);
} else {
const Dtype* top_diff = top[0]->gpu_diff() +
top[0]->offset(indices);
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() +
bottom[0]->offset(ind_off);
// NOLINT_NEXT_LINE(whitespace/operators)
copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>(
lines, height, width,
dest_outer_stride, dest_inner_stride,
src_outer_stride, src_inner_stride,
top_diff, bottom_diff);
}
}
}
template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
std::vector<int> indices(top[0]->num_axes(), 0);
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true);
}
template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (propagate_down[0]) {
caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
std::vector<int> indices(top[0]->num_axes(), 0);
crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff,
false);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);
} // namespace caffe9
|
c48032d28eacf154bd3b4b4e53ac4854a1fca8b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "timer.h"
#include <iostream>
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
if (result != hipSuccess) {
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(1);
}
}
__global__ void vectorAddKernel(float* A, float* B, float* Result) {
// code here!
int i = threadIdx.x + blockDim.x * blockIdx.x;
Result[i] = A[i] + B[i];
}
void vectorAddCuda(int n, float* a, float* b, float* result) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
float* deviceA = NULL;
checkCudaCall(hipMalloc((void **) &deviceA, n * sizeof(float)));
if (deviceA == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
float* deviceB = NULL;
checkCudaCall(hipMalloc((void **) &deviceB, n * sizeof(float)));
if (deviceB == NULL) {
checkCudaCall(hipFree(deviceA));
cout << "could not allocate memory!" << endl;
return;
}
float* deviceResult = NULL;
checkCudaCall(hipMalloc((void **) &deviceResult, n * sizeof(float)));
if (deviceResult == NULL) {
checkCudaCall(hipFree(deviceA));
checkCudaCall(hipFree(deviceB));
cout << "could not allocate memory!" << endl;
return;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// copy the original vectors to the GPU
checkCudaCall(hipMemcpy(deviceA, a, n*sizeof(float), hipMemcpyHostToDevice));
checkCudaCall(hipMemcpy(deviceB, b, n*sizeof(float), hipMemcpyHostToDevice));
// execute kernel
hipEventRecord(start, 0);
hipLaunchKernelGGL(( vectorAddKernel), dim3(n/threadBlockSize+1), dim3(threadBlockSize), 0, 0, deviceA, deviceB, deviceResult);
hipEventRecord(stop, 0);
// check whether the kernel invocation was successful
checkCudaCall(hipGetLastError());
// copy result back
checkCudaCall(hipMemcpy(result, deviceResult, n * sizeof(float), hipMemcpyDeviceToHost));
checkCudaCall(hipFree(deviceA));
checkCudaCall(hipFree(deviceB));
checkCudaCall(hipFree(deviceResult));
// print the time the kernel invocation took, without the copies!
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "kernel invocation took \t\t" << elapsedTime << " milliseconds" << endl;
}
int main(int argc, char* argv[]) {
int n = 1000000;
timer vectorAddTimer("vector add timer");
float* a = new float[n];
float* b = new float[n];
float* result = new float[n];
float* result_s = new float[n];
// initialize the vectors.
for(int i=0; i<n; i++) {
a[i] = i;
b[i] = i;
result_s[i]=a[i]+b[i];
}
vectorAddTimer.start();
vectorAddCuda(n, a, b, result);
vectorAddTimer.stop();
cout << "vector-add (CUDA, total): \t\t" << vectorAddTimer << endl;
// verify the resuls
for(int i=0; i<n; i++) {
if(result[i] != result_s[i]) {
cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << (2*i) << endl;
exit(1);
}
}
cout << "results OK!" << endl;
delete[] a;
delete[] b;
delete[] result;
return 0;
}
| c48032d28eacf154bd3b4b4e53ac4854a1fca8b2.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "timer.h"
#include <iostream>
using namespace std;
/* Utility function, use to do error checking.
Use this function like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
if (result != cudaSuccess) {
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(1);
}
}
__global__ void vectorAddKernel(float* A, float* B, float* Result) {
// code here!
int i = threadIdx.x + blockDim.x * blockIdx.x;
Result[i] = A[i] + B[i];
}
void vectorAddCuda(int n, float* a, float* b, float* result) {
int threadBlockSize = 512;
// allocate the vectors on the GPU
float* deviceA = NULL;
checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(float)));
if (deviceA == NULL) {
cout << "could not allocate memory!" << endl;
return;
}
float* deviceB = NULL;
checkCudaCall(cudaMalloc((void **) &deviceB, n * sizeof(float)));
if (deviceB == NULL) {
checkCudaCall(cudaFree(deviceA));
cout << "could not allocate memory!" << endl;
return;
}
float* deviceResult = NULL;
checkCudaCall(cudaMalloc((void **) &deviceResult, n * sizeof(float)));
if (deviceResult == NULL) {
checkCudaCall(cudaFree(deviceA));
checkCudaCall(cudaFree(deviceB));
cout << "could not allocate memory!" << endl;
return;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// copy the original vectors to the GPU
checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(float), cudaMemcpyHostToDevice));
checkCudaCall(cudaMemcpy(deviceB, b, n*sizeof(float), cudaMemcpyHostToDevice));
// execute kernel
cudaEventRecord(start, 0);
vectorAddKernel<<<n/threadBlockSize+1, threadBlockSize>>>(deviceA, deviceB, deviceResult);
cudaEventRecord(stop, 0);
// check whether the kernel invocation was successful
checkCudaCall(cudaGetLastError());
// copy result back
checkCudaCall(cudaMemcpy(result, deviceResult, n * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaCall(cudaFree(deviceA));
checkCudaCall(cudaFree(deviceB));
checkCudaCall(cudaFree(deviceResult));
// print the time the kernel invocation took, without the copies!
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "kernel invocation took \t\t" << elapsedTime << " milliseconds" << endl;
}
int main(int argc, char* argv[]) {
int n = 1000000;
timer vectorAddTimer("vector add timer");
float* a = new float[n];
float* b = new float[n];
float* result = new float[n];
float* result_s = new float[n];
// initialize the vectors.
for(int i=0; i<n; i++) {
a[i] = i;
b[i] = i;
result_s[i]=a[i]+b[i];
}
vectorAddTimer.start();
vectorAddCuda(n, a, b, result);
vectorAddTimer.stop();
cout << "vector-add (CUDA, total): \t\t" << vectorAddTimer << endl;
// verify the resuls
for(int i=0; i<n; i++) {
if(result[i] != result_s[i]) {
cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << (2*i) << endl;
exit(1);
}
}
cout << "results OK!" << endl;
delete[] a;
delete[] b;
delete[] result;
return 0;
}
|
965487f59cc877500f1cfdc7bc40d90fca61cb41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Core/Geometry.h>
#include <Core/Camera.h>
#include <Core/Scene.h>
#include <Core/Image.h>
#include <Core/Medium.h>
#include <Core/BVH.h>
#include <Shapes/Sphere.h>
#include <Utility/MathUtility.h>
#include <Core/Include.h>
// Define this to turn on error checking
//#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(hipError_t err, const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
if (hipSuccess != err) {
fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
inline void __cudaCheckError(const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if (hipSuccess != err) {
fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString(err));
exit(-1);
}
#endif
return;
}
//////////////////////////////////////////////////////////////////////////
// Settings, TODO: Move to proper places
//////////////////////////////////////////////////////////////////////////
#define BLOCK_SIZE 128
#define MAX_RAY_DEPTH 41 // Should probably be part of the HRenderer
#define BVH_STACK_SIZE 64
#define STREAM_COMPACTION
using namespace HMathUtility;
// Used to convert color to a format that OpenGL can display
// Represents the color in memory as either 1 float or 4 chars (32 bits)
union HColor {
float value;
struct { unsigned char x, y, z, w; } components;
};
// Stores computed Fresnel reflection and transmission
struct HFresnel {
float reflection;
float transmission;
};
// Stream compaction predicate
struct IsNegative {
__host__ __device__ bool operator()(const int &x) { return x < 0; }
};
namespace HKernels {
//////////////////////////////////////////////////////////////////////////
// Device Kernels
//////////////////////////////////////////////////////////////////////////
__device__ glm::vec3 HemisphereCosSample(const glm::vec3 &normal,
const float r1,
const float r2) {
float c = sqrtf(r1);
float phi = r2 * M_2PI;
glm::vec3 w = fabs(normal.x) < M_SQRT1_3 ? glm::vec3(1, 0, 0) : (fabs(normal.y) < M_SQRT1_3 ? glm::vec3(0, 1, 0) : glm::vec3(0, 0, 1));
glm::vec3 u = normalize(cross(normal, w));
glm::vec3 v = cross(normal, u);
return sqrtf(1.0f - r1) * normal + (cosf(phi) * c * u) + (sinf(phi) * c * v);
}
__device__ glm::vec3 ScatterSample(const float r1, const float r2) {
float cosTheta = 2.0f*r1 - 1.0f;
float sinTheta = sqrtf(1 - cosTheta*cosTheta);
float phi = M_2PI * r2;
return glm::vec3(cosTheta, cosf(phi)*sinTheta, sinf(phi)*sinTheta);
}
__device__ glm::vec3 Transmission(glm::vec3 absorptionMultiplier, float distance) {
glm::vec3 res;
res.x = powf(M_E, -absorptionMultiplier.x * distance);
res.y = powf(M_E, -absorptionMultiplier.y * distance);
res.z = powf(M_E, -absorptionMultiplier.z * distance);
return res;
}
__device__ inline glm::vec3 ReflectionDir(const glm::vec3 &normal,
const glm::vec3 &incident) {
return 2.0f * dot(normal, incident) * normal - incident;
}
__device__ glm::vec3 TransmissionDir(const glm::vec3 &normal,
const glm::vec3 &incident,
float eta1, float eta2) {
float cosTheta1 = dot(normal, incident);
float r = eta1 / eta2;
float radicand = 1.0f - powf(r, 2.0f) * (1.0f - powf(cosTheta1, 2.0f));
if (radicand < 0.0f) { // total internal reflection
return glm::vec3(0.0f); //temp, dont know what to do here
}
float cosTheta2 = sqrtf(radicand);
return r*(-1.0f*incident) + (r*cosTheta1 - cosTheta2)*normal;
}
__device__ HFresnel fresnelEquations(const glm::vec3 &normal,
const glm::vec3 &incidentDir,
float eta1, float eta2,
const glm::vec3 &reflectionDir,
const glm::vec3 &transmissionDir) {
HFresnel fresnel;
float cosTheta1 = dot(normal, incidentDir);
float cosTheta2 = dot(-normal, transmissionDir);
float s1 = eta1*cosTheta1;
float s2 = eta2*cosTheta2;
float p1 = eta1*cosTheta2;
float p2 = eta2*cosTheta1;
// Average s- and p-polarization
fresnel.reflection = 0.5f*(powf((s1-s2)/(s1+s2), 2.0f) + powf((p1-p2)/(p1+p2), 2.0f));
fresnel.transmission = 1.0f - fresnel.reflection;
return fresnel;
}
__device__ void TraverseBVH(HRay& ray,
BVHNode* root,
float& t,
HSurfaceInteraction& intersection,
HTriangle* triangles,
int& nearestTriIdx) {
BVHNode* stack[64];
BVHNode** stackPtr = stack;
*stackPtr++ = nullptr;
BVHNode* node = root;
do {
BVHNode* leftChild = node->leftChild;
BVHNode* rightChild = node->rightChild;
bool intersectsLeft = leftChild->boundingBox.Intersect(ray);
bool intersectsRight = rightChild->boundingBox.Intersect(ray);
bool isLeafLeft = leftChild->IsLeaf();
bool isLeafRight = rightChild->IsLeaf();
// TODO: Important! Don't traverse sub-box if we have found a triangle
// intersection that is closer than hit point on box
if (intersectsLeft && isLeafLeft) {
if (triangles[leftChild->triangleIdx].Intersect(ray, t, intersection)) {
nearestTriIdx = leftChild->triangleIdx;
}
}
if (intersectsRight && isLeafRight) {
if (triangles[rightChild->triangleIdx].Intersect(ray, t, intersection)) {
nearestTriIdx = rightChild->triangleIdx;
}
}
bool traverseLeft = intersectsLeft && !isLeafLeft;
bool traverseRight = intersectsRight && !isLeafRight;
if (!traverseLeft && !traverseRight) {
node = *--stackPtr;
}
else {
node = (traverseLeft) ? leftChild : rightChild;
if (traverseLeft && traverseRight) {
*stackPtr++ = rightChild;
}
}
} while (node != nullptr);
}
__device__ void VisualizeBVH(HRay& ray,
BVHNode* root,
glm::vec3* colorMask,
int pixelIdx) {
BVHNode* stack[64];
BVHNode** stackPtr = stack;
*stackPtr++ = nullptr;
BVHNode* node = root;
do {
BVHNode* leftChild = node->leftChild;
BVHNode* rightChild = node->rightChild;
bool intersectsLeft = leftChild->boundingBox.Intersect(ray);
bool intersectsRight = rightChild->boundingBox.Intersect(ray);
bool isLeafLeft = leftChild->IsLeaf();
bool isLeafRight = rightChild->IsLeaf();
if (intersectsLeft && isLeafLeft) {
colorMask[pixelIdx] *= 0.95f;
}
if (intersectsRight && isLeafRight) {
colorMask[pixelIdx] *= 0.95f;
}
bool traverseLeft = intersectsLeft && !isLeafLeft;
bool traverseRight = intersectsRight && !isLeafRight;
if (!traverseLeft && !traverseRight) {
node = *--stackPtr;
}
else {
colorMask[pixelIdx] *= 0.95f;
node = (traverseLeft) ? leftChild : rightChild;
if (traverseLeft && traverseRight) {
*stackPtr++ = rightChild;
}
}
} while (node != nullptr);
}
__device__ void TraverseBVHRecursive(HRay& ray,
BVHNode* currentNode,
float& t,
HSurfaceInteraction& intersection,
HTriangle* triangles,
int numTriangles,
int& nearestTriIdx,
glm::vec3* colorMask,
int i) {
if (currentNode->boundingBox.Intersect(ray)) {
colorMask[i] *= 0.8f;
if (currentNode->IsLeaf()) {
if (triangles[currentNode->triangleIdx].Intersect(ray, t, intersection)) {
nearestTriIdx = currentNode->triangleIdx;
}
}
else {
BVHNode* leftChild = currentNode->leftChild;
BVHNode* rightChild = currentNode->rightChild;
TraverseBVHRecursive(ray, leftChild, t, intersection, triangles, numTriangles, nearestTriIdx, colorMask, i);
TraverseBVHRecursive(ray, rightChild, t, intersection, triangles, numTriangles, nearestTriIdx, colorMask, i);
}
}
}
//////////////////////////////////////////////////////////////////////////
// Global Kernels
//////////////////////////////////////////////////////////////////////////
__global__ void InitData(unsigned int numPixels,
int* livePixels,
glm::vec3* colorMask,
glm::vec3* accumulatedColor) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numPixels) {
livePixels[i] = i;
colorMask[i] = glm::vec3(1.0f, 1.0f, 1.0f);
accumulatedColor[i] = glm::vec3(0.0f, 0.0f, 0.0f);
}
}
__global__ void InitCameraRays(HRay* rays,
HCameraData* cameraData,
unsigned int currentSeed) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < cameraData->resolution.x*cameraData->resolution.y) {
int x = i % cameraData->resolution.x;
int y = cameraData->resolution.y - (i - x) / cameraData->resolution.x - 1;
// Store camera coordinate system
glm::vec3 position = cameraData->position;
glm::vec3 forward = cameraData->forward;
glm::vec3 right = cameraData->right;
glm::vec3 up = cameraData->up;
// Initialize random number generator
thrust::default_random_engine rng(currentSeed + TWHash(i));
thrust::uniform_real_distribution<float> uniform(0.0f, 1.0f);
// Generate random pixel offsets for anti-aliasing
// Expected value is 0.5 i.e. middle of pixel
float dx = uniform(rng) - 0.5f;
float dy = uniform(rng) - 0.5f;
// Compute point on image plane and account for focal distance
glm::vec3 pointOnImagePlane = position + (forward
+ (2.0f * (dx + x) / (cameraData->resolution.x - 1.0f) - 1.0f) * right * tanf(cameraData->FOV.x * M_PI_2 * M_1_180)
+ (2.0f * (dy + y) / (cameraData->resolution.y - 1.0f) - 1.0f) * up * tanf(cameraData->FOV.y * M_PI_2 * M_1_180))
* cameraData->focalDistance;
float apertureRadius = cameraData->apertureRadius;
if (apertureRadius > M_EPSILON) {
// Sample a point on the aperture
float angle = M_2PI * uniform(rng);
float distance = apertureRadius * sqrtf(uniform(rng));
position += (cosf(angle) * right + sinf(angle) * up) * distance;
}
// Initialize ray
HRay ray;
ray.origin = position;
ray.direction = normalize(pointOnImagePlane - position);
ray.enteredMedium = HMedium();
ray.currentMedium = HMedium();
ray.transmitted = false;
rays[i] = ray;
}
}
// One path-tracing bounce per live pixel.
// Launch: 1D grid covering numLivePixels threads (grid rounded up to whole blocks).
// Finds the nearest hit (triangles via BVH, spheres brute force), applies any
// participating-media scattering/absorption, then scatters the ray as specular
// reflection, transmission or cosine-weighted diffuse. Rays whose color mask
// falls below M_EPSILON are marked dead by writing -1 into livePixels[i].
__global__ void TraceKernel(glm::vec3* accumulatedColor,
                            glm::vec3* colorMask,
                            int numLivePixels,
                            int* livePixels,
                            HRay* rays,
                            HSphere* spheres,
                            int numSpheres,
                            BVHNode* rootNode,
                            HTriangle* triangles,
                            int numTriangles,
                            int currentSeed) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Bounds check must come first: the previous version read livePixels[i]
    // before it, an out-of-bounds access for tail threads of the last block.
    if (i >= numLivePixels) return;
#if !(defined(_WIN64) && defined(STREAM_COMPACTION))
    // Without stream compaction dead rays stay in the list, flagged with -1.
    if (livePixels[i] == -1) return;
#endif
    int pixelIdx = livePixels[i];
    HRay currentRay = rays[pixelIdx];
    // Per-bounce RNG, decorrelated across pixels and passes.
    thrust::default_random_engine rng(TWHash(pixelIdx) * currentSeed);
    thrust::uniform_real_distribution<float> uniform(0.0f, 1.0f);
    // Nearest-intersection search; indices initialized defensively so they can
    // never be read uninitialized (they were not initialized before).
    float t = M_INF;
    HSurfaceInteraction intersection;
    int nearestSphereIdx = -1;
    int nearestTriIdx = -1;
    bool nearestIsTri = true;
    TraverseBVH(currentRay, rootNode, t, intersection, triangles, nearestTriIdx);
    // Sphere intersection (brute force over the small sphere list)
    for (int sphereIdx = 0; sphereIdx < numSpheres; sphereIdx++) {
        if (spheres[sphereIdx].Intersect(currentRay, t, intersection)) {
            nearestSphereIdx = sphereIdx;
            nearestIsTri = false;
        }
    }
    // Subsurface / participating-media scattering inside the current medium.
    HScatteringProperties scattering = currentRay.currentMedium.scatteringProperties;
    if (scattering.reducedScatteringCoefficient > 0 ||
        dot(scattering.absorptionCoefficient, scattering.absorptionCoefficient) > M_EPSILON) {
        // Free-flight distance sampled from the exponential distribution.
        float scatteringDistance = -log(uniform(rng)) / scattering.reducedScatteringCoefficient;
        if (scatteringDistance < t) {
            // Scattering event inside the medium, before any surface is reached.
            currentRay.origin = currentRay(scatteringDistance);
            currentRay.direction = ScatterSample(uniform(rng), uniform(rng));
            rays[pixelIdx] = currentRay;
            // Absorption along the traveled segment.
            colorMask[pixelIdx] *= Transmission(scattering.absorptionCoefficient, scatteringDistance);
            if (length(colorMask[pixelIdx]) < M_EPSILON) {
                // Mark ray for termination
                livePixels[i] = -1;
            }
            return;
        }
        else {
            // Reached the surface: absorb along the full segment.
            colorMask[pixelIdx] *= Transmission(scattering.absorptionCoefficient, t);
        }
    }
    // Treat intersection
    if (t < M_INF) {
        // Retrieve the material of whatever was hit.
        HMaterial material;
        if (nearestIsTri) {
            material = triangles[nearestTriIdx].material;
        }
        else {
            material = spheres[nearestSphereIdx].material;
        }
        // TODO: Handle roundoff errors properly to avoid self-intersection instead
        // of the fixed 0.001f offsets. See PBRT v3, http://pbrt.org/fp-error-section.pdf
        glm::vec3 incidentDir = -currentRay.direction;
        // TEMP: flip backfacing normals toward the incident direction.
        if (dot(incidentDir, intersection.normal) < 0.0f) {
            intersection.normal = -1.0f * intersection.normal;
        }
        // Media on either side of the boundary. We assume that all transmissive
        // materials are closed disjoint manifolds.
        HMedium incidentMedium = currentRay.currentMedium;
        HMedium transmittedMedium;
        if (currentRay.transmitted) {
            // Ray is coming from inside of the object it has entered
            transmittedMedium = currentRay.enteredMedium;
        }
        else {
            // Ray is approaching an object
            transmittedMedium = material.medium;
        }
        // Compute reflection and transmission directions using Snell's law
        glm::vec3 reflectionDir = ReflectionDir(intersection.normal, incidentDir);
        glm::vec3 transmissionDir = TransmissionDir(intersection.normal, incidentDir,
                                                    incidentMedium.eta,
                                                    transmittedMedium.eta);
        // Russian roulette on the Fresnel reflectance for specular materials.
        bool doReflect = (material.materialType & SPECULAR) &&
            (uniform(rng) < fresnelEquations(intersection.normal,
                                             incidentDir,
                                             incidentMedium.eta,
                                             transmittedMedium.eta,
                                             reflectionDir,
                                             transmissionDir).reflection);
        if (doReflect || material.materialType & REFLECTION) { // mirror reflection
            colorMask[pixelIdx] *= material.specular;
            currentRay.origin = intersection.position + 0.001f * intersection.normal;
            currentRay.direction = reflectionDir;
            rays[pixelIdx] = currentRay;
        }
        else if (material.materialType & TRANSMISSION) { // refraction
            currentRay.origin = intersection.position - 0.001f * intersection.normal;
            currentRay.direction = transmissionDir;
            currentRay.enteredMedium = currentRay.currentMedium;
            currentRay.currentMedium = transmittedMedium;
            currentRay.transmitted = !currentRay.transmitted;
            rays[pixelIdx] = currentRay;
        }
        else { // diffuse bounce
            accumulatedColor[pixelIdx] += colorMask[pixelIdx] * material.emission;
            colorMask[pixelIdx] *= material.diffuse;
            // Compute new ray direction and origin
            currentRay.origin = intersection.position + 0.001f * intersection.normal;
            currentRay.direction = HemisphereCosSample(intersection.normal,
                                                       uniform(rng),
                                                       uniform(rng));
            rays[pixelIdx] = currentRay;
        }
    }
    else {
        // The ray didn't intersect the scene: add sky gradient, kill the mask.
        accumulatedColor[pixelIdx] += colorMask[pixelIdx] * 0.5f * (1.0f - 0.5f * fabs(dot(currentRay.direction, glm::vec3(0.0f, 1.0f, 0.0f)))) * glm::vec3(0.69f, 0.86f, 0.89f);
        colorMask[pixelIdx] = glm::vec3(0.0f);
    }
    if (length(colorMask[pixelIdx]) < M_EPSILON) {
        // Mark ray for termination
        livePixels[i] = -1;
    }
}
// Folds this pass's radiance into the running average, applies gamma 2.2,
// and packs the result as a 32-bit color for the OpenGL display path.
__global__ void AccumulateKernel(glm::vec3* pixels,
                                 glm::vec3* accumulationBuffer,
                                 glm::vec3* accumulatedColor,
                                 HCameraData* cameraData,
                                 unsigned int passCounter) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < cameraData->resolution.x * cameraData->resolution.y) {
        int x = i % cameraData->resolution.x;
        // Flip y to OpenGL's bottom-left origin.
        int y = cameraData->resolution.y - (i - x) / cameraData->resolution.x - 1;
        // Incremental mean over passes (passCounter is 1-based).
        accumulationBuffer[i] = (accumulationBuffer[i] * (float)(passCounter - 1) + accumulatedColor[i]) / (float)passCounter;
        // Gamma-correct (1/2.2) and quantize each channel to a byte.
        HColor color;
        color.components.x = (unsigned char)(powf(clamp(accumulationBuffer[i].x, 0.0f, 1.0f), 1 / 2.2f) * 255);
        color.components.y = (unsigned char)(powf(clamp(accumulationBuffer[i].y, 0.0f, 1.0f), 1 / 2.2f) * 255);
        color.components.z = (unsigned char)(powf(clamp(accumulationBuffer[i].z, 0.0f, 1.0f), 1 / 2.2f) * 255);
        // NOTE(review): alpha byte is 1, not 255 -- presumably ignored by the
        // display path; confirm against the OpenGL consumer.
        color.components.w = 1;
        // The packed 32-bit color rides in the z component (bit-cast via HColor).
        pixels[i] = glm::vec3(x, y, color.value);
    }
}
// Unpacks the 32-bit color stored (bit-cast into a float) in each pixel's z
// component and writes it out as a tightly packed RGB byte triplet.
__global__ void SavePNG(unsigned char* colorBytes,
                        glm::vec3* pixels,
                        unsigned int width,
                        unsigned int height) {
    int pixel = blockDim.x * blockIdx.x + threadIdx.x;
    if (pixel >= width * height) return;
    HColor packed;
    packed.value = pixels[pixel].z;
    unsigned char* dst = colorBytes + 3 * pixel;
    dst[0] = (unsigned char)packed.components.x;
    dst[1] = (unsigned char)packed.components.y;
    dst[2] = (unsigned char)packed.components.z;
}
//////////////////////////////////////////////////////////////////////////
// External CUDA access launch function
//////////////////////////////////////////////////////////////////////////
// Renders one full pass: resets per-pass buffers, generates camera rays, traces
// up to MAX_RAY_DEPTH bounces (with optional stream compaction of dead rays),
// then accumulates and packs the result into image->pixels.
extern "C" void LaunchRenderKernel(HImage* image,
                                   glm::vec3* accumulatedColor,
                                   glm::vec3* colorMask,
                                   HCameraData* cameraData,
                                   unsigned int passCounter,
                                   HRay* rays,
                                   HSphere* spheres,
                                   unsigned int numSpheres,
                                   BVHNode* rootNode,
                                   HTriangle* triangles,
                                   int numTriangles) {
    unsigned int blockSize = BLOCK_SIZE;
    unsigned int gridSize = (image->numPixels + blockSize - 1) / blockSize;
    unsigned int numLivePixels = image->numPixels;
    int* livePixels = nullptr;
    // TODO: allocating per call is wasteful; keep a persistent buffer once
    // device-side reallocation after stream compaction is figured out.
    checkCudaErrors(hipMalloc(&livePixels, image->numPixels * sizeof(int)));
    // Reset per-pass state: live-pixel list, color masks, accumulated color.
    hipLaunchKernelGGL(( InitData), dim3(gridSize), dim3(blockSize), 0, 0, numLivePixels,
                       livePixels,
                       colorMask,
                       accumulatedColor);
    CudaCheckError();
    // Seed from the pass counter and wall-clock milliseconds.
    SYSTEMTIME time;
    GetSystemTime(&time);
    long time_ms = (time.wSecond * 1000) + time.wMilliseconds;
    int hashedPassCounter = TWHash(passCounter);
    int currentSeed = hashedPassCounter + TWHash(time_ms);
    // Generate initial rays from camera
    hipLaunchKernelGGL(( InitCameraRays), dim3(gridSize), dim3(blockSize), 0, 0, rays,
                       cameraData,
                       currentSeed);
    CudaCheckError();
    // Trace surviving rays until none are left or the maximum depth is reached.
    for (int rayDepth = 0; rayDepth < MAX_RAY_DEPTH; rayDepth++) {
        // All rays terminated: a zero-block launch is an invalid configuration,
        // so stop early (the previous version kept launching).
        if (numLivePixels == 0) break;
        unsigned int newGridSize = (numLivePixels + blockSize - 1) / blockSize;
        // Fresh seed per bounce; reuse time_ms instead of shadowing it.
        GetSystemTime(&time);
        time_ms = (time.wSecond * 1000) + time.wMilliseconds;
        currentSeed = hashedPassCounter + TWHash(time_ms) + rayDepth;
        // Ray propagation kernel
        hipLaunchKernelGGL(( TraceKernel), dim3(newGridSize), dim3(blockSize), 0, 0, accumulatedColor,
                           colorMask,
                           numLivePixels,
                           livePixels,
                           rays,
                           spheres,
                           numSpheres,
                           rootNode,
                           triangles,
                           numTriangles,
                           currentSeed);
        CudaCheckError();
        // Remove terminated rays (-1 entries) so later bounces launch fewer threads.
#if defined(_WIN64) && defined(STREAM_COMPACTION)
        thrust::device_ptr<int> devPtr(livePixels);
        thrust::device_ptr<int> endPtr = thrust::remove_if(devPtr, devPtr + numLivePixels, IsNegative());
        numLivePixels = endPtr.get() - livePixels;
#endif
#ifndef NDEBUG
        if (passCounter == 1) {
            std::cout << "Current Ray depth: " << rayDepth << std::endl;
            std::cout << "Number of live rays: " << numLivePixels << std::endl;
            std::cout << "Number of thread blocks: " << newGridSize << std::endl;
        }
#endif // NDEBUG
    }
    // TODO: Move the accumulation and OpenGL interoperability into the core loop.
    // Accumulate into the running average, gamma-correct and pack for display.
    hipLaunchKernelGGL(( AccumulateKernel), dim3(gridSize), dim3(blockSize), 0, 0, image->pixels,
                       image->accumulationBuffer,
                       accumulatedColor,
                       cameraData,
                       passCounter);
    CudaCheckError();
    checkCudaErrors(hipFree(livePixels));
}
// Host wrapper for SavePNG: synchronizes before and after the launch so
// colorBytes is fully written when this function returns.
extern "C" void LaunchSavePNGKernel(unsigned char* colorBytes,
                                    glm::vec3* pixels,
                                    unsigned int width,
                                    unsigned int height) {
    const unsigned int threadsPerBlock = BLOCK_SIZE;
    const unsigned int numBlocks = (width * height + threadsPerBlock - 1) / threadsPerBlock;
    checkCudaErrors(hipDeviceSynchronize());
    hipLaunchKernelGGL(( SavePNG), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, colorBytes, pixels, width, height);
    checkCudaErrors(hipDeviceSynchronize());
}
}
| 965487f59cc877500f1cfdc7bc40d90fca61cb41.cu | #include <Core/Geometry.h>
#include <Core/Camera.h>
#include <Core/Scene.h>
#include <Core/Image.h>
#include <Core/Medium.h>
#include <Core/BVH.h>
#include <Shapes/Sphere.h>
#include <Utility/MathUtility.h>
#include <Core/Include.h>
// Define this to turn on error checking
//#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Aborts with file/line context when a CUDA API call returned an error.
// Compiled to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall(cudaError err, const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif
}
// Reports the most recent CUDA error and -- via an extra device sync -- any
// pending asynchronous kernel error. No-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError(const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
    // More careful checking: the sync surfaces async execution errors too,
    // at a performance cost. Comment away if needed.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif
}
//////////////////////////////////////////////////////////////////////////
// Settings, TODO: Move to proper places
//////////////////////////////////////////////////////////////////////////
#define BLOCK_SIZE 128
#define MAX_RAY_DEPTH 41 // Should probably be part of the HRenderer
#define BVH_STACK_SIZE 64
#define STREAM_COMPACTION
using namespace HMathUtility;
// Used to convert color to a format that OpenGL can display.
// Bit-casts between a 32-bit packed color (four bytes x/y/z/w) and the float
// that carries it through glm::vec3 buffers (see AccumulateKernel / SavePNG).
union HColor {
    float value;
    struct { unsigned char x, y, z, w; } components;
};
// Stores computed Fresnel reflection and transmission factors; as produced by
// fresnelEquations the two sum to 1.
struct HFresnel {
    float reflection;
    float transmission;
};
// Stream compaction predicate: selects live-pixel entries marked dead (-1)
// for removal by thrust::remove_if.
struct IsNegative {
    __host__ __device__ bool operator()(const int &x) { return x < 0; }
};
namespace HKernels {
//////////////////////////////////////////////////////////////////////////
// Device Kernels
//////////////////////////////////////////////////////////////////////////
// Cosine-weighted hemisphere sample around `normal` from two uniform random
// numbers r1, r2 in [0,1). Builds an orthonormal basis (u, v, normal) and
// returns a unit direction for diffuse scattering.
__device__ glm::vec3 HemisphereCosSample(const glm::vec3 &normal,
                                         const float r1,
                                         const float r2) {
    float c = sqrtf(r1);
    float phi = r2 * M_2PI;
    // Pick the world axis least aligned with the normal for a stable basis.
    glm::vec3 w = fabs(normal.x) < M_SQRT1_3 ? glm::vec3(1, 0, 0) : (fabs(normal.y) < M_SQRT1_3 ? glm::vec3(0, 1, 0) : glm::vec3(0, 0, 1));
    glm::vec3 u = normalize(cross(normal, w));
    glm::vec3 v = cross(normal, u);
    return sqrtf(1.0f - r1) * normal + (cosf(phi) * c * u) + (sinf(phi) * c * v);
}
// Uniform direction on the unit sphere from two uniform randoms r1, r2 in [0,1).
__device__ glm::vec3 ScatterSample(const float r1, const float r2) {
    float z = 2.0f * r1 - 1.0f;        // cos(theta), uniform in [-1, 1]
    float s = sqrtf(1.0f - z * z);     // sin(theta)
    float azimuth = M_2PI * r2;
    return glm::vec3(z, cosf(azimuth) * s, sinf(azimuth) * s);
}
// Beer-Lambert attenuation: exp(-coefficient * distance) per color channel.
__device__ glm::vec3 Transmission(glm::vec3 absorptionMultiplier, float distance) {
    // expf(x) replaces the original powf(M_E, x): same math, cheaper on device.
    return glm::vec3(expf(-absorptionMultiplier.x * distance),
                     expf(-absorptionMultiplier.y * distance),
                     expf(-absorptionMultiplier.z * distance));
}
// Mirror `incident` (which points away from the surface) about `normal`.
__device__ inline glm::vec3 ReflectionDir(const glm::vec3 &normal,
                                          const glm::vec3 &incident) {
    float cosTheta = dot(normal, incident);
    return 2.0f * cosTheta * normal - incident;
}
// Refraction direction by Snell's law. `incident` points away from the surface
// and `normal` toward the incident side; eta1/eta2 are the refractive indices
// on the incident/transmitted sides. Returns vec3(0) on total internal
// reflection -- callers currently rely on this sentinel; a real TIR path is TODO.
__device__ glm::vec3 TransmissionDir(const glm::vec3 &normal,
                                     const glm::vec3 &incident,
                                     float eta1, float eta2) {
    float cosTheta1 = dot(normal, incident);
    float r = eta1 / eta2;
    // radicand = 1 - r^2 (1 - cos^2 theta1); plain products replace powf(x, 2.0f).
    float radicand = 1.0f - r * r * (1.0f - cosTheta1 * cosTheta1);
    if (radicand < 0.0f) {
        // Total internal reflection: no transmitted ray exists.
        return glm::vec3(0.0f);
    }
    float cosTheta2 = sqrtf(radicand);
    return r * (-1.0f * incident) + (r * cosTheta1 - cosTheta2) * normal;
}
// Unpolarized Fresnel reflectance/transmittance at a dielectric boundary,
// averaging the s- and p-polarized terms. Directions follow TransmissionDir's
// conventions; transmissionDir supplies the transmitted-side cosine.
// (reflectionDir is currently unused but kept for interface stability.)
__device__ HFresnel fresnelEquations(const glm::vec3 &normal,
                                     const glm::vec3 &incidentDir,
                                     float eta1, float eta2,
                                     const glm::vec3 &reflectionDir,
                                     const glm::vec3 &transmissionDir) {
    HFresnel fresnel;
    float cosTheta1 = dot(normal, incidentDir);
    float cosTheta2 = dot(-normal, transmissionDir);
    float s1 = eta1 * cosTheta1;
    float s2 = eta2 * cosTheta2;
    float p1 = eta1 * cosTheta2;
    float p2 = eta2 * cosTheta1;
    // Average s- and p-polarization; squares as products instead of powf(x, 2.0f).
    float rs = (s1 - s2) / (s1 + s2);
    float rp = (p1 - p2) / (p1 + p2);
    fresnel.reflection = 0.5f * (rs * rs + rp * rp);
    fresnel.transmission = 1.0f - fresnel.reflection;
    return fresnel;
}
// Iterative BVH traversal: finds the nearest triangle intersection along `ray`,
// updating t, intersection and nearestTriIdx in place. Children are tested in
// pairs; leaves are intersected immediately, internal nodes are stacked.
// NOTE(review): the root itself is never leaf-tested (assumed internal), and the
// 64-entry stack has no overflow check -- assumes tree depth < 64; confirm.
__device__ void TraverseBVH(HRay& ray,
                            BVHNode* root,
                            float& t,
                            HSurfaceInteraction& intersection,
                            HTriangle* triangles,
                            int& nearestTriIdx) {
    BVHNode* stack[64];
    BVHNode** stackPtr = stack;
    *stackPtr++ = nullptr;  // sentinel: popping it ends the loop
    BVHNode* node = root;
    do {
        BVHNode* leftChild = node->leftChild;
        BVHNode* rightChild = node->rightChild;
        bool intersectsLeft = leftChild->boundingBox.Intersect(ray);
        bool intersectsRight = rightChild->boundingBox.Intersect(ray);
        bool isLeafLeft = leftChild->IsLeaf();
        bool isLeafRight = rightChild->IsLeaf();
        // TODO: Important! Don't traverse sub-box if we have found a triangle
        // intersection that is closer than hit point on box
        if (intersectsLeft && isLeafLeft) {
            if (triangles[leftChild->triangleIdx].Intersect(ray, t, intersection)) {
                nearestTriIdx = leftChild->triangleIdx;
            }
        }
        if (intersectsRight && isLeafRight) {
            if (triangles[rightChild->triangleIdx].Intersect(ray, t, intersection)) {
                nearestTriIdx = rightChild->triangleIdx;
            }
        }
        bool traverseLeft = intersectsLeft && !isLeafLeft;
        bool traverseRight = intersectsRight && !isLeafRight;
        if (!traverseLeft && !traverseRight) {
            // Dead end: pop the next pending node (or the nullptr sentinel).
            node = *--stackPtr;
        }
        else {
            // Descend left first; defer the right child when both need a visit.
            node = (traverseLeft) ? leftChild : rightChild;
            if (traverseLeft && traverseRight) {
                *stackPtr++ = rightChild;
            }
        }
    } while (node != nullptr);
}
// Debug visualization of BVH traversal cost: every box visit darkens the
// pixel's color mask by 5%, so expensive rays render darker. Mirrors
// TraverseBVH's stack-based walk but performs no triangle intersections.
__device__ void VisualizeBVH(HRay& ray,
                             BVHNode* root,
                             glm::vec3* colorMask,
                             int pixelIdx) {
    BVHNode* stack[64];
    BVHNode** stackPtr = stack;
    *stackPtr++ = nullptr;  // sentinel: popping it ends the loop
    BVHNode* node = root;
    do {
        BVHNode* leftChild = node->leftChild;
        BVHNode* rightChild = node->rightChild;
        bool intersectsLeft = leftChild->boundingBox.Intersect(ray);
        bool intersectsRight = rightChild->boundingBox.Intersect(ray);
        bool isLeafLeft = leftChild->IsLeaf();
        bool isLeafRight = rightChild->IsLeaf();
        if (intersectsLeft && isLeafLeft) {
            colorMask[pixelIdx] *= 0.95f;
        }
        if (intersectsRight && isLeafRight) {
            colorMask[pixelIdx] *= 0.95f;
        }
        bool traverseLeft = intersectsLeft && !isLeafLeft;
        bool traverseRight = intersectsRight && !isLeafRight;
        if (!traverseLeft && !traverseRight) {
            node = *--stackPtr;
        }
        else {
            colorMask[pixelIdx] *= 0.95f;
            node = (traverseLeft) ? leftChild : rightChild;
            if (traverseLeft && traverseRight) {
                *stackPtr++ = rightChild;
            }
        }
    } while (node != nullptr);
}
// Recursive traversal variant kept for debugging (its call site in TraceKernel
// is commented out). Also darkens colorMask for every visited box.
// NOTE(review): device-side recursion consumes per-thread stack; prefer the
// iterative TraverseBVH in production paths.
__device__ void TraverseBVHRecursive(HRay& ray,
                                     BVHNode* currentNode,
                                     float& t,
                                     HSurfaceInteraction& intersection,
                                     HTriangle* triangles,
                                     int numTriangles,
                                     int& nearestTriIdx,
                                     glm::vec3* colorMask,
                                     int i) {
    if (currentNode->boundingBox.Intersect(ray)) {
        colorMask[i] *= 0.8f;  // visualize the visit
        if (currentNode->IsLeaf()) {
            if (triangles[currentNode->triangleIdx].Intersect(ray, t, intersection)) {
                nearestTriIdx = currentNode->triangleIdx;
            }
        }
        else {
            BVHNode* leftChild = currentNode->leftChild;
            BVHNode* rightChild = currentNode->rightChild;
            TraverseBVHRecursive(ray, leftChild, t, intersection, triangles, numTriangles, nearestTriIdx, colorMask, i);
            TraverseBVHRecursive(ray, rightChild, t, intersection, triangles, numTriangles, nearestTriIdx, colorMask, i);
        }
    }
}
//////////////////////////////////////////////////////////////////////////
// Global Kernels
//////////////////////////////////////////////////////////////////////////
// Resets per-pass state: every pixel starts alive (livePixels[i] = i) with a
// white color mask and zero accumulated radiance.
__global__ void InitData(unsigned int numPixels,
                         int* livePixels,
                         glm::vec3* colorMask,
                         glm::vec3* accumulatedColor) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numPixels) return;
    livePixels[idx] = idx;
    colorMask[idx] = glm::vec3(1.0f, 1.0f, 1.0f);
    accumulatedColor[idx] = glm::vec3(0.0f, 0.0f, 0.0f);
}
// Generates the primary camera ray for every pixel: jittered inside the pixel
// for anti-aliasing, pushed out to the focal plane, and (for a non-zero
// aperture) origin-jittered across the lens disk for depth of field.
__global__ void InitCameraRays(HRay* rays,
                               HCameraData* cameraData,
                               unsigned int currentSeed) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < cameraData->resolution.x*cameraData->resolution.y) {
        int x = i % cameraData->resolution.x;
        // Flip y so rows match OpenGL's bottom-left origin.
        int y = cameraData->resolution.y - (i - x) / cameraData->resolution.x - 1;
        // Store camera coordinate system
        glm::vec3 position = cameraData->position;
        glm::vec3 forward = cameraData->forward;
        glm::vec3 right = cameraData->right;
        glm::vec3 up = cameraData->up;
        // Per-pixel RNG, decorrelated by hashing the pixel index into the seed.
        thrust::default_random_engine rng(currentSeed + TWHash(i));
        thrust::uniform_real_distribution<float> uniform(0.0f, 1.0f);
        // Random pixel offsets for anti-aliasing; expected value is 0.5,
        // i.e. the middle of the pixel.
        float dx = uniform(rng) - 0.5f;
        float dy = uniform(rng) - 0.5f;
        // Point on the focal plane for this pixel. NOTE(review): the factor
        // M_PI_2 * M_1_180 looks like a degrees-to-radians half-angle
        // conversion, implying FOV is in degrees -- confirm.
        glm::vec3 pointOnImagePlane = position + (forward
            + (2.0f * (dx + x) / (cameraData->resolution.x - 1.0f) - 1.0f) * right * tanf(cameraData->FOV.x * M_PI_2 * M_1_180)
            + (2.0f * (dy + y) / (cameraData->resolution.y - 1.0f) - 1.0f) * up * tanf(cameraData->FOV.y * M_PI_2 * M_1_180))
            * cameraData->focalDistance;
        float apertureRadius = cameraData->apertureRadius;
        if (apertureRadius > M_EPSILON) {
            // Thin-lens depth of field: sample the lens disk (sqrt for uniform
            // area density) and shift the origin; the focal point stays fixed.
            float angle = M_2PI * uniform(rng);
            float distance = apertureRadius * sqrtf(uniform(rng));
            position += (cosf(angle) * right + sinf(angle) * up) * distance;
        }
        // Initialize ray
        HRay ray;
        ray.origin = position;
        ray.direction = normalize(pointOnImagePlane - position);
        ray.enteredMedium = HMedium();
        ray.currentMedium = HMedium();
        ray.transmitted = false;
        rays[i] = ray;
    }
}
// One path-tracing bounce per live pixel.
// Launch: 1D grid covering numLivePixels threads (grid rounded up to whole blocks).
// Finds the nearest hit (triangles via BVH, spheres brute force), applies any
// participating-media scattering/absorption, then scatters the ray as specular
// reflection, transmission or cosine-weighted diffuse. Rays whose color mask
// falls below M_EPSILON are marked dead by writing -1 into livePixels[i].
__global__ void TraceKernel(glm::vec3* accumulatedColor,
                            glm::vec3* colorMask,
                            int numLivePixels,
                            int* livePixels,
                            HRay* rays,
                            HSphere* spheres,
                            int numSpheres,
                            BVHNode* rootNode,
                            HTriangle* triangles,
                            int numTriangles,
                            int currentSeed) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Bounds check must come first: the previous version read livePixels[i]
    // before it, an out-of-bounds access for tail threads of the last block.
    if (i >= numLivePixels) return;
#if !(defined(_WIN64) && defined(STREAM_COMPACTION))
    // Without stream compaction dead rays stay in the list, flagged with -1.
    if (livePixels[i] == -1) return;
#endif
    int pixelIdx = livePixels[i];
    HRay currentRay = rays[pixelIdx];
    // Per-bounce RNG, decorrelated across pixels and passes.
    thrust::default_random_engine rng(TWHash(pixelIdx) * currentSeed);
    thrust::uniform_real_distribution<float> uniform(0.0f, 1.0f);
    // Nearest-intersection search; indices initialized defensively so they can
    // never be read uninitialized (they were not initialized before).
    float t = M_INF;
    HSurfaceInteraction intersection;
    int nearestSphereIdx = -1;
    int nearestTriIdx = -1;
    bool nearestIsTri = true;
    TraverseBVH(currentRay, rootNode, t, intersection, triangles, nearestTriIdx);
    // Sphere intersection (brute force over the small sphere list)
    for (int sphereIdx = 0; sphereIdx < numSpheres; sphereIdx++) {
        if (spheres[sphereIdx].Intersect(currentRay, t, intersection)) {
            nearestSphereIdx = sphereIdx;
            nearestIsTri = false;
        }
    }
    // Subsurface / participating-media scattering inside the current medium.
    HScatteringProperties scattering = currentRay.currentMedium.scatteringProperties;
    if (scattering.reducedScatteringCoefficient > 0 ||
        dot(scattering.absorptionCoefficient, scattering.absorptionCoefficient) > M_EPSILON) {
        // Free-flight distance sampled from the exponential distribution.
        float scatteringDistance = -log(uniform(rng)) / scattering.reducedScatteringCoefficient;
        if (scatteringDistance < t) {
            // Scattering event inside the medium, before any surface is reached.
            currentRay.origin = currentRay(scatteringDistance);
            currentRay.direction = ScatterSample(uniform(rng), uniform(rng));
            rays[pixelIdx] = currentRay;
            // Absorption along the traveled segment.
            colorMask[pixelIdx] *= Transmission(scattering.absorptionCoefficient, scatteringDistance);
            if (length(colorMask[pixelIdx]) < M_EPSILON) {
                // Mark ray for termination
                livePixels[i] = -1;
            }
            return;
        }
        else {
            // Reached the surface: absorb along the full segment.
            colorMask[pixelIdx] *= Transmission(scattering.absorptionCoefficient, t);
        }
    }
    // Treat intersection
    if (t < M_INF) {
        // Retrieve the material of whatever was hit.
        HMaterial material;
        if (nearestIsTri) {
            material = triangles[nearestTriIdx].material;
        }
        else {
            material = spheres[nearestSphereIdx].material;
        }
        // TODO: Handle roundoff errors properly to avoid self-intersection instead
        // of the fixed 0.001f offsets. See PBRT v3, http://pbrt.org/fp-error-section.pdf
        glm::vec3 incidentDir = -currentRay.direction;
        // TEMP: flip backfacing normals toward the incident direction.
        if (dot(incidentDir, intersection.normal) < 0.0f) {
            intersection.normal = -1.0f * intersection.normal;
        }
        // Media on either side of the boundary. We assume that all transmissive
        // materials are closed disjoint manifolds.
        HMedium incidentMedium = currentRay.currentMedium;
        HMedium transmittedMedium;
        if (currentRay.transmitted) {
            // Ray is coming from inside of the object it has entered
            transmittedMedium = currentRay.enteredMedium;
        }
        else {
            // Ray is approaching an object
            transmittedMedium = material.medium;
        }
        // Compute reflection and transmission directions using Snell's law
        glm::vec3 reflectionDir = ReflectionDir(intersection.normal, incidentDir);
        glm::vec3 transmissionDir = TransmissionDir(intersection.normal, incidentDir,
                                                    incidentMedium.eta,
                                                    transmittedMedium.eta);
        // Russian roulette on the Fresnel reflectance for specular materials.
        bool doReflect = (material.materialType & SPECULAR) &&
            (uniform(rng) < fresnelEquations(intersection.normal,
                                             incidentDir,
                                             incidentMedium.eta,
                                             transmittedMedium.eta,
                                             reflectionDir,
                                             transmissionDir).reflection);
        if (doReflect || material.materialType & REFLECTION) { // mirror reflection
            colorMask[pixelIdx] *= material.specular;
            currentRay.origin = intersection.position + 0.001f * intersection.normal;
            currentRay.direction = reflectionDir;
            rays[pixelIdx] = currentRay;
        }
        else if (material.materialType & TRANSMISSION) { // refraction
            currentRay.origin = intersection.position - 0.001f * intersection.normal;
            currentRay.direction = transmissionDir;
            currentRay.enteredMedium = currentRay.currentMedium;
            currentRay.currentMedium = transmittedMedium;
            currentRay.transmitted = !currentRay.transmitted;
            rays[pixelIdx] = currentRay;
        }
        else { // diffuse bounce
            accumulatedColor[pixelIdx] += colorMask[pixelIdx] * material.emission;
            colorMask[pixelIdx] *= material.diffuse;
            // Compute new ray direction and origin
            currentRay.origin = intersection.position + 0.001f * intersection.normal;
            currentRay.direction = HemisphereCosSample(intersection.normal,
                                                       uniform(rng),
                                                       uniform(rng));
            rays[pixelIdx] = currentRay;
        }
    }
    else {
        // The ray didn't intersect the scene: add sky gradient, kill the mask.
        accumulatedColor[pixelIdx] += colorMask[pixelIdx] * 0.5f * (1.0f - 0.5f * fabs(dot(currentRay.direction, glm::vec3(0.0f, 1.0f, 0.0f)))) * glm::vec3(0.69f, 0.86f, 0.89f);
        colorMask[pixelIdx] = glm::vec3(0.0f);
    }
    if (length(colorMask[pixelIdx]) < M_EPSILON) {
        // Mark ray for termination
        livePixels[i] = -1;
    }
}
// Folds this pass's radiance into the running average, applies gamma 2.2,
// and packs the result as a 32-bit color for the OpenGL display path.
__global__ void AccumulateKernel(glm::vec3* pixels,
                                 glm::vec3* accumulationBuffer,
                                 glm::vec3* accumulatedColor,
                                 HCameraData* cameraData,
                                 unsigned int passCounter) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < cameraData->resolution.x * cameraData->resolution.y) {
        int x = i % cameraData->resolution.x;
        // Flip y to OpenGL's bottom-left origin.
        int y = cameraData->resolution.y - (i - x) / cameraData->resolution.x - 1;
        // Incremental mean over passes (passCounter is 1-based).
        accumulationBuffer[i] = (accumulationBuffer[i] * (float)(passCounter - 1) + accumulatedColor[i]) / (float)passCounter;
        // Gamma-correct (1/2.2) and quantize each channel to a byte.
        HColor color;
        color.components.x = (unsigned char)(powf(clamp(accumulationBuffer[i].x, 0.0f, 1.0f), 1 / 2.2f) * 255);
        color.components.y = (unsigned char)(powf(clamp(accumulationBuffer[i].y, 0.0f, 1.0f), 1 / 2.2f) * 255);
        color.components.z = (unsigned char)(powf(clamp(accumulationBuffer[i].z, 0.0f, 1.0f), 1 / 2.2f) * 255);
        // NOTE(review): alpha byte is 1, not 255 -- presumably ignored by the
        // display path; confirm against the OpenGL consumer.
        color.components.w = 1;
        // The packed 32-bit color rides in the z component (bit-cast via HColor).
        pixels[i] = glm::vec3(x, y, color.value);
    }
}
// Unpacks the 32-bit color stored (bit-cast into a float) in each pixel's z
// component and writes it out as a tightly packed RGB byte triplet.
__global__ void SavePNG(unsigned char* colorBytes,
                        glm::vec3* pixels,
                        unsigned int width,
                        unsigned int height) {
    int pixel = blockDim.x * blockIdx.x + threadIdx.x;
    if (pixel >= width * height) return;
    HColor packed;
    packed.value = pixels[pixel].z;
    unsigned char* dst = colorBytes + 3 * pixel;
    dst[0] = (unsigned char)packed.components.x;
    dst[1] = (unsigned char)packed.components.y;
    dst[2] = (unsigned char)packed.components.z;
}
//////////////////////////////////////////////////////////////////////////
// External CUDA access launch function
//////////////////////////////////////////////////////////////////////////
// Renders one full pass: resets per-pass buffers, generates camera rays, traces
// up to MAX_RAY_DEPTH bounces (with optional stream compaction of dead rays),
// then accumulates and packs the result into image->pixels.
extern "C" void LaunchRenderKernel(HImage* image,
                                   glm::vec3* accumulatedColor,
                                   glm::vec3* colorMask,
                                   HCameraData* cameraData,
                                   unsigned int passCounter,
                                   HRay* rays,
                                   HSphere* spheres,
                                   unsigned int numSpheres,
                                   BVHNode* rootNode,
                                   HTriangle* triangles,
                                   int numTriangles) {
    unsigned int blockSize = BLOCK_SIZE;
    unsigned int gridSize = (image->numPixels + blockSize - 1) / blockSize;
    unsigned int numLivePixels = image->numPixels;
    int* livePixels = nullptr;
    // TODO: allocating per call is wasteful; keep a persistent buffer once
    // device-side reallocation after stream compaction is figured out.
    checkCudaErrors(cudaMalloc(&livePixels, image->numPixels * sizeof(int)));
    // Reset per-pass state: live-pixel list, color masks, accumulated color.
    InitData<<<gridSize, blockSize>>>(numLivePixels,
                                      livePixels,
                                      colorMask,
                                      accumulatedColor);
    CudaCheckError();
    // Seed from the pass counter and wall-clock milliseconds.
    SYSTEMTIME time;
    GetSystemTime(&time);
    long time_ms = (time.wSecond * 1000) + time.wMilliseconds;
    int hashedPassCounter = TWHash(passCounter);
    int currentSeed = hashedPassCounter + TWHash(time_ms);
    // Generate initial rays from camera
    InitCameraRays<<<gridSize, blockSize>>>(rays,
                                            cameraData,
                                            currentSeed);
    CudaCheckError();
    // Trace surviving rays until none are left or the maximum depth is reached.
    for (int rayDepth = 0; rayDepth < MAX_RAY_DEPTH; rayDepth++) {
        // All rays terminated: a zero-block launch is an invalid configuration,
        // so stop early (the previous version kept launching).
        if (numLivePixels == 0) break;
        unsigned int newGridSize = (numLivePixels + blockSize - 1) / blockSize;
        // Fresh seed per bounce; reuse time_ms instead of shadowing it.
        GetSystemTime(&time);
        time_ms = (time.wSecond * 1000) + time.wMilliseconds;
        currentSeed = hashedPassCounter + TWHash(time_ms) + rayDepth;
        // Ray propagation kernel
        TraceKernel<<<newGridSize, blockSize>>>(accumulatedColor,
                                                colorMask,
                                                numLivePixels,
                                                livePixels,
                                                rays,
                                                spheres,
                                                numSpheres,
                                                rootNode,
                                                triangles,
                                                numTriangles,
                                                currentSeed);
        CudaCheckError();
        // Remove terminated rays (-1 entries) so later bounces launch fewer threads.
#if defined(_WIN64) && defined(STREAM_COMPACTION)
        thrust::device_ptr<int> devPtr(livePixels);
        thrust::device_ptr<int> endPtr = thrust::remove_if(devPtr, devPtr + numLivePixels, IsNegative());
        numLivePixels = endPtr.get() - livePixels;
#endif
#ifndef NDEBUG
        if (passCounter == 1) {
            std::cout << "Current Ray depth: " << rayDepth << std::endl;
            std::cout << "Number of live rays: " << numLivePixels << std::endl;
            std::cout << "Number of thread blocks: " << newGridSize << std::endl;
        }
#endif // NDEBUG
    }
    // TODO: Move the accumulation and OpenGL interoperability into the core loop.
    // Accumulate into the running average, gamma-correct and pack for display.
    AccumulateKernel<<<gridSize, blockSize>>>(image->pixels,
                                              image->accumulationBuffer,
                                              accumulatedColor,
                                              cameraData,
                                              passCounter);
    CudaCheckError();
    checkCudaErrors(cudaFree(livePixels));
}
// Host wrapper for SavePNG: synchronizes before and after the launch so
// colorBytes is fully written when this function returns.
extern "C" void LaunchSavePNGKernel(unsigned char* colorBytes,
                                    glm::vec3* pixels,
                                    unsigned int width,
                                    unsigned int height) {
    const unsigned int threadsPerBlock = BLOCK_SIZE;
    const unsigned int numBlocks = (width * height + threadsPerBlock - 1) / threadsPerBlock;
    checkCudaErrors(cudaDeviceSynchronize());
    SavePNG<<<numBlocks, threadsPerBlock>>>(colorBytes, pixels, width, height);
    checkCudaErrors(cudaDeviceSynchronize());
}
|
9f20606f5fb34c3ec3f47d1a02bede0a532b092f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
% Function: scrambler
% Description: scramble bits with psuedo random seq.
% Inputs: bits_h: Binary bits to scramble
% c_h: psuedo random sequence
% Outputs: *scrambledbits_h: Scrambled Bits
By: Ahmad Nour & Mohammed Mostafa
*/
#include "scrambler.cuh"
// XORs each data element with the matching element of the pseudo random
// sequence; one thread per element.
__global__ void scrabmler(Byte *bits_d, Byte *scrambledbits_d, Byte *c_d, int numThreads)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: the rounded-up grid must not run past the data.
    if (tid >= numThreads)
        return;
    scrambledbits_d[tid] = bits_d[tid] ^ c_d[tid];
}
// Host wrapper: launches the scrambling kernel on stream_default.
// bits_d / c_d: device input bits and PRBS; *scrambledbits_d: device output;
// N: number of bits. A non-positive N is a no-op (previously divided by zero
// when computing the grid size for N == 0).
void scrambler(Byte* bits_d, Byte** scrambledbits_d, Byte* c_d, const int N)
{
	int numThreads = N;
	if (numThreads <= 0)
		return;
	int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread)
	int gridDim = (numThreads + blockDim - 1) / blockDim;   //grid size in blocks via ceil-div (min 1)
	//Calling the kernel(s)
	scrabmler << <gridDim, blockDim , 0, stream_default>> > (bits_d, *scrambledbits_d, c_d, N);
}
| 9f20606f5fb34c3ec3f47d1a02bede0a532b092f.cu | /*
% Function: scrambler
% Description: scramble bits with psuedo random seq.
% Inputs: bits_h: Binary bits to scramble
% c_h: psuedo random sequence
% Outputs: *scrambledbits_h: Scrambled Bits
By: Ahmad Nour & Mohammed Mostafa
*/
#include "scrambler.cuh"
// XORs each data element with the matching element of the pseudo random
// sequence; one thread per element.
__global__ void scrabmler(Byte *bits_d, Byte *scrambledbits_d, Byte *c_d, int numThreads)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: the rounded-up grid must not run past the data.
    if (tid >= numThreads)
        return;
    scrambledbits_d[tid] = bits_d[tid] ^ c_d[tid];
}
// Host wrapper: launches the scrambling kernel on stream_default.
// bits_d / c_d: device input bits and PRBS; *scrambledbits_d: device output;
// N: number of bits. A non-positive N is a no-op (previously divided by zero
// when computing the grid size for N == 0).
void scrambler(Byte* bits_d, Byte** scrambledbits_d, Byte* c_d, const int N)
{
	int numThreads = N;
	if (numThreads <= 0)
		return;
	int blockDim = (numThreads < 1024) ? numThreads : 1024; //block size in threads (max 1024 thread)
	int gridDim = (numThreads + blockDim - 1) / blockDim;   //grid size in blocks via ceil-div (min 1)
	//Calling the kernel(s)
	scrabmler << <gridDim, blockDim , 0, stream_default>> > (bits_d, *scrambledbits_d, c_d, N);
}
|
106e8832723f3c2510d058d3fb629b60fdf84be5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdafx.h"
#include "cuda_includes.h"
#include "utils.h"
#include "tests.cuh"
#include "time.h"
using namespace std;
// Element-wise GPU addition: c[i] = a[i] + b[i] for i in [0, size).
// NOTE: launches a single block of `size` threads, so size must not exceed the
// device's max threads per block (typically 1024).
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = nullptr;
    int *dev_b = nullptr;
    int *dev_c = nullptr;
    // Check device selection too; previously this status was silently ignored
    // until the final return.
    hipError_t cudaStatus = hipSetDevice(0);
    gpuErrchk(cudaStatus);
    gpuErrchk(hipMalloc((void**)&dev_c, size * sizeof(int)));
    gpuErrchk(hipMalloc((void**)&dev_a, size * sizeof(int)));
    gpuErrchk(hipMalloc((void**)&dev_b, size * sizeof(int)));
    gpuErrchk(hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice));
    gpuErrchk(hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice));
    hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
    gpuErrchk(hipGetLastError());       // launch-configuration errors
    gpuErrchk(hipDeviceSynchronize());  // asynchronous execution errors
    gpuErrchk(hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost));
    hipFree(dev_c);
    hipFree(dev_a);
    hipFree(dev_b);
    return cudaStatus;
}
// Fills two 1000-element vectors with random ints, adds them on the GPU,
// and exits. Errors from the runtime abort via gpuErrchk.
int main(int argc, char *argv[])
{
try {
const unsigned int size = 1000;
std::vector<int> a(size);
std::vector<int> b(size);
std::vector<int> c(size,0);
// Fix: iterate by reference — `for (auto i : a)` assigned rand() to a
// copy of each element, leaving the input vectors all zero.
for (auto& i : a) i = rand();
for (auto& i : b) i = rand();
gpuErrchk(addWithCuda(&c[0], &a[0], &b[0], size));
gpuErrchk(hipDeviceReset());
std::cout << "end" << std::endl;
getchar();
return 0;
}
catch (std::exception& exc) {
cerr << exc.what() << std::endl;
getchar();
exit(EXIT_FAILURE);
}
return (EXIT_SUCCESS);
} | 106e8832723f3c2510d058d3fb629b60fdf84be5.cu | #include "stdafx.h"
#include "cuda_includes.h"
#include "utils.h"
#include "tests.cuh"
#include "time.h"
using namespace std;
// Element-wise addition of two int arrays on the GPU: c[i] = a[i] + b[i].
// a, b are host inputs; c is the host output buffer (size elements each).
// NOTE(review): the kernel is launched as a single block of `size` threads,
// so this only works for size <= 1024 — confirm against callers.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
// Fix: the device-selection status was previously stored but never checked;
// fail fast so later allocations don't die with a confusing error.
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
return cudaStatus;
gpuErrchk(cudaMalloc((void**)&dev_c, size * sizeof(int)));
gpuErrchk(cudaMalloc((void**)&dev_a, size * sizeof(int)));
gpuErrchk(cudaMalloc((void**)&dev_b, size * sizeof(int)));
gpuErrchk(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice));
addKernel <<<1, size >>> (dev_c, dev_a, dev_b);
gpuErrchk(cudaGetLastError());       // catch launch-configuration errors
gpuErrchk(cudaDeviceSynchronize());  // catch asynchronous execution errors
gpuErrchk(cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost));
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
// Fills two 1000-element vectors with random ints, adds them on the GPU,
// and exits. Errors from the runtime abort via gpuErrchk.
int main(int argc, char *argv[])
{
try {
const unsigned int size = 1000;
std::vector<int> a(size);
std::vector<int> b(size);
std::vector<int> c(size,0);
// Fix: iterate by reference — `for (auto i : a)` assigned rand() to a
// copy of each element, leaving the input vectors all zero.
for (auto& i : a) i = rand();
for (auto& i : b) i = rand();
gpuErrchk(addWithCuda(&c[0], &a[0], &b[0], size));
gpuErrchk(cudaDeviceReset());
std::cout << "end" << std::endl;
getchar();
return 0;
}
catch (std::exception& exc) {
cerr << exc.what() << std::endl;
getchar();
exit(EXIT_FAILURE);
}
return (EXIT_SUCCESS);
} |
fd437aa2a61cfad0f863775520f406aaafc23ad4.hip | // !!! This is a file automatically generated by hipify!!!
#include "matrix.h"
#include <hip/hip_runtime.h>
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of
// BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
#define BLOCK_DIM 16
#define NUM_REPS 10
// Allocates a rows x cols double matrix (column-major storage, see ELEM)
// and fills every element with rand(). Returns NULL for non-positive dims.
// NOTE(review): malloc results are unchecked, and srand(time(NULL)) is
// reseeded on every call despite the "only once" comment — confirm intent.
double_matrix *newDoubleMatrix(int rows, int cols) {
if (rows <= 0 || cols <= 0)
return NULL;
// allocate a matrix structure
double_matrix *m = (double_matrix *)malloc(sizeof(double_matrix));
// set dimensions
m->rows = rows;
m->cols = cols;
// allocate a double array of length rows * cols
m->data = (double *)malloc(rows * cols * sizeof(double));
// fill with random values (earlier comment claiming "set to 0" was wrong)
int i;
srand(time(NULL)); // Initialization, should only be called once.
for (i = 0; i < rows * cols; i++)
m->data[i] = (double)rand();
return m;
}
// Allocates a rows x cols int matrix (column-major storage, see ELEM)
// and fills every element with rand(). Returns NULL for non-positive dims.
// NOTE(review): malloc results are unchecked, and srand(time(NULL)) is
// reseeded on every call despite the "only once" comment — confirm intent.
int_matrix *newIntMatrix(int rows, int cols) {
if (rows <= 0 || cols <= 0)
return NULL;
// allocate a matrix structure
int_matrix *m = (int_matrix *)malloc(sizeof(int_matrix));
// set dimensions
m->rows = rows;
m->cols = cols;
// allocate an int array of length rows * cols
m->data = (int *)malloc(rows * cols * sizeof(int));
// fill with random values (earlier comment claiming "set to 0" was wrong)
int i;
srand(time(NULL)); // Initialization, should only be called once.
for (i = 0; i < rows * cols; i++)
m->data[i] = rand();
return m;
}
// pointer to element in matrix by row and column location
#define ELEM(mtx, row, col) mtx->data[(col - 1) * mtx->rows + (row - 1)]
/* Prints the matrix to stdout row by row. Returns 0 if successful
 * and -1 if mtx is NULL. Indices are 1-based; ELEM maps them onto the
 * column-major backing array.
 */
int printDoubleMatrix(double_matrix *mtx) {
if (!mtx)
return -1;
int row, col;
for (row = 1; row <= mtx->rows; row++) {
for (col = 1; col <= mtx->cols; col++) {
// Print the floating-point element with
// - either a - if negative or a space if positive
// - at least 3 spaces before the .
// - precision to the hundredths place
printf("% 6.2f ", ELEM(mtx, row, col));
}
// separate rows by newlines
printf("\n");
}
return 0;
}
// Prints the int matrix to stdout row by row with %d.
// Returns 0 on success, -1 if mtx is NULL. Indices are 1-based via ELEM.
int printIntMatrix(int_matrix *mtx) {
if (!mtx)
return -1;
int row, col;
for (row = 1; row <= mtx->rows; row++) {
for (col = 1; col <= mtx->cols; col++) {
// plain %d output (the old copy-pasted float-format comment was wrong)
printf("%d ", ELEM(mtx, row, col));
}
// separate rows by newlines
printf("\n");
}
return 0;
}
/* Writes the transpose of matrix in into matrix out (CPU reference path).
 * Returns 0 if successful, -1 if either in or out is NULL,
 * and -2 if the dimensions of in and out are incompatible
 * (out must be in->cols x in->rows).
 */
int transposeDouble(double_matrix *in, double_matrix *out) {
if (!in || !out)
return -1;
if (in->rows != out->cols || in->cols != out->rows)
return -2;
int row, col;
// 1-based element-by-element copy: out[col][row] = in[row][col]
for (row = 1; row <= in->rows; row++)
for (col = 1; col <= in->cols; col++)
ELEM(out, col, row) = ELEM(in, row, col);
return 0;
}
// Int variant of transposeDouble: writes transpose of `in` into `out`.
// Returns 0 on success, -1 on NULL input, -2 on dimension mismatch.
int transposeInt(int_matrix *in, int_matrix *out) {
if (!in || !out)
return -1;
if (in->rows != out->cols || in->cols != out->rows)
return -2;
int row, col;
for (row = 1; row <= in->rows; row++)
for (col = 1; col <= in->cols; col++)
ELEM(out, col, row) = ELEM(in, row, col);
return 0;
}
/* Sets the (row, col) element of mtx to val (1-based indices). Returns 0 if
 * successful, -1 if mtx is NULL, and -2 if row or col are
 * outside of the dimensions of mtx.
 */
int setDoubleElement(double_matrix *mtx, int row, int col, double val) {
if (!mtx)
return -1;
assert(mtx->data);
if (row <= 0 || row > mtx->rows || col <= 0 || col > mtx->cols)
return -2;
ELEM(mtx, row, col) = val;
return 0;
}
// Int variant of setDoubleElement: writes val at (row, col), 1-based.
// Returns 0 on success, -1 on NULL matrix, -2 when out of bounds.
int setIntElement(int_matrix *mtx, int row, int col, int val) {
if (!mtx)
return -1;
assert(mtx->data);
if (row <= 0 || row > mtx->rows || col <= 0 || col > mtx->cols)
return -2;
ELEM(mtx, row, col) = val;
return 0;
}
/* Deep-copies a matrix. Returns NULL if mtx is NULL.
 * NOTE(review): newDoubleMatrix fills the fresh buffer with rand() first,
 * which memcpy immediately overwrites — wasted work but correct.
 */
double_matrix *copyDoubleMatrix(double_matrix *mtx) {
if (!mtx)
return NULL;
// create a new matrix to hold the copy
double_matrix *cp = newDoubleMatrix(mtx->rows, mtx->cols);
// copy mtx's data to cp's data
memcpy(cp->data, mtx->data, mtx->rows * mtx->cols * sizeof(double));
return cp;
}
// Deep-copies an int matrix; returns NULL when mtx is NULL.
// (As with copyDoubleMatrix, the rand() fill in the constructor is
// immediately overwritten by memcpy.)
int_matrix *copyIntMatrix(int_matrix *mtx) {
if (!mtx)
return NULL;
// create a new matrix to hold the copy
int_matrix *cp = newIntMatrix(mtx->rows, mtx->cols);
// copy mtx's data to cp's data
memcpy(cp->data, mtx->data, mtx->rows * mtx->cols * sizeof(int));
return cp;
}
/* Frees a matrix (data array then struct). Returns 0 if successful and -1
 * if mtx is NULL.
 */
int deleteDoubleMatrix(double_matrix *mtx) {
if (!mtx)
return -1;
// free mtx's data
assert(mtx->data);
free(mtx->data);
// free mtx itself
free(mtx);
return 0;
}
// Frees an int matrix (data array then struct); -1 when mtx is NULL.
int deleteIntMatrix(int_matrix *mtx) {
if (!mtx)
return -1;
// free mtx's data
assert(mtx->data);
free(mtx->data);
// free mtx itself
free(mtx);
return 0;
}
// Tiled out-of-place transpose (int) using shared memory, with diagonal
// block reordering of blockIdx to spread memory traffic across partitions.
// The +1 column pad on the tile avoids shared-memory bank conflicts on the
// transposed read. NOTE(review): there are no bounds guards, so width and
// height must be multiples of TILE_DIM — confirm against callers.
__global__ void transposeDiagonalInt(int *odata, int *idata, int width,
int height) {
__shared__ int tile[TILE_DIM][TILE_DIM + 1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces
// blockIdx.x and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
// coalesced load of the tile from global memory
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
// barrier: all writes to the shared tile must finish before transposed reads
__syncthreads();
// coalesced store of the transposed tile
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
// Double-precision variant of transposeDiagonalInt: tiled shared-memory
// transpose with diagonal block reordering and bank-conflict padding.
// NOTE(review): no bounds guards — width/height must be multiples of
// TILE_DIM; confirm against callers.
__global__ void transposeDiagonalDouble(double *odata, double *idata, int width,
int height) {
__shared__ double tile[TILE_DIM][TILE_DIM + 1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces
// blockIdx.x and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
// coalesced load of the tile from global memory
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
// barrier: all writes to the shared tile must finish before transposed reads
__syncthreads();
// coalesced store of the transposed tile
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
// Benchmarks the int and double transpose kernels over NUM_REPS launches.
// Usage: ./transpose-serial NumberOfRows NumberOfColumns
// (rows/columns should be multiples of BLOCK_DIM; the grid is computed by
// integer division, so remainders are silently dropped.)
int main(int argc, char **argv) {
struct timeval start, end;
if (argc != 3) {
printf("Correct way to execute this program is:\n");
printf("./transpose-serial NumberOfRows NumberOfColumns.\n");
return 1;
}
int rows = atoi(argv[1]);
int columns = atoi(argv[2]);
int data_size = rows * columns;
int *d_m2_arr, *d_m2Trans_arr;
double *d_m1_arr, *d_m1Trans_arr;
// creating two matrices, 1 with double values and the other with int values
double_matrix *m1 = newDoubleMatrix(rows, columns);
int_matrix *m2 = newIntMatrix(rows, columns);
// creating transpose matrices
double_matrix *m1Trans = newDoubleMatrix(rows, columns);
int_matrix *m2Trans = newIntMatrix(rows, columns);
// Fix: removed `int_matrix *r2 = newIntMatrix(rows, columns);` — it was
// never used and never freed (memory leak).
// setup execution parameters
dim3 grid(columns / BLOCK_DIM, rows / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
// initialize data on device
CUDA_CHECK_RETURN(hipMalloc((void **)&d_m2_arr, sizeof(int) * data_size));
CUDA_CHECK_RETURN(
hipMalloc((void **)&d_m2Trans_arr, sizeof(int) * data_size));
CUDA_CHECK_RETURN(hipMemcpy(d_m2_arr, m2->data, sizeof(int) * data_size,
hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMalloc((void **)&d_m1_arr, sizeof(double) * data_size));
CUDA_CHECK_RETURN(
hipMalloc((void **)&d_m1Trans_arr, sizeof(double) * data_size));
CUDA_CHECK_RETURN(hipMemcpy(d_m1_arr, m1->data, sizeof(double) * data_size,
hipMemcpyHostToDevice));
// time NUM_REPS int-transpose launches (sync once at the end)
gettimeofday(&start, NULL);
for (int i = 0; i < NUM_REPS; i++) {
hipLaunchKernelGGL(( transposeDiagonalInt), dim3(grid), dim3(threads), 0, 0, d_m2Trans_arr, d_m2_arr, columns,
rows);
}
CUDA_CHECK_RETURN(
hipDeviceSynchronize()); // Wait for the GPU launched work to complete
gettimeofday(&end, NULL);
double diffInt =
(end.tv_sec - start.tv_sec) * 1000000.0 + (end.tv_usec - start.tv_usec);
printf("Transpose of matrix with int values took: %.4fms\n",
diffInt / (1000 * NUM_REPS));
// time NUM_REPS double-transpose launches
gettimeofday(&start, NULL);
for (int j = 0; j < NUM_REPS; j++) {
hipLaunchKernelGGL(( transposeDiagonalDouble), dim3(grid), dim3(threads), 0, 0, d_m1Trans_arr, d_m1_arr, columns,
rows);
}
CUDA_CHECK_RETURN(
hipDeviceSynchronize()); // Wait for the GPU launched work to complete
gettimeofday(&end, NULL);
double diffDouble =
(end.tv_sec - start.tv_sec) * 1000000.0 + (end.tv_usec - start.tv_usec);
printf("Transpose of matrix with double values took: %.4fms\n",
diffDouble / (1000 * NUM_REPS));
CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipMemcpy(m2Trans->data, d_m2Trans_arr,
sizeof(int) * data_size,
hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipMemcpy(m1Trans->data, d_m1Trans_arr,
sizeof(double) * data_size,
hipMemcpyDeviceToHost));
// free device buffers
hipFree(d_m2Trans_arr);
hipFree(d_m2_arr);
hipFree(d_m1Trans_arr);
hipFree(d_m1_arr);
// deleting host matrices
hipDeviceReset();
deleteDoubleMatrix(m1);
deleteIntMatrix(m2);
deleteDoubleMatrix(m1Trans);
deleteIntMatrix(m2Trans);
return 0;
}
| fd437aa2a61cfad0f863775520f406aaafc23ad4.cu | #include "matrix.h"
#include <cuda.h>
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of
// BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
#define BLOCK_DIM 16
#define NUM_REPS 10
// Allocates a rows x cols double matrix (column-major storage, see ELEM)
// and fills every element with rand(). Returns NULL for non-positive dims.
// NOTE(review): malloc unchecked; srand reseeded on every call.
double_matrix *newDoubleMatrix(int rows, int cols) {
if (rows <= 0 || cols <= 0)
return NULL;
// allocate a matrix structure
double_matrix *m = (double_matrix *)malloc(sizeof(double_matrix));
// set dimensions
m->rows = rows;
m->cols = cols;
// allocate a double array of length rows * cols
m->data = (double *)malloc(rows * cols * sizeof(double));
// fill with random values (earlier comment claiming "set to 0" was wrong)
int i;
srand(time(NULL)); // Initialization, should only be called once.
for (i = 0; i < rows * cols; i++)
m->data[i] = (double)rand();
return m;
}
// Allocates a rows x cols int matrix (column-major storage, see ELEM)
// and fills every element with rand(). Returns NULL for non-positive dims.
// NOTE(review): malloc unchecked; srand reseeded on every call.
int_matrix *newIntMatrix(int rows, int cols) {
if (rows <= 0 || cols <= 0)
return NULL;
// allocate a matrix structure
int_matrix *m = (int_matrix *)malloc(sizeof(int_matrix));
// set dimensions
m->rows = rows;
m->cols = cols;
// allocate an int array of length rows * cols
m->data = (int *)malloc(rows * cols * sizeof(int));
// fill with random values (earlier comment claiming "set to 0" was wrong)
int i;
srand(time(NULL)); // Initialization, should only be called once.
for (i = 0; i < rows * cols; i++)
m->data[i] = rand();
return m;
}
// pointer to element in matrix by row and column location
#define ELEM(mtx, row, col) mtx->data[(col - 1) * mtx->rows + (row - 1)]
/* Prints the matrix to stdout row by row. Returns 0 if successful
 * and -1 if mtx is NULL. Indices are 1-based; ELEM maps them onto the
 * column-major backing array.
 */
int printDoubleMatrix(double_matrix *mtx) {
if (!mtx)
return -1;
int row, col;
for (row = 1; row <= mtx->rows; row++) {
for (col = 1; col <= mtx->cols; col++) {
// Print the floating-point element with
// - either a - if negative or a space if positive
// - at least 3 spaces before the .
// - precision to the hundredths place
printf("% 6.2f ", ELEM(mtx, row, col));
}
// separate rows by newlines
printf("\n");
}
return 0;
}
// Prints the int matrix to stdout row by row with %d.
// Returns 0 on success, -1 if mtx is NULL. Indices are 1-based via ELEM.
int printIntMatrix(int_matrix *mtx) {
if (!mtx)
return -1;
int row, col;
for (row = 1; row <= mtx->rows; row++) {
for (col = 1; col <= mtx->cols; col++) {
// plain %d output (the old copy-pasted float-format comment was wrong)
printf("%d ", ELEM(mtx, row, col));
}
// separate rows by newlines
printf("\n");
}
return 0;
}
/* Writes the transpose of matrix in into matrix out (CPU reference path).
 * Returns 0 if successful, -1 if either in or out is NULL,
 * and -2 if the dimensions of in and out are incompatible
 * (out must be in->cols x in->rows).
 */
int transposeDouble(double_matrix *in, double_matrix *out) {
if (!in || !out)
return -1;
if (in->rows != out->cols || in->cols != out->rows)
return -2;
int row, col;
// 1-based element-by-element copy: out[col][row] = in[row][col]
for (row = 1; row <= in->rows; row++)
for (col = 1; col <= in->cols; col++)
ELEM(out, col, row) = ELEM(in, row, col);
return 0;
}
// Int variant of transposeDouble: writes transpose of `in` into `out`.
// Returns 0 on success, -1 on NULL input, -2 on dimension mismatch.
int transposeInt(int_matrix *in, int_matrix *out) {
if (!in || !out)
return -1;
if (in->rows != out->cols || in->cols != out->rows)
return -2;
int row, col;
for (row = 1; row <= in->rows; row++)
for (col = 1; col <= in->cols; col++)
ELEM(out, col, row) = ELEM(in, row, col);
return 0;
}
/* Sets the (row, col) element of mtx to val (1-based indices). Returns 0 if
 * successful, -1 if mtx is NULL, and -2 if row or col are
 * outside of the dimensions of mtx.
 */
int setDoubleElement(double_matrix *mtx, int row, int col, double val) {
if (!mtx)
return -1;
assert(mtx->data);
if (row <= 0 || row > mtx->rows || col <= 0 || col > mtx->cols)
return -2;
ELEM(mtx, row, col) = val;
return 0;
}
// Int variant of setDoubleElement: writes val at (row, col), 1-based.
// Returns 0 on success, -1 on NULL matrix, -2 when out of bounds.
int setIntElement(int_matrix *mtx, int row, int col, int val) {
if (!mtx)
return -1;
assert(mtx->data);
if (row <= 0 || row > mtx->rows || col <= 0 || col > mtx->cols)
return -2;
ELEM(mtx, row, col) = val;
return 0;
}
/* Deep-copies a matrix. Returns NULL if mtx is NULL.
 * NOTE(review): newDoubleMatrix fills the fresh buffer with rand() first,
 * which memcpy immediately overwrites — wasted work but correct.
 */
double_matrix *copyDoubleMatrix(double_matrix *mtx) {
if (!mtx)
return NULL;
// create a new matrix to hold the copy
double_matrix *cp = newDoubleMatrix(mtx->rows, mtx->cols);
// copy mtx's data to cp's data
memcpy(cp->data, mtx->data, mtx->rows * mtx->cols * sizeof(double));
return cp;
}
// Deep-copies an int matrix; returns NULL when mtx is NULL.
// (The rand() fill in the constructor is immediately overwritten by memcpy.)
int_matrix *copyIntMatrix(int_matrix *mtx) {
if (!mtx)
return NULL;
// create a new matrix to hold the copy
int_matrix *cp = newIntMatrix(mtx->rows, mtx->cols);
// copy mtx's data to cp's data
memcpy(cp->data, mtx->data, mtx->rows * mtx->cols * sizeof(int));
return cp;
}
/* Frees a matrix (data array then struct). Returns 0 if successful and -1
 * if mtx is NULL.
 */
int deleteDoubleMatrix(double_matrix *mtx) {
if (!mtx)
return -1;
// free mtx's data
assert(mtx->data);
free(mtx->data);
// free mtx itself
free(mtx);
return 0;
}
// Frees an int matrix (data array then struct); -1 when mtx is NULL.
int deleteIntMatrix(int_matrix *mtx) {
if (!mtx)
return -1;
// free mtx's data
assert(mtx->data);
free(mtx->data);
// free mtx itself
free(mtx);
return 0;
}
// Tiled out-of-place transpose (int) using shared memory, with diagonal
// block reordering of blockIdx to spread memory traffic across partitions.
// The +1 column pad on the tile avoids shared-memory bank conflicts on the
// transposed read. NOTE(review): there are no bounds guards, so width and
// height must be multiples of TILE_DIM — confirm against callers.
__global__ void transposeDiagonalInt(int *odata, int *idata, int width,
int height) {
__shared__ int tile[TILE_DIM][TILE_DIM + 1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces
// blockIdx.x and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
// coalesced load of the tile from global memory
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
// barrier: all writes to the shared tile must finish before transposed reads
__syncthreads();
// coalesced store of the transposed tile
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
// Double-precision variant of transposeDiagonalInt: tiled shared-memory
// transpose with diagonal block reordering and bank-conflict padding.
// NOTE(review): no bounds guards — width/height must be multiples of
// TILE_DIM; confirm against callers.
__global__ void transposeDiagonalDouble(double *odata, double *idata, int width,
int height) {
__shared__ double tile[TILE_DIM][TILE_DIM + 1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces
// blockIdx.x and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
// coalesced load of the tile from global memory
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
// barrier: all writes to the shared tile must finish before transposed reads
__syncthreads();
// coalesced store of the transposed tile
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
// Benchmarks the int and double transpose kernels over NUM_REPS launches.
// Usage: ./transpose-serial NumberOfRows NumberOfColumns
// (rows/columns should be multiples of BLOCK_DIM; the grid is computed by
// integer division, so remainders are silently dropped.)
int main(int argc, char **argv) {
struct timeval start, end;
if (argc != 3) {
printf("Correct way to execute this program is:\n");
printf("./transpose-serial NumberOfRows NumberOfColumns.\n");
return 1;
}
int rows = atoi(argv[1]);
int columns = atoi(argv[2]);
int data_size = rows * columns;
int *d_m2_arr, *d_m2Trans_arr;
double *d_m1_arr, *d_m1Trans_arr;
// creating two matrices, 1 with double values and the other with int values
double_matrix *m1 = newDoubleMatrix(rows, columns);
int_matrix *m2 = newIntMatrix(rows, columns);
// creating transpose matrices
double_matrix *m1Trans = newDoubleMatrix(rows, columns);
int_matrix *m2Trans = newIntMatrix(rows, columns);
// Fix: removed `int_matrix *r2 = newIntMatrix(rows, columns);` — it was
// never used and never freed (memory leak).
// setup execution parameters
dim3 grid(columns / BLOCK_DIM, rows / BLOCK_DIM, 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
// initialize data on device
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_m2_arr, sizeof(int) * data_size));
CUDA_CHECK_RETURN(
cudaMalloc((void **)&d_m2Trans_arr, sizeof(int) * data_size));
CUDA_CHECK_RETURN(cudaMemcpy(d_m2_arr, m2->data, sizeof(int) * data_size,
cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_m1_arr, sizeof(double) * data_size));
CUDA_CHECK_RETURN(
cudaMalloc((void **)&d_m1Trans_arr, sizeof(double) * data_size));
CUDA_CHECK_RETURN(cudaMemcpy(d_m1_arr, m1->data, sizeof(double) * data_size,
cudaMemcpyHostToDevice));
// time NUM_REPS int-transpose launches (sync once at the end)
gettimeofday(&start, NULL);
for (int i = 0; i < NUM_REPS; i++) {
transposeDiagonalInt<<<grid, threads>>>(d_m2Trans_arr, d_m2_arr, columns,
rows);
}
CUDA_CHECK_RETURN(
cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
gettimeofday(&end, NULL);
double diffInt =
(end.tv_sec - start.tv_sec) * 1000000.0 + (end.tv_usec - start.tv_usec);
printf("Transpose of matrix with int values took: %.4fms\n",
diffInt / (1000 * NUM_REPS));
// time NUM_REPS double-transpose launches
gettimeofday(&start, NULL);
for (int j = 0; j < NUM_REPS; j++) {
transposeDiagonalDouble<<<grid, threads>>>(d_m1Trans_arr, d_m1_arr, columns,
rows);
}
CUDA_CHECK_RETURN(
cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
gettimeofday(&end, NULL);
double diffDouble =
(end.tv_sec - start.tv_sec) * 1000000.0 + (end.tv_usec - start.tv_usec);
printf("Transpose of matrix with double values took: %.4fms\n",
diffDouble / (1000 * NUM_REPS));
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaMemcpy(m2Trans->data, d_m2Trans_arr,
sizeof(int) * data_size,
cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpy(m1Trans->data, d_m1Trans_arr,
sizeof(double) * data_size,
cudaMemcpyDeviceToHost));
// free device buffers
cudaFree(d_m2Trans_arr);
cudaFree(d_m2_arr);
cudaFree(d_m1Trans_arr);
cudaFree(d_m1_arr);
// deleting host matrices
cudaDeviceReset();
deleteDoubleMatrix(m1);
deleteIntMatrix(m2);
deleteDoubleMatrix(m1Trans);
deleteIntMatrix(m2Trans);
return 0;
}
|
7b3f929d1c7d01dc401c6153f23012a6ca618ad1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//1. Write a CUDA program to print the message Hello World and demonstrate threads by varying BLOCK_WIDTH to different sizes.
#include <stdio.h>
#define NUM_BLOCKS 32
#define BLOCK_WIDTH 3
// Kernel: each thread prints its block index (commented alternative prints
// the thread index instead).
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
//printf("Hello world! I'm thread %d\n", threadIdx.x);
}

// Launches NUM_BLOCKS x BLOCK_WIDTH hello threads and waits for completion.
int main(int argc,char **argv)
{
// launch the kernel
// Fix: the hipify pass fused "hipLaunchKernelGGL((" into the preceding
// comment, leaving "hello), dim3(...), 0, 0, );" as a syntax error.
// Restore the complete launch-macro call.
hipLaunchKernelGGL(hello, dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0);
// force the printf()s to flush
hipDeviceSynchronize();
printf("That's all!\n");
return 0;
} | 7b3f929d1c7d01dc401c6153f23012a6ca618ad1.cu | //1. Write a CUDA program to print the message “Hello World” and demonstrate threads by varying BLOCK_WIDTH to different sizes.
#include <stdio.h>
#define NUM_BLOCKS 32
#define BLOCK_WIDTH 3
// Kernel: each thread prints its block index (commented alternative prints
// the thread index instead).
__global__ void hello()
{
printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
//printf("Hello world! I'm thread %d\n", threadIdx.x);
// Launches NUM_BLOCKS x BLOCK_WIDTH hello threads and waits for completion.
}int main(int argc,char **argv)
{
// launch the kernel
hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();
// force the printf()s to flush
cudaDeviceSynchronize();
printf("That's all!\n");
return 0;
} |
959f6fe6aa5845ff855ed06eb518ea5786847847.hip | // !!! This is a file automatically generated by hipify!!!
#include "../shared/light_transport_common.h"
namespace vlr {
using namespace shared;
static constexpr int32_t debugPathLength = 0;
// True when the current OptiX launch index equals the user-selected
// probe/debug pixel stored in the launch parameters.
CUDA_DEVICE_FUNCTION bool onProbePixel() {
return optixGetLaunchIndex().x == plp.probePixX && optixGetLaunchIndex().y == plp.probePixY;
}
// Common Any Hit Program for All Primitive Types and Materials for non-shadow rays
// Any-hit program for non-shadow path-tracing rays on alpha-textured
// geometry: performs a stochastic alpha test, ignoring the intersection
// with probability (1 - alpha) so partially transparent surfaces are
// sampled proportionally to their opacity.
CUDA_DEVICE_KERNEL void RT_AH_NAME(pathTracingAnyHitWithAlpha)() {
PTReadOnlyPayload* roPayload;
PTReadWritePayload* rwPayload;
PTPayloadSignature::get(&roPayload, nullptr, &rwPayload, nullptr);
float alpha = getAlpha(roPayload->wls);
// Stochastic Alpha Test
if (rwPayload->rng.getFloat0cTo1o() >= alpha)
optixIgnoreIntersection();
}
// Common Ray Generation Program for All Camera Types
// Ray-generation program: one path per pixel per frame. Samples wavelengths
// and a primary camera ray, then iteratively traces closest-hit rays until
// the path terminates (closest-hit program sets woPayload.terminate) or
// MaxPathLength is reached. First-hit albedo/normal feed the denoiser
// buffers; the spectral contribution is accumulated per pixel.
// NOTE: the RNG call order is load-bearing — do not reorder samples.
CUDA_DEVICE_KERNEL void RT_RG_NAME(pathTracing)() {
uint2 launchIndex = make_uint2(optixGetLaunchIndex().x, optixGetLaunchIndex().y);
KernelRNG rng = plp.rngBuffer.read(launchIndex);
// jittered sub-pixel position
float2 p = make_float2(launchIndex.x + rng.getFloat0cTo1o(),
launchIndex.y + rng.getFloat0cTo1o());
float selectWLPDF;
WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF);
// sample a lens position (We0) and a direction through the pixel (We1)
Camera camera(static_cast<ProgSigCamera_sample>(plp.progSampleLensPosition));
LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
LensPosQueryResult We0Result;
camera.sample(We0Sample, &We0Result);
IDF idf(plp.cameraDescriptor, We0Result.surfPt, wls);
SampledSpectrum We0 = idf.evaluateSpatialImportance();
IDFSample We1Sample(p.x / plp.imageSize.x, p.y / plp.imageSize.y);
IDFQueryResult We1Result;
SampledSpectrum We1 = idf.sample(IDFQuery(), We1Sample, &We1Result);
We1Result.dirPDF *= plp.imageSize.x * plp.imageSize.y;
Point3D rayOrg = We0Result.surfPt.position;
Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal);
// initial path throughput: importance / (all sampling PDFs)
SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) /
(We0Result.areaPDF * We1Result.dirPDF * selectWLPDF));
PTReadOnlyPayload roPayload = {};
roPayload.initImportance = alpha.importance(wls.selectedLambdaIndex());
roPayload.wls = wls;
roPayload.prevDirPDF = We1Result.dirPDF;
roPayload.prevSampledType = We1Result.sampledType;
roPayload.pathLength = 0;
roPayload.maxLengthTerminate = false;
PTWriteOnlyPayload woPayload = {};
PTReadWritePayload rwPayload = {};
rwPayload.rng = rng;
rwPayload.alpha = alpha;
rwPayload.contribution = SampledSpectrum::Zero();
PTExtraPayload exPayload = {};
PTReadOnlyPayload* roPayloadPtr = &roPayload;
PTWriteOnlyPayload* woPayloadPtr = &woPayload;
PTReadWritePayload* rwPayloadPtr = &rwPayload;
PTExtraPayload* exPayloadPtr = &exPayload;
const uint32_t MaxPathLength = 25;
while (true) {
woPayload.terminate = true;
++roPayload.pathLength;
if (roPayload.pathLength >= MaxPathLength)
roPayload.maxLengthTerminate = true;
// debug mode: stop once past the visualized bounce count
if (debugPathLength != 0 &&
roPayload.pathLength > debugPathLength)
break;
optixu::trace<PTPayloadSignature>(
plp.topGroup, asOptiXType(rayOrg), asOptiXType(rayDir), 0.0f, FLT_MAX, 0.0f,
VisibilityGroup_Everything, OPTIX_RAY_FLAG_NONE,
PTRayType::Closest, MaxNumRayTypes, PTRayType::Closest,
roPayloadPtr, woPayloadPtr, rwPayloadPtr, exPayloadPtr);
// after the primary hit, record denoiser aux buffers (albedo/normal)
if (roPayload.pathLength == 1) {
uint32_t linearIndex = launchIndex.y * plp.imageStrideInPixels + launchIndex.x;
DiscretizedSpectrum &accumAlbedo = plp.accumAlbedoBuffer[linearIndex];
Normal3D &accumNormal = plp.accumNormalBuffer[linearIndex];
if (plp.numAccumFrames == 1) {
accumAlbedo = DiscretizedSpectrum::Zero();
accumNormal = Normal3D(0.0f, 0.0f, 0.0f);
}
TripletSpectrum whitePoint = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65,
1, 1, 1);
accumAlbedo += DiscretizedSpectrum(wls, exPayload.firstHitAlbedo * whitePoint.evaluate(wls) / selectWLPDF);
accumNormal += exPayload.firstHitNormal;
// extra payload only needed on the first bounce
exPayloadPtr = nullptr;
}
if (woPayload.terminate)
break;
VLRAssert(roPayload.pathLength < MaxPathLength, "Path should be terminated... Something went wrong...");
// continue the path from the sampled BSDF direction
rayOrg = woPayload.nextOrigin;
rayDir = woPayload.nextDirection;
roPayload.prevDirPDF = woPayload.dirPDF;
roPayload.prevSampledType = woPayload.sampledType;
}
plp.rngBuffer.write(launchIndex, rwPayload.rng);
// drop NaN/Inf contributions instead of polluting the accumulation buffer
if (!rwPayload.contribution.allFinite()) {
vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", plp.numAccumFrames, launchIndex.x, launchIndex.y);
return;
}
if (plp.numAccumFrames == 1)
plp.accumBuffer[launchIndex].reset();
plp.accumBuffer[launchIndex].add(wls, rwPayload.contribution);
}
// Common Closest Hit Program for All Primitive Types and Materials
CUDA_DEVICE_KERNEL void RT_CH_NAME(pathTracingIteration)() {
const auto hp = HitPointParameter::get();
PTReadOnlyPayload* roPayload;
PTWriteOnlyPayload* woPayload;
PTReadWritePayload* rwPayload;
PTExtraPayload* exPayload;
PTPayloadSignature::get(&roPayload, &woPayload, &rwPayload, &exPayload);
KernelRNG &rng = rwPayload->rng;
WavelengthSamples &wls = roPayload->wls;
SurfacePoint surfPt;
float hypAreaPDF;
calcSurfacePoint(hp, wls, &surfPt, &hypAreaPDF);
const SurfaceMaterialDescriptor &matDesc = plp.materialDescriptorBuffer[hp.sbtr->geomInst.materialIndex];
constexpr TransportMode transportMode = TransportMode::Radiance;
BSDF<transportMode> bsdf(matDesc, surfPt, wls);
EDF edf(matDesc, surfPt, wls);
if (exPayload) {
exPayload->firstHitAlbedo = bsdf.getBaseColor();
exPayload->firstHitNormal = surfPt.shadingFrame.z;
}
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(optixGetWorldRayDirection()));
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if ((debugPathLength == 0 || roPayload->pathLength == debugPathLength) &&
spEmittance.hasNonZero()) {
EDFQuery feQuery(DirectionType::All(), wls);
SampledSpectrum Le = spEmittance * edf.evaluate(feQuery, dirOutLocal);
float MISWeight = 1.0f;
if (!roPayload->prevSampledType.isDelta() && roPayload->pathLength > 1) {
const Instance &inst = plp.instBuffer[surfPt.instIndex];
float instProb = inst.lightGeomInstDistribution.integral() / plp.lightInstDist.integral();
float geomInstProb = hp.sbtr->geomInst.importance / inst.lightGeomInstDistribution.integral();
float bsdfPDF = roPayload->prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(optixGetWorldRayOrigin()));
float lightPDF = instProb * geomInstProb * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
rwPayload->contribution += rwPayload->alpha * Le * MISWeight;
}
if (surfPt.atInfinity || roPayload->maxLengthTerminate)
return;
// Russian roulette
float continueProb = std::fmin(rwPayload->alpha.importance(wls.selectedLambdaIndex()) / roPayload->initImportance, 1.0f);
if (rng.getFloat0cTo1o() >= continueProb)
return;
rwPayload->alpha /= continueProb;
Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal);
BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, transportMode, DirectionType::All(), wls);
// Next Event Estimation (explicit light sampling)
if ((debugPathLength == 0 || (roPayload->pathLength + 1) == debugPathLength) &&
bsdf.hasNonDelta()) {
float uLight = rng.getFloat0cTo1o();
SurfaceLight light;
float lightProb;
float uPrim;
selectSurfaceLight(uLight, &light, &lightProb, &uPrim);
SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
SurfaceLightPosQueryResult lpResult;
light.sample(lpSample, surfPt.position, &lpResult);
const SurfaceMaterialDescriptor &lightMatDesc = plp.materialDescriptorBuffer[lpResult.materialIndex];
EDF ledf(lightMatDesc, lpResult.surfPt, wls);
SampledSpectrum M = ledf.evaluateEmittance();
Vector3D shadowRayDir;
float squaredDistance;
float fractionalVisibility;
if (M.hasNonZero() &&
testVisibility<PTRayType::Shadow>(
surfPt, lpResult.surfPt, wls, &shadowRayDir, &squaredDistance,
&fractionalVisibility)) {
float recSquaredDistance = 1.0f / squaredDistance;
Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir);
Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir);
EDFQuery feQuery(DirectionType::All(), wls);
SampledSpectrum Le = M * ledf.evaluate(feQuery, shadowRayDir_l);
float lightPDF = lightProb * lpResult.areaPDF;
SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn);
float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir);
float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight * recSquaredDistance;
float MISWeight = 1.0f;
if (!lpResult.posType.isDelta() && !::vlr::isinf(lightPDF))
MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight * recSquaredDistance;
float scalarCoeff = G * MISWeight / lightPDF; // contributionCUDA
rwPayload->contribution += rwPayload->alpha * Le * fs * scalarCoeff;
}
}
BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
BSDFQueryResult fsResult;
SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult);
if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f)
return;
if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) {
fsResult.dirPDF /= SampledSpectrum::NumComponents();
wls.setSingleIsSelected();
}
float cosFactor = dot(fsResult.dirLocal, geomNormalLocal);
rwPayload->alpha *= fs * (::fabs(cosFactor) / fsResult.dirPDF);
Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal);
woPayload->nextOrigin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ? surfPt.geometricNormal : -surfPt.geometricNormal);
woPayload->nextDirection = dirIn;
woPayload->dirPDF = fsResult.dirPDF;
woPayload->sampledType = fsResult.sampledType;
woPayload->terminate = false;
}
// JP: Intersection/Bounding Box ProgramClosest Hit Program
// OptiXBVHLBVHAABB
// Miss Program
CUDA_DEVICE_KERNEL void RT_MS_NAME(pathTracingMiss)() {
PTReadOnlyPayload* roPayload;
PTReadWritePayload* rwPayload;
PTExtraPayload* exPayload;
PTPayloadSignature::get(&roPayload, nullptr, &rwPayload, &exPayload);
if (exPayload) {
exPayload->firstHitAlbedo = SampledSpectrum::Zero();
exPayload->firstHitNormal = Normal3D(0.0f, 0.0f, 0.0f);
}
const Instance &inst = plp.instBuffer[plp.envLightInstIndex];
const GeometryInstance &geomInst = plp.geomInstBuffer[inst.geomInstIndices[0]];
if (geomInst.importance == 0)
return;
Vector3D direction = asVector3D(optixGetWorldRayDirection());
float phi, theta;
direction.toPolarYUp(&theta, &phi);
float sinPhi, cosPhi;
::vlr::sincos(phi, &sinPhi, &cosPhi);
Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi));
ReferenceFrame shadingFrame;
shadingFrame.x = texCoord0Dir;
shadingFrame.z = -direction;
shadingFrame.y = cross(shadingFrame.z, shadingFrame.x);
SurfacePoint surfPt;
surfPt.position = Point3D(direction.x, direction.y, direction.z);
surfPt.shadingFrame = shadingFrame;
surfPt.isPoint = false;
surfPt.atInfinity = true;
surfPt.geometricNormal = -direction;
surfPt.u = phi;
surfPt.v = theta;
phi += inst.rotationPhi;
phi = phi - ::vlr::floor(phi / (2 * VLR_M_PI)) * 2 * VLR_M_PI;
surfPt.texCoord = TexCoord2D(phi / (2 * VLR_M_PI), theta / VLR_M_PI);
VLRAssert(vlr::isfinite(phi) && vlr::isfinite(theta), "\"phi\", \"theta\": Not finite values %g, %g.", phi, theta);
float uvPDF = geomInst.asInfSphere.importanceMap.evaluatePDF(phi / (2 * VLR_M_PI), theta / VLR_M_PI);
float hypAreaPDF = uvPDF / (2 * VLR_M_PI * VLR_M_PI * std::sin(theta));
const SurfaceMaterialDescriptor &matDesc = plp.materialDescriptorBuffer[geomInst.materialIndex];
EDF edf(matDesc, surfPt, roPayload->wls);
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-direction);
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if ((debugPathLength == 0 || roPayload->pathLength == debugPathLength) &&
spEmittance.hasNonZero()) {
EDFQuery feQuery(DirectionType::All(), roPayload->wls);
SampledSpectrum Le = spEmittance * edf.evaluate(feQuery, dirOutLocal);
float MISWeight = 1.0f;
if (!roPayload->prevSampledType.isDelta() && roPayload->pathLength > 1) {
float instProb = inst.lightGeomInstDistribution.integral() / plp.lightInstDist.integral();
float geomInstProb = geomInst.importance / inst.lightGeomInstDistribution.integral();
float bsdfPDF = roPayload->prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(optixGetWorldRayOrigin()));
float lightPDF = instProb * geomInstProb * hypAreaPDF * dist2 / ::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
rwPayload->contribution += rwPayload->alpha * Le * MISWeight;
}
}
}
| 959f6fe6aa5845ff855ed06eb518ea5786847847.cu | #include "../shared/light_transport_common.h"
namespace vlr {
using namespace shared;
static constexpr int32_t debugPathLength = 0;
CUDA_DEVICE_FUNCTION bool onProbePixel() {
return optixGetLaunchIndex().x == plp.probePixX && optixGetLaunchIndex().y == plp.probePixY;
}
// Common Any Hit Program for All Primitive Types and Materials for non-shadow rays
CUDA_DEVICE_KERNEL void RT_AH_NAME(pathTracingAnyHitWithAlpha)() {
PTReadOnlyPayload* roPayload;
PTReadWritePayload* rwPayload;
PTPayloadSignature::get(&roPayload, nullptr, &rwPayload, nullptr);
float alpha = getAlpha(roPayload->wls);
// Stochastic Alpha Test
if (rwPayload->rng.getFloat0cTo1o() >= alpha)
optixIgnoreIntersection();
}
// Common Ray Generation Program for All Camera Types
CUDA_DEVICE_KERNEL void RT_RG_NAME(pathTracing)() {
uint2 launchIndex = make_uint2(optixGetLaunchIndex().x, optixGetLaunchIndex().y);
KernelRNG rng = plp.rngBuffer.read(launchIndex);
float2 p = make_float2(launchIndex.x + rng.getFloat0cTo1o(),
launchIndex.y + rng.getFloat0cTo1o());
float selectWLPDF;
WavelengthSamples wls = WavelengthSamples::createWithEqualOffsets(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), &selectWLPDF);
Camera camera(static_cast<ProgSigCamera_sample>(plp.progSampleLensPosition));
LensPosSample We0Sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
LensPosQueryResult We0Result;
camera.sample(We0Sample, &We0Result);
IDF idf(plp.cameraDescriptor, We0Result.surfPt, wls);
SampledSpectrum We0 = idf.evaluateSpatialImportance();
IDFSample We1Sample(p.x / plp.imageSize.x, p.y / plp.imageSize.y);
IDFQueryResult We1Result;
SampledSpectrum We1 = idf.sample(IDFQuery(), We1Sample, &We1Result);
We1Result.dirPDF *= plp.imageSize.x * plp.imageSize.y;
Point3D rayOrg = We0Result.surfPt.position;
Vector3D rayDir = We0Result.surfPt.fromLocal(We1Result.dirLocal);
SampledSpectrum alpha = (We0 * We1) * (We0Result.surfPt.calcCosTerm(rayDir) /
(We0Result.areaPDF * We1Result.dirPDF * selectWLPDF));
PTReadOnlyPayload roPayload = {};
roPayload.initImportance = alpha.importance(wls.selectedLambdaIndex());
roPayload.wls = wls;
roPayload.prevDirPDF = We1Result.dirPDF;
roPayload.prevSampledType = We1Result.sampledType;
roPayload.pathLength = 0;
roPayload.maxLengthTerminate = false;
PTWriteOnlyPayload woPayload = {};
PTReadWritePayload rwPayload = {};
rwPayload.rng = rng;
rwPayload.alpha = alpha;
rwPayload.contribution = SampledSpectrum::Zero();
PTExtraPayload exPayload = {};
PTReadOnlyPayload* roPayloadPtr = &roPayload;
PTWriteOnlyPayload* woPayloadPtr = &woPayload;
PTReadWritePayload* rwPayloadPtr = &rwPayload;
PTExtraPayload* exPayloadPtr = &exPayload;
const uint32_t MaxPathLength = 25;
while (true) {
woPayload.terminate = true;
++roPayload.pathLength;
if (roPayload.pathLength >= MaxPathLength)
roPayload.maxLengthTerminate = true;
if (debugPathLength != 0 &&
roPayload.pathLength > debugPathLength)
break;
optixu::trace<PTPayloadSignature>(
plp.topGroup, asOptiXType(rayOrg), asOptiXType(rayDir), 0.0f, FLT_MAX, 0.0f,
VisibilityGroup_Everything, OPTIX_RAY_FLAG_NONE,
PTRayType::Closest, MaxNumRayTypes, PTRayType::Closest,
roPayloadPtr, woPayloadPtr, rwPayloadPtr, exPayloadPtr);
if (roPayload.pathLength == 1) {
uint32_t linearIndex = launchIndex.y * plp.imageStrideInPixels + launchIndex.x;
DiscretizedSpectrum &accumAlbedo = plp.accumAlbedoBuffer[linearIndex];
Normal3D &accumNormal = plp.accumNormalBuffer[linearIndex];
if (plp.numAccumFrames == 1) {
accumAlbedo = DiscretizedSpectrum::Zero();
accumNormal = Normal3D(0.0f, 0.0f, 0.0f);
}
TripletSpectrum whitePoint = createTripletSpectrum(SpectrumType::LightSource, ColorSpace::Rec709_D65,
1, 1, 1);
accumAlbedo += DiscretizedSpectrum(wls, exPayload.firstHitAlbedo * whitePoint.evaluate(wls) / selectWLPDF);
accumNormal += exPayload.firstHitNormal;
exPayloadPtr = nullptr;
}
if (woPayload.terminate)
break;
VLRAssert(roPayload.pathLength < MaxPathLength, "Path should be terminated... Something went wrong...");
rayOrg = woPayload.nextOrigin;
rayDir = woPayload.nextDirection;
roPayload.prevDirPDF = woPayload.dirPDF;
roPayload.prevSampledType = woPayload.sampledType;
}
plp.rngBuffer.write(launchIndex, rwPayload.rng);
if (!rwPayload.contribution.allFinite()) {
vlrprintf("Pass %u, (%u, %u): Not a finite value.\n", plp.numAccumFrames, launchIndex.x, launchIndex.y);
return;
}
if (plp.numAccumFrames == 1)
plp.accumBuffer[launchIndex].reset();
plp.accumBuffer[launchIndex].add(wls, rwPayload.contribution);
}
// Common Closest Hit Program for All Primitive Types and Materials
CUDA_DEVICE_KERNEL void RT_CH_NAME(pathTracingIteration)() {
const auto hp = HitPointParameter::get();
PTReadOnlyPayload* roPayload;
PTWriteOnlyPayload* woPayload;
PTReadWritePayload* rwPayload;
PTExtraPayload* exPayload;
PTPayloadSignature::get(&roPayload, &woPayload, &rwPayload, &exPayload);
KernelRNG &rng = rwPayload->rng;
WavelengthSamples &wls = roPayload->wls;
SurfacePoint surfPt;
float hypAreaPDF;
calcSurfacePoint(hp, wls, &surfPt, &hypAreaPDF);
const SurfaceMaterialDescriptor &matDesc = plp.materialDescriptorBuffer[hp.sbtr->geomInst.materialIndex];
constexpr TransportMode transportMode = TransportMode::Radiance;
BSDF<transportMode> bsdf(matDesc, surfPt, wls);
EDF edf(matDesc, surfPt, wls);
if (exPayload) {
exPayload->firstHitAlbedo = bsdf.getBaseColor();
exPayload->firstHitNormal = surfPt.shadingFrame.z;
}
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-asVector3D(optixGetWorldRayDirection()));
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if ((debugPathLength == 0 || roPayload->pathLength == debugPathLength) &&
spEmittance.hasNonZero()) {
EDFQuery feQuery(DirectionType::All(), wls);
SampledSpectrum Le = spEmittance * edf.evaluate(feQuery, dirOutLocal);
float MISWeight = 1.0f;
if (!roPayload->prevSampledType.isDelta() && roPayload->pathLength > 1) {
const Instance &inst = plp.instBuffer[surfPt.instIndex];
float instProb = inst.lightGeomInstDistribution.integral() / plp.lightInstDist.integral();
float geomInstProb = hp.sbtr->geomInst.importance / inst.lightGeomInstDistribution.integral();
float bsdfPDF = roPayload->prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(optixGetWorldRayOrigin()));
float lightPDF = instProb * geomInstProb * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
rwPayload->contribution += rwPayload->alpha * Le * MISWeight;
}
if (surfPt.atInfinity || roPayload->maxLengthTerminate)
return;
// Russian roulette
float continueProb = std::fmin(rwPayload->alpha.importance(wls.selectedLambdaIndex()) / roPayload->initImportance, 1.0f);
if (rng.getFloat0cTo1o() >= continueProb)
return;
rwPayload->alpha /= continueProb;
Normal3D geomNormalLocal = surfPt.shadingFrame.toLocal(surfPt.geometricNormal);
BSDFQuery fsQuery(dirOutLocal, geomNormalLocal, transportMode, DirectionType::All(), wls);
// Next Event Estimation (explicit light sampling)
if ((debugPathLength == 0 || (roPayload->pathLength + 1) == debugPathLength) &&
bsdf.hasNonDelta()) {
float uLight = rng.getFloat0cTo1o();
SurfaceLight light;
float lightProb;
float uPrim;
selectSurfaceLight(uLight, &light, &lightProb, &uPrim);
SurfaceLightPosSample lpSample(uPrim, rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
SurfaceLightPosQueryResult lpResult;
light.sample(lpSample, surfPt.position, &lpResult);
const SurfaceMaterialDescriptor &lightMatDesc = plp.materialDescriptorBuffer[lpResult.materialIndex];
EDF ledf(lightMatDesc, lpResult.surfPt, wls);
SampledSpectrum M = ledf.evaluateEmittance();
Vector3D shadowRayDir;
float squaredDistance;
float fractionalVisibility;
if (M.hasNonZero() &&
testVisibility<PTRayType::Shadow>(
surfPt, lpResult.surfPt, wls, &shadowRayDir, &squaredDistance,
&fractionalVisibility)) {
float recSquaredDistance = 1.0f / squaredDistance;
Vector3D shadowRayDir_l = lpResult.surfPt.toLocal(-shadowRayDir);
Vector3D shadowRayDir_sn = surfPt.toLocal(shadowRayDir);
EDFQuery feQuery(DirectionType::All(), wls);
SampledSpectrum Le = M * ledf.evaluate(feQuery, shadowRayDir_l);
float lightPDF = lightProb * lpResult.areaPDF;
SampledSpectrum fs = bsdf.evaluate(fsQuery, shadowRayDir_sn);
float cosLight = lpResult.surfPt.calcCosTerm(-shadowRayDir);
float bsdfPDF = bsdf.evaluatePDF(fsQuery, shadowRayDir_sn) * cosLight * recSquaredDistance;
float MISWeight = 1.0f;
if (!lpResult.posType.isDelta() && !::vlr::isinf(lightPDF))
MISWeight = (lightPDF * lightPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
float G = fractionalVisibility * absDot(shadowRayDir_sn, geomNormalLocal) * cosLight * recSquaredDistance;
float scalarCoeff = G * MISWeight / lightPDF; // 直接contributionの計算式に入れるとCUDAのバグなのかおかしな結果になる。
rwPayload->contribution += rwPayload->alpha * Le * fs * scalarCoeff;
}
}
BSDFSample sample(rng.getFloat0cTo1o(), rng.getFloat0cTo1o(), rng.getFloat0cTo1o());
BSDFQueryResult fsResult;
SampledSpectrum fs = bsdf.sample(fsQuery, sample, &fsResult);
if (fs == SampledSpectrum::Zero() || fsResult.dirPDF == 0.0f)
return;
if (fsResult.sampledType.isDispersive() && !wls.singleIsSelected()) {
fsResult.dirPDF /= SampledSpectrum::NumComponents();
wls.setSingleIsSelected();
}
float cosFactor = dot(fsResult.dirLocal, geomNormalLocal);
rwPayload->alpha *= fs * (std::fabs(cosFactor) / fsResult.dirPDF);
Vector3D dirIn = surfPt.fromLocal(fsResult.dirLocal);
woPayload->nextOrigin = offsetRayOrigin(surfPt.position, cosFactor > 0.0f ? surfPt.geometricNormal : -surfPt.geometricNormal);
woPayload->nextDirection = dirIn;
woPayload->dirPDF = fsResult.dirPDF;
woPayload->sampledType = fsResult.sampledType;
woPayload->terminate = false;
}
// JP: 本当は無限大の球のIntersection/Bounding Box Programを使用して環境光に関する処理もClosest Hit Programで統一的に行いたい。
// が、OptiXのBVHビルダーがLBVHベースなので無限大のAABBを生成するのは危険。
// 仕方なくMiss Programで環境光を処理する。
CUDA_DEVICE_KERNEL void RT_MS_NAME(pathTracingMiss)() {
PTReadOnlyPayload* roPayload;
PTReadWritePayload* rwPayload;
PTExtraPayload* exPayload;
PTPayloadSignature::get(&roPayload, nullptr, &rwPayload, &exPayload);
if (exPayload) {
exPayload->firstHitAlbedo = SampledSpectrum::Zero();
exPayload->firstHitNormal = Normal3D(0.0f, 0.0f, 0.0f);
}
const Instance &inst = plp.instBuffer[plp.envLightInstIndex];
const GeometryInstance &geomInst = plp.geomInstBuffer[inst.geomInstIndices[0]];
if (geomInst.importance == 0)
return;
Vector3D direction = asVector3D(optixGetWorldRayDirection());
float phi, theta;
direction.toPolarYUp(&theta, &phi);
float sinPhi, cosPhi;
::vlr::sincos(phi, &sinPhi, &cosPhi);
Vector3D texCoord0Dir = normalize(Vector3D(-cosPhi, 0.0f, -sinPhi));
ReferenceFrame shadingFrame;
shadingFrame.x = texCoord0Dir;
shadingFrame.z = -direction;
shadingFrame.y = cross(shadingFrame.z, shadingFrame.x);
SurfacePoint surfPt;
surfPt.position = Point3D(direction.x, direction.y, direction.z);
surfPt.shadingFrame = shadingFrame;
surfPt.isPoint = false;
surfPt.atInfinity = true;
surfPt.geometricNormal = -direction;
surfPt.u = phi;
surfPt.v = theta;
phi += inst.rotationPhi;
phi = phi - ::vlr::floor(phi / (2 * VLR_M_PI)) * 2 * VLR_M_PI;
surfPt.texCoord = TexCoord2D(phi / (2 * VLR_M_PI), theta / VLR_M_PI);
VLRAssert(vlr::isfinite(phi) && vlr::isfinite(theta), "\"phi\", \"theta\": Not finite values %g, %g.", phi, theta);
float uvPDF = geomInst.asInfSphere.importanceMap.evaluatePDF(phi / (2 * VLR_M_PI), theta / VLR_M_PI);
float hypAreaPDF = uvPDF / (2 * VLR_M_PI * VLR_M_PI * std::sin(theta));
const SurfaceMaterialDescriptor &matDesc = plp.materialDescriptorBuffer[geomInst.materialIndex];
EDF edf(matDesc, surfPt, roPayload->wls);
Vector3D dirOutLocal = surfPt.shadingFrame.toLocal(-direction);
// implicit light sampling
SampledSpectrum spEmittance = edf.evaluateEmittance();
if ((debugPathLength == 0 || roPayload->pathLength == debugPathLength) &&
spEmittance.hasNonZero()) {
EDFQuery feQuery(DirectionType::All(), roPayload->wls);
SampledSpectrum Le = spEmittance * edf.evaluate(feQuery, dirOutLocal);
float MISWeight = 1.0f;
if (!roPayload->prevSampledType.isDelta() && roPayload->pathLength > 1) {
float instProb = inst.lightGeomInstDistribution.integral() / plp.lightInstDist.integral();
float geomInstProb = geomInst.importance / inst.lightGeomInstDistribution.integral();
float bsdfPDF = roPayload->prevDirPDF;
float dist2 = surfPt.calcSquaredDistance(asPoint3D(optixGetWorldRayOrigin()));
float lightPDF = instProb * geomInstProb * hypAreaPDF * dist2 / std::fabs(dirOutLocal.z);
MISWeight = (bsdfPDF * bsdfPDF) / (lightPDF * lightPDF + bsdfPDF * bsdfPDF);
}
rwPayload->contribution += rwPayload->alpha * Le * MISWeight;
}
}
}
|
3a76488d107ef68eaa3c4a6a4ea262e5085d1857.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <math.h>
#define SIZE 3
#define BLOCKSIZE 512
#define PI 3.1415926535897932384626433
extern "C" void hello_world(){
printf("oi, fui importada com sucesso. \n");
}
extern "C" void randomize(float *dV, int size){
hiprandGenerator_t prng;
/* create generator*/
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_XORWOW);
/* generate seed */
hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) 1337);
/* randomize */
hiprandGenerateUniform(prng, dV, size);
}
extern "C" __global__ void applyfunction(float *dV, float *dV2, int size, float k, float M){ int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < size){
dV[i] = ((float)sin((2*M + 1)*PI*dV[i])*cos(2*PI*k*dV[i]))/sin(PI*dV[i]);
dV2[i] = dV[i]*dV[i];
}
}
extern "C" __global__ void sumvec(float *idata, int size){
__shared__ float sdata[BLOCKSIZE];
int s;
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int pseudoIf = i < size;
/*
if (blockIdx.x == 1 && threadIdx.x == 0){
printf("i = %d; size = %d; pseudoIf = %d\n", i, size, pseudoIf);
}*/
sdata[tid] = pseudoIf*idata[i];
/*
__syncthreads();
if (tid == 0){
for (s = 0; s < size; s++){
printf("id = %d sdata[%d] = %f; %f\n", blockIdx.x, s, sdata[s], idata[blockIdx.x*blockDim.x + s]);
}
}*/
__syncthreads();
for (s = blockDim.x/2; s > 0; s >>= 1){
if (tid < s){
sdata[tid] = sdata[tid] + sdata[tid+s];
}
__syncthreads();
}
if (tid == 0){
printf("id = %d, stored = %f\n", blockIdx.x, sdata[0]);
idata[blockIdx.x] = sdata[0];
}
}
extern "C" float* MC_CUDA(int N, float k, float M){
int i, devID = 0;
float *dV, *dV2, f, f2;
hipError_t error;
hipDeviceProp_t deviceProp;
static float resultados[2];
/*CUDA boring stuff */
error = hipGetDevice(&devID);
if (error != hipSuccess){
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp,devID);
if (deviceProp.computeMode == hipComputeModeProhibited){
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess){
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
/* Allocate array on device */
error = hipMalloc(&dV, sizeof(float)*N);
if (error != hipSuccess){
printf("hipMalloc returned error %s (code %d), line(%d) \n", hipGetErrorString(error), error, __LINE__);
}
error = hipMalloc(&dV2, sizeof(float)*N);
if (error != hipSuccess){
printf("hipMalloc returned error %s (code %d), line(%d) \n", hipGetErrorString(error), error, __LINE__);
}
/* Generate array */
randomize(dV, N);
/* Apply function */
hipLaunchKernelGGL(( applyfunction), dim3((1 + (N/BLOCKSIZE))), dim3(BLOCKSIZE), 0, 0, dV, dV2, N, k, M);
/* Sum all values */
for (i = N; i > 1; i = 1+(i/BLOCKSIZE)){
printf("Number of blocks = %d\n", 1+(i/BLOCKSIZE));
printf("Size of array = %d\n", i);
hipLaunchKernelGGL(( sumvec), dim3((1+(i/BLOCKSIZE))), dim3(BLOCKSIZE), 0, 0, dV, i);
error = hipDeviceSynchronize();
if (error != hipSuccess){
printf("hipDeviceSynchronize returned error %s (code %d), line(%d) \n", hipGetErrorString(error), error, __LINE__);
}
printf("WAIT!\n");
hipLaunchKernelGGL(( sumvec), dim3((1+(i/BLOCKSIZE))), dim3(BLOCKSIZE), 0, 0, dV2, i);
error = hipDeviceSynchronize();
if (error != hipSuccess){
printf("hipDeviceSynchronize returned error %s (code %d), line(%d) \n", hipGetErrorString(error), error, __LINE__);
}
}
/* Copy values from device */
error = hipMemcpy(&f, &dV[0], sizeof(float), hipMemcpyDeviceToHost);
if (error != hipSuccess){
printf("hipMemcpy returned error %s (code %d), line(%d) \n", hipGetErrorString(error), error, __LINE__);
}
error = hipMemcpy(&f2, &dV2[0], sizeof(float), hipMemcpyDeviceToHost);
if (error != hipSuccess){
printf("hipMemcpy returned error %s (code %d), line(%d) \n", hipGetErrorString(error), error, __LINE__);
}
/* Calculate results */
printf("SOMA = %f\n", f);
f /= N;
f2 /= N;
resultados[0] = f;
resultados[1] = f2;
hipFree(dV);
hipFree(dV2);
return resultados;
} | 3a76488d107ef68eaa3c4a6a4ea262e5085d1857.cu | #include <stdio.h>
#include <assert.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <math.h>
#define SIZE 3
#define BLOCKSIZE 512
#define PI 3.1415926535897932384626433
extern "C" void hello_world(){
printf("oi, fui importada com sucesso. \n");
}
extern "C" void randomize(float *dV, int size){
curandGenerator_t prng;
/* create generator*/
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_XORWOW);
/* generate seed */
curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) 1337);
/* randomize */
curandGenerateUniform(prng, dV, size);
}
extern "C" __global__ void applyfunction(float *dV, float *dV2, int size, float k, float M){ int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < size){
dV[i] = ((float)sin((2*M + 1)*PI*dV[i])*cos(2*PI*k*dV[i]))/sin(PI*dV[i]);
dV2[i] = dV[i]*dV[i];
}
}
extern "C" __global__ void sumvec(float *idata, int size){
__shared__ float sdata[BLOCKSIZE];
int s;
int tid = threadIdx.x;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int pseudoIf = i < size;
/*
if (blockIdx.x == 1 && threadIdx.x == 0){
printf("i = %d; size = %d; pseudoIf = %d\n", i, size, pseudoIf);
}*/
sdata[tid] = pseudoIf*idata[i];
/*
__syncthreads();
if (tid == 0){
for (s = 0; s < size; s++){
printf("id = %d sdata[%d] = %f; %f\n", blockIdx.x, s, sdata[s], idata[blockIdx.x*blockDim.x + s]);
}
}*/
__syncthreads();
for (s = blockDim.x/2; s > 0; s >>= 1){
if (tid < s){
sdata[tid] = sdata[tid] + sdata[tid+s];
}
__syncthreads();
}
if (tid == 0){
printf("id = %d, stored = %f\n", blockIdx.x, sdata[0]);
idata[blockIdx.x] = sdata[0];
}
}
extern "C" float* MC_CUDA(int N, float k, float M){
int i, devID = 0;
float *dV, *dV2, f, f2;
cudaError_t error;
cudaDeviceProp deviceProp;
static float resultados[2];
/*CUDA boring stuff */
error = cudaGetDevice(&devID);
if (error != cudaSuccess){
printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp,devID);
if (deviceProp.computeMode == cudaComputeModeProhibited){
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess){
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
/* Allocate array on device */
error = cudaMalloc(&dV, sizeof(float)*N);
if (error != cudaSuccess){
printf("cudaMalloc returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaMalloc(&dV2, sizeof(float)*N);
if (error != cudaSuccess){
printf("cudaMalloc returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
}
/* Generate array */
randomize(dV, N);
/* Apply function */
applyfunction<<<(1 + (N/BLOCKSIZE)), BLOCKSIZE>>>(dV, dV2, N, k, M);
/* Sum all values */
for (i = N; i > 1; i = 1+(i/BLOCKSIZE)){
printf("Number of blocks = %d\n", 1+(i/BLOCKSIZE));
printf("Size of array = %d\n", i);
sumvec<<<(1+(i/BLOCKSIZE)), BLOCKSIZE>>>(dV, i);
error = cudaDeviceSynchronize();
if (error != cudaSuccess){
printf("cudaDeviceSynchronize returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
}
printf("WAIT!\n");
sumvec<<<(1+(i/BLOCKSIZE)), BLOCKSIZE>>>(dV2, i);
error = cudaDeviceSynchronize();
if (error != cudaSuccess){
printf("cudaDeviceSynchronize returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
}
}
/* Copy values from device */
error = cudaMemcpy(&f, &dV[0], sizeof(float), cudaMemcpyDeviceToHost);
if (error != cudaSuccess){
printf("cudaMemcpy returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaMemcpy(&f2, &dV2[0], sizeof(float), cudaMemcpyDeviceToHost);
if (error != cudaSuccess){
printf("cudaMemcpy returned error %s (code %d), line(%d) \n", cudaGetErrorString(error), error, __LINE__);
}
/* Calculate results */
printf("SOMA = %f\n", f);
f /= N;
f2 /= N;
resultados[0] = f;
resultados[1] = f2;
cudaFree(dV);
cudaFree(dV2);
return resultados;
} |
56badca22e2cebb1b6486663f84f88980da6cf69.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "subtract_arrays_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int64_t __restrict *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int64_t __restrict *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int64_t array_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
subtract_arrays_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,array_count);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
subtract_arrays_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,array_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
subtract_arrays_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,array_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 56badca22e2cebb1b6486663f84f88980da6cf69.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "subtract_arrays_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int64_t __restrict *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int64_t __restrict *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int64_t array_count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
subtract_arrays_kernel<<<gridBlock,threadBlock>>>(a,b,array_count);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
subtract_arrays_kernel<<<gridBlock,threadBlock>>>(a,b,array_count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
subtract_arrays_kernel<<<gridBlock,threadBlock>>>(a,b,array_count);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
19fff742b779991d8a79f0f34b097ac1203d29f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#define row1 10
#define col1 10
#define row2 10
#define col2 10
typedef long long int LLI;
__global__ void matproductsharedmemory(LLI *l,LLI *m, LLI *n)
{
LLI x=blockIdx.x;
LLI y=blockIdx.y;
__shared__ LLI p[col1];
LLI i;
LLI k=threadIdx.x;
n[col2*y+x]=0;
p[k]=l[col1*y+k]*m[col2*k+x];
__syncthreads();
for(i=0;i<col1;i++)
n[col2*y+x]=n[col2*y+x]+p[i];
}
int main()
{
LLI a[row1][col1];
LLI b[row2][col2];
LLI c[row1][col2];
LLI *d,*e,*f;
LLI i,j;
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
a[i][j]= i*row1+j;
}
}
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
b[i][j]=i*row2+j;
}
}
hipMalloc((void **)&d,row1*col1*sizeof(LLI));
hipMalloc((void **)&e,row2*col2*sizeof(LLI));
hipMalloc((void **)&f,row1*col2*sizeof(LLI));
hipMemcpy(d,a,row1*col1*sizeof(LLI),hipMemcpyHostToDevice);
hipMemcpy(e,b,row2*col2*sizeof(LLI),hipMemcpyHostToDevice);
dim3 grid(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
hipLaunchKernelGGL((
matproductsharedmemory), dim3(grid),dim3(col1), 0, 0, d,e,f);
hipMemcpy(c,f,row1*col2*sizeof(LLI),hipMemcpyDeviceToHost);
/*
printf("\n Product of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%Ld\t",c[i][j]);
}
printf("\n");
}
*/
hipFree(d);
hipFree(e);
hipFree(f);
return 0;
}
/*
OUTPUT profile
==13287== NVPROF is profiling process 13287, command: ./a.out
==13287== Profiling application: ./a.out
==13287== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 94.72% 2.5322ms 1 2.5322ms 2.5322ms 2.5322ms matproductsharedmemory(__int64*, __int64*, __int64*)
3.68% 98.338us 2 49.169us 49.025us 49.313us [CUDA memcpy HtoD]
1.61% 42.913us 1 42.913us 42.913us 42.913us [CUDA memcpy DtoH]
API calls: 98.22% 189.54ms 3 63.178ms 5.3290us 189.52ms hipMalloc
1.43% 2.7661ms 3 922.02us 26.698us 2.6712ms hipMemcpy
0.19% 361.76us 94 3.8480us 170ns 233.68us hipDeviceGetAttribute
0.08% 150.22us 3 50.073us 6.2080us 110.67us hipFree
0.05% 89.941us 1 89.941us 89.941us 89.941us cuDeviceTotalMem
0.01% 27.216us 1 27.216us 27.216us 27.216us hipDeviceGetName
0.01% 24.939us 1 24.939us 24.939us 24.939us hipLaunch
0.00% 2.2690us 3 756ns 186ns 1.7650us hipGetDeviceCount
0.00% 1.0820us 2 541ns 239ns 843ns hipDeviceGet
0.00% 955ns 3 318ns 172ns 542ns hipSetupArgument
0.00% 724ns 1 724ns 724ns 724ns hipConfigureCall
*/ | 19fff742b779991d8a79f0f34b097ac1203d29f9.cu | #include<stdio.h>
#include<cuda.h>
#define row1 10
#define col1 10
#define row2 10
#define col2 10
typedef long long int LLI;
__global__ void matproductsharedmemory(LLI *l,LLI *m, LLI *n)
{
LLI x=blockIdx.x;
LLI y=blockIdx.y;
__shared__ LLI p[col1];
LLI i;
LLI k=threadIdx.x;
n[col2*y+x]=0;
p[k]=l[col1*y+k]*m[col2*k+x];
__syncthreads();
for(i=0;i<col1;i++)
n[col2*y+x]=n[col2*y+x]+p[i];
}
int main()
{
LLI a[row1][col1];
LLI b[row2][col2];
LLI c[row1][col2];
LLI *d,*e,*f;
LLI i,j;
for(i=0;i<row1;i++)
{
for(j=0;j<col1;j++)
{
a[i][j]= i*row1+j;
}
}
for(i=0;i<row2;i++)
{
for(j=0;j<col2;j++)
{
b[i][j]=i*row2+j;
}
}
cudaMalloc((void **)&d,row1*col1*sizeof(LLI));
cudaMalloc((void **)&e,row2*col2*sizeof(LLI));
cudaMalloc((void **)&f,row1*col2*sizeof(LLI));
cudaMemcpy(d,a,row1*col1*sizeof(LLI),cudaMemcpyHostToDevice);
cudaMemcpy(e,b,row2*col2*sizeof(LLI),cudaMemcpyHostToDevice);
dim3 grid(col2,row1);
/* Here we are defining two dimensional Grid(collection of blocks) structure. Syntax is dim3 grid(no. of columns,no. of rows) */
matproductsharedmemory<<<grid,col1>>>(d,e,f);
cudaMemcpy(c,f,row1*col2*sizeof(LLI),cudaMemcpyDeviceToHost);
/*
printf("\n Product of two matrices:\n ");
for(i=0;i<row1;i++)
{
for(j=0;j<col2;j++)
{
printf("%Ld\t",c[i][j]);
}
printf("\n");
}
*/
cudaFree(d);
cudaFree(e);
cudaFree(f);
return 0;
}
/*
OUTPUT profile
==13287== NVPROF is profiling process 13287, command: ./a.out
==13287== Profiling application: ./a.out
==13287== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 94.72% 2.5322ms 1 2.5322ms 2.5322ms 2.5322ms matproductsharedmemory(__int64*, __int64*, __int64*)
3.68% 98.338us 2 49.169us 49.025us 49.313us [CUDA memcpy HtoD]
1.61% 42.913us 1 42.913us 42.913us 42.913us [CUDA memcpy DtoH]
API calls: 98.22% 189.54ms 3 63.178ms 5.3290us 189.52ms cudaMalloc
1.43% 2.7661ms 3 922.02us 26.698us 2.6712ms cudaMemcpy
0.19% 361.76us 94 3.8480us 170ns 233.68us cuDeviceGetAttribute
0.08% 150.22us 3 50.073us 6.2080us 110.67us cudaFree
0.05% 89.941us 1 89.941us 89.941us 89.941us cuDeviceTotalMem
0.01% 27.216us 1 27.216us 27.216us 27.216us cuDeviceGetName
0.01% 24.939us 1 24.939us 24.939us 24.939us cudaLaunch
0.00% 2.2690us 3 756ns 186ns 1.7650us cuDeviceGetCount
0.00% 1.0820us 2 541ns 239ns 843ns cuDeviceGet
0.00% 955ns 3 318ns 172ns 542ns cudaSetupArgument
0.00% 724ns 1 724ns 724ns 724ns cudaConfigureCall
*/ |
31e1bca0e9b03205c41e017d5375719c668b1b7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This software is Copyright (c) 2011 Lukas Odzioba <lukas dot odzioba at gmail dot com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "../cuda_cryptsha256.h"
#include "cuda_common.cuh"
extern "C" void sha256_crypt_gpu(crypt_sha256_password * inbuffer,
crypt_sha256_hash * outbuffer, crypt_sha256_salt * host_salt);
__constant__ crypt_sha256_salt cuda_salt[1];
__constant__ uint32_t k[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe,
0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa,
0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb,
0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624,
0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb,
0xbef9a3f7, 0xc67178f2
};
__device__ void init_ctx(sha256_ctx * ctx)
{
ctx->H[0] = 0x6a09e667;
ctx->H[1] = 0xbb67ae85;
ctx->H[2] = 0x3c6ef372;
ctx->H[3] = 0xa54ff53a;
ctx->H[4] = 0x510e527f;
ctx->H[5] = 0x9b05688c;
ctx->H[6] = 0x1f83d9ab;
ctx->H[7] = 0x5be0cd19;
ctx->total = 0;
ctx->buflen = 0;
}
__device__ void insert_to_buffer(sha256_ctx * ctx, const uint8_t * string,
uint8_t len)
{
int i = len;
uint8_t *d = &ctx->buffer[ctx->buflen];
while (i--)
*d++ = *string++;
ctx->buflen += len;
}
__device__ void sha256_block(sha256_ctx * ctx)
{
int i;
uint32_t a = ctx->H[0];
uint32_t b = ctx->H[1];
uint32_t c = ctx->H[2];
uint32_t d = ctx->H[3];
uint32_t e = ctx->H[4];
uint32_t f = ctx->H[5];
uint32_t g = ctx->H[6];
uint32_t h = ctx->H[7];
uint32_t w[16];
uint32_t *data = (uint32_t *) ctx->buffer;
#pragma unroll 16
for (i = 0; i < 16; i++)
w[i] = SWAP(data[i]);
uint32_t t1, t2;
for (i = 0; i < 16; i++) {
t1 = k[i] + w[i] + h + Sigma1(e) + Ch(e, f, g);
t2 = Maj(a, b, c) + Sigma0(a);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
for (i = 16; i < 64; i++) {
w[i & 15] =
sigma1(w[(i - 2) & 15]) + sigma0(w[(i - 15) & 15]) + w[(i -
16) & 15] + w[(i - 7) & 15];
t1 = k[i] + w[i & 15] + h + Sigma1(e) + Ch(e, f, g);
t2 = Maj(a, b, c) + Sigma0(a);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
ctx->H[0] += a;
ctx->H[1] += b;
ctx->H[2] += c;
ctx->H[3] += d;
ctx->H[4] += e;
ctx->H[5] += f;
ctx->H[6] += g;
ctx->H[7] += h;
}
__device__ void ctx_update(sha256_ctx * ctx, const char *string, uint8_t len)
{
ctx->total += len;
uint8_t startpos = ctx->buflen;
uint8_t partsize;
if (startpos + len <= 64) {
partsize = len;
} else
partsize = 64 - startpos;
insert_to_buffer(ctx, (const uint8_t *) string, partsize);
if (ctx->buflen == 64) {
uint8_t offset = 64 - startpos;
sha256_block(ctx);
ctx->buflen = 0;
insert_to_buffer(ctx, (const uint8_t *) (string + offset),
len - offset);
}
}
/**
Add 0x80 byte to ctx->buffer and clean the rest of it
**/
__device__ void ctx_append_1(sha256_ctx * ctx)
{
int i = 63 - ctx->buflen;
uint8_t *d = &ctx->buffer[ctx->buflen];
*d++ = 0x80;
while (i--)
{
*d++ = 0;
}
}
/**
Add ctx->bufflen at the end of ctx->buffer
**/
__device__ void ctx_add_length(sha256_ctx * ctx)
{
uint32_t *blocks = (uint32_t *) ctx->buffer;
blocks[15] = SWAP(ctx->total * 8);
}
__device__ void finish_ctx(sha256_ctx * ctx)
{
ctx_append_1(ctx);
ctx_add_length(ctx);
ctx->buflen = 0;
}
__device__ void clear_ctx_buffer(sha256_ctx * ctx)
{
uint32_t *w = (uint32_t *) ctx->buffer;
#pragma unroll 16
for (int i = 0; i < 16; i++)
w[i] = 0;
ctx->buflen = 0;
}
__device__ void sha256_digest(sha256_ctx * ctx, uint32_t * result)
{
uint8_t i;
if (ctx->buflen <= 55) { //data+0x80+datasize fits in one 512bit block
finish_ctx(ctx);
sha256_block(ctx);
} else {
uint8_t moved = 1;
if (ctx->buflen < 64) { //data and 0x80 fits in one block
ctx_append_1(ctx);
moved = 0;
}
sha256_block(ctx);
clear_ctx_buffer(ctx);
if (moved)
ctx->buffer[0] = 0x80; //append 1,the rest is already clean
ctx_add_length(ctx);
sha256_block(ctx);
}
#pragma unroll 8
for (i = 0; i < 8; i++)
result[i] = SWAP(ctx->H[i]);
}
__device__ void sha256crypt(const char *pass, uint8_t passlength,
uint32_t * tresult, uint32_t idx, uint32_t rounds)
{
uint32_t i, alt_result[8], temp_result[8];
sha256_ctx ctx, alt_ctx;
init_ctx(&ctx);
init_ctx(&alt_ctx);
ctx_update(&ctx, pass, passlength);
ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen);
ctx_update(&alt_ctx, pass, passlength);
ctx_update(&alt_ctx, cuda_salt[0].salt, cuda_salt[0].saltlen);
ctx_update(&alt_ctx, pass, passlength);
sha256_digest(&alt_ctx, alt_result);
ctx_update(&ctx, (const char *) alt_result, passlength);
for (i = passlength; i > 0; i >>= 1) {
if ((i & 1) != 0)
ctx_update(&ctx, (const char *) alt_result, 32);
else
ctx_update(&ctx, pass, passlength);
}
sha256_digest(&ctx, alt_result);
init_ctx(&alt_ctx);
for (i = 0; i < passlength; i++)
ctx_update(&alt_ctx, pass, passlength);
sha256_digest(&alt_ctx, temp_result);
__shared__ char sp_sequence[THREADS][16+4];
char *p_sequence=sp_sequence[threadIdx.x];
memcpy(p_sequence, temp_result, passlength);
init_ctx(&alt_ctx);
for (i = 0; i < 16 + ((unsigned char *) alt_result)[0]; i++)
ctx_update(&alt_ctx, cuda_salt[0].salt, cuda_salt[0].saltlen);
sha256_digest(&alt_ctx, temp_result);
uint8_t saltlength = cuda_salt[0].saltlen;
__shared__ char ss_sequence[THREADS][16+4];
char *s_sequence=ss_sequence[threadIdx.x];
memcpy(s_sequence, temp_result, saltlength);
for (i = 0; i < rounds; i++) {
init_ctx(&ctx);
if ((i & 1) != 0)
ctx_update(&ctx, p_sequence, passlength);
else
ctx_update(&ctx, (const char *) alt_result, 32);
if ((i % 3) != 0)
ctx_update(&ctx, s_sequence, saltlength);
if ((i % 7) != 0)
ctx_update(&ctx, p_sequence, passlength);
if ((i & 1) != 0)
ctx_update(&ctx, (const char *) alt_result, 32);
else
ctx_update(&ctx, p_sequence, passlength);
sha256_digest(&ctx, alt_result);
}
__syncthreads();
#pragma unroll 8
for (i = 0; i < 8; i++)
tresult[hash_addr(i, idx)] = alt_result[i];
}
__global__ void kernel_crypt_r(crypt_sha256_password * inbuffer,
uint32_t * outbuffer)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
sha256crypt((const char *) inbuffer[idx].v, inbuffer[idx].length,
outbuffer, idx, cuda_salt[0].rounds);
}
void sha256_crypt_gpu(crypt_sha256_password * inbuffer,
crypt_sha256_hash * outbuffer, crypt_sha256_salt * host_salt)
{
HANDLE_ERROR(hipMemcpyToSymbol(cuda_salt, host_salt,
sizeof(crypt_sha256_salt)));
crypt_sha256_password *cuda_inbuffer;
uint32_t *cuda_outbuffer;
size_t insize = sizeof(crypt_sha256_password) * KEYS_PER_CRYPT;
size_t outsize = sizeof(crypt_sha256_hash) * KEYS_PER_CRYPT;
HANDLE_ERROR(hipMalloc(&cuda_inbuffer, insize));
HANDLE_ERROR(hipMalloc(&cuda_outbuffer, outsize));
HANDLE_ERROR(hipMemcpy(cuda_inbuffer, inbuffer, insize,
hipMemcpyHostToDevice));
dim3 dimGrid(BLOCKS);
dim3 dimBlock(THREADS);
hipLaunchKernelGGL(( kernel_crypt_r) , dim3(dimGrid), dim3(dimBlock) , 0, 0, cuda_inbuffer,
cuda_outbuffer);
hipDeviceSynchronize();
HANDLE_ERROR(hipMemcpy(outbuffer, cuda_outbuffer, outsize,
hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(cuda_inbuffer));
HANDLE_ERROR(hipFree(cuda_outbuffer));
}
| 31e1bca0e9b03205c41e017d5375719c668b1b7f.cu | /*
* This software is Copyright (c) 2011 Lukas Odzioba <lukas dot odzioba at gmail dot com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "../cuda_cryptsha256.h"
#include "cuda_common.cuh"
extern "C" void sha256_crypt_gpu(crypt_sha256_password * inbuffer,
crypt_sha256_hash * outbuffer, crypt_sha256_salt * host_salt);
__constant__ crypt_sha256_salt cuda_salt[1];
__constant__ uint32_t k[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe,
0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa,
0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb,
0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624,
0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb,
0xbef9a3f7, 0xc67178f2
};
__device__ void init_ctx(sha256_ctx * ctx)
{
ctx->H[0] = 0x6a09e667;
ctx->H[1] = 0xbb67ae85;
ctx->H[2] = 0x3c6ef372;
ctx->H[3] = 0xa54ff53a;
ctx->H[4] = 0x510e527f;
ctx->H[5] = 0x9b05688c;
ctx->H[6] = 0x1f83d9ab;
ctx->H[7] = 0x5be0cd19;
ctx->total = 0;
ctx->buflen = 0;
}
__device__ void insert_to_buffer(sha256_ctx * ctx, const uint8_t * string,
uint8_t len)
{
int i = len;
uint8_t *d = &ctx->buffer[ctx->buflen];
while (i--)
*d++ = *string++;
ctx->buflen += len;
}
__device__ void sha256_block(sha256_ctx * ctx)
{
int i;
uint32_t a = ctx->H[0];
uint32_t b = ctx->H[1];
uint32_t c = ctx->H[2];
uint32_t d = ctx->H[3];
uint32_t e = ctx->H[4];
uint32_t f = ctx->H[5];
uint32_t g = ctx->H[6];
uint32_t h = ctx->H[7];
uint32_t w[16];
uint32_t *data = (uint32_t *) ctx->buffer;
#pragma unroll 16
for (i = 0; i < 16; i++)
w[i] = SWAP(data[i]);
uint32_t t1, t2;
for (i = 0; i < 16; i++) {
t1 = k[i] + w[i] + h + Sigma1(e) + Ch(e, f, g);
t2 = Maj(a, b, c) + Sigma0(a);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
for (i = 16; i < 64; i++) {
w[i & 15] =
sigma1(w[(i - 2) & 15]) + sigma0(w[(i - 15) & 15]) + w[(i -
16) & 15] + w[(i - 7) & 15];
t1 = k[i] + w[i & 15] + h + Sigma1(e) + Ch(e, f, g);
t2 = Maj(a, b, c) + Sigma0(a);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
ctx->H[0] += a;
ctx->H[1] += b;
ctx->H[2] += c;
ctx->H[3] += d;
ctx->H[4] += e;
ctx->H[5] += f;
ctx->H[6] += g;
ctx->H[7] += h;
}
__device__ void ctx_update(sha256_ctx * ctx, const char *string, uint8_t len)
{
ctx->total += len;
uint8_t startpos = ctx->buflen;
uint8_t partsize;
if (startpos + len <= 64) {
partsize = len;
} else
partsize = 64 - startpos;
insert_to_buffer(ctx, (const uint8_t *) string, partsize);
if (ctx->buflen == 64) {
uint8_t offset = 64 - startpos;
sha256_block(ctx);
ctx->buflen = 0;
insert_to_buffer(ctx, (const uint8_t *) (string + offset),
len - offset);
}
}
/**
Add 0x80 byte to ctx->buffer and clean the rest of it
**/
__device__ void ctx_append_1(sha256_ctx * ctx)
{
int i = 63 - ctx->buflen;
uint8_t *d = &ctx->buffer[ctx->buflen];
*d++ = 0x80;
while (i--)
{
*d++ = 0;
}
}
/**
Add ctx->bufflen at the end of ctx->buffer
**/
__device__ void ctx_add_length(sha256_ctx * ctx)
{
uint32_t *blocks = (uint32_t *) ctx->buffer;
blocks[15] = SWAP(ctx->total * 8);
}
__device__ void finish_ctx(sha256_ctx * ctx)
{
ctx_append_1(ctx);
ctx_add_length(ctx);
ctx->buflen = 0;
}
__device__ void clear_ctx_buffer(sha256_ctx * ctx)
{
uint32_t *w = (uint32_t *) ctx->buffer;
#pragma unroll 16
for (int i = 0; i < 16; i++)
w[i] = 0;
ctx->buflen = 0;
}
__device__ void sha256_digest(sha256_ctx * ctx, uint32_t * result)
{
uint8_t i;
if (ctx->buflen <= 55) { //data+0x80+datasize fits in one 512bit block
finish_ctx(ctx);
sha256_block(ctx);
} else {
uint8_t moved = 1;
if (ctx->buflen < 64) { //data and 0x80 fits in one block
ctx_append_1(ctx);
moved = 0;
}
sha256_block(ctx);
clear_ctx_buffer(ctx);
if (moved)
ctx->buffer[0] = 0x80; //append 1,the rest is already clean
ctx_add_length(ctx);
sha256_block(ctx);
}
#pragma unroll 8
for (i = 0; i < 8; i++)
result[i] = SWAP(ctx->H[i]);
}
__device__ void sha256crypt(const char *pass, uint8_t passlength,
uint32_t * tresult, uint32_t idx, uint32_t rounds)
{
uint32_t i, alt_result[8], temp_result[8];
sha256_ctx ctx, alt_ctx;
init_ctx(&ctx);
init_ctx(&alt_ctx);
ctx_update(&ctx, pass, passlength);
ctx_update(&ctx, cuda_salt[0].salt, cuda_salt[0].saltlen);
ctx_update(&alt_ctx, pass, passlength);
ctx_update(&alt_ctx, cuda_salt[0].salt, cuda_salt[0].saltlen);
ctx_update(&alt_ctx, pass, passlength);
sha256_digest(&alt_ctx, alt_result);
ctx_update(&ctx, (const char *) alt_result, passlength);
for (i = passlength; i > 0; i >>= 1) {
if ((i & 1) != 0)
ctx_update(&ctx, (const char *) alt_result, 32);
else
ctx_update(&ctx, pass, passlength);
}
sha256_digest(&ctx, alt_result);
init_ctx(&alt_ctx);
for (i = 0; i < passlength; i++)
ctx_update(&alt_ctx, pass, passlength);
sha256_digest(&alt_ctx, temp_result);
__shared__ char sp_sequence[THREADS][16+4];
char *p_sequence=sp_sequence[threadIdx.x];
memcpy(p_sequence, temp_result, passlength);
init_ctx(&alt_ctx);
for (i = 0; i < 16 + ((unsigned char *) alt_result)[0]; i++)
ctx_update(&alt_ctx, cuda_salt[0].salt, cuda_salt[0].saltlen);
sha256_digest(&alt_ctx, temp_result);
uint8_t saltlength = cuda_salt[0].saltlen;
__shared__ char ss_sequence[THREADS][16+4];
char *s_sequence=ss_sequence[threadIdx.x];
memcpy(s_sequence, temp_result, saltlength);
for (i = 0; i < rounds; i++) {
init_ctx(&ctx);
if ((i & 1) != 0)
ctx_update(&ctx, p_sequence, passlength);
else
ctx_update(&ctx, (const char *) alt_result, 32);
if ((i % 3) != 0)
ctx_update(&ctx, s_sequence, saltlength);
if ((i % 7) != 0)
ctx_update(&ctx, p_sequence, passlength);
if ((i & 1) != 0)
ctx_update(&ctx, (const char *) alt_result, 32);
else
ctx_update(&ctx, p_sequence, passlength);
sha256_digest(&ctx, alt_result);
}
__syncthreads();
#pragma unroll 8
for (i = 0; i < 8; i++)
tresult[hash_addr(i, idx)] = alt_result[i];
}
__global__ void kernel_crypt_r(crypt_sha256_password * inbuffer,
uint32_t * outbuffer)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
sha256crypt((const char *) inbuffer[idx].v, inbuffer[idx].length,
outbuffer, idx, cuda_salt[0].rounds);
}
void sha256_crypt_gpu(crypt_sha256_password * inbuffer,
crypt_sha256_hash * outbuffer, crypt_sha256_salt * host_salt)
{
HANDLE_ERROR(cudaMemcpyToSymbol(cuda_salt, host_salt,
sizeof(crypt_sha256_salt)));
crypt_sha256_password *cuda_inbuffer;
uint32_t *cuda_outbuffer;
size_t insize = sizeof(crypt_sha256_password) * KEYS_PER_CRYPT;
size_t outsize = sizeof(crypt_sha256_hash) * KEYS_PER_CRYPT;
HANDLE_ERROR(cudaMalloc(&cuda_inbuffer, insize));
HANDLE_ERROR(cudaMalloc(&cuda_outbuffer, outsize));
HANDLE_ERROR(cudaMemcpy(cuda_inbuffer, inbuffer, insize,
cudaMemcpyHostToDevice));
dim3 dimGrid(BLOCKS);
dim3 dimBlock(THREADS);
kernel_crypt_r <<< dimGrid, dimBlock >>> (cuda_inbuffer,
cuda_outbuffer);
cudaThreadSynchronize();
HANDLE_ERROR(cudaMemcpy(outbuffer, cuda_outbuffer, outsize,
cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(cuda_inbuffer));
HANDLE_ERROR(cudaFree(cuda_outbuffer));
}
|
40bb0fb5b431f4b39165946210a1684625ac54ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialAveragePooling.cu"
#else
#include "../common.h"
static inline void THNN_(SpatialAveragePooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
int kH, int kW, int dH, int dW, int padH, int padW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t nInputRows = input->size(dimh);
int64_t nInputCols = input->size(dimw);
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
}
void THNN_(SpatialAveragePooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THCUNN_assertSameGPU(state, 2, input, output);
THNN_(SpatialAveragePooling_shapeCheck)
(state, input, NULL, kH, kW, dH, dW,
padH, padW, ceil_mode);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->dim() == 3) {
nInputCols = input->size(2);
nInputRows = input->size(1);
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
nInputCols = input->size(3);
nInputRows = input->size(2);
nInputPlane = input->size(1);
batchSize = input->size(0);
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCTensor_(newContiguous)(state, input);
real* input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
real* output_data = THCTensor_(data)(state, output);
int count = THCTensor_(nElement)(state, output);
if(count_include_pad)
hipLaunchKernelGGL(( AvePoolForward<real, accreal, true>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
else
hipLaunchKernelGGL(( AvePoolForward<real, accreal, false>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
THCudaCheck(hipGetLastError());
if(input->dim() == 3)
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCTensor_(free)(state, input);
}
void THNN_(SpatialAveragePooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
THNN_(SpatialAveragePooling_shapeCheck)
(state, input, gradOutput, kH, kW, dH, dW,
padH, padW, ceil_mode);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
int dimCol = 2;
int dimRow = 1;
if (input->dim() == 3) {
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
dimCol = 3;
dimRow = 2;
nInputPlane = input->size(1);
batchSize = input->size(0);
}
nInputCols = input->size(dimCol);
nInputRows = input->size(dimRow);
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
THCUNN_check_dim_size(state, gradOutput, input->dim(), dimRow, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, input->dim(), dimCol, nOutputCols);
THCTensor_(resizeAs)(state, gradInput, input);
int count = THCTensor_(nElement)(state, input);
if(count_include_pad)
hipLaunchKernelGGL(( AvePoolBackward<real, accreal, true>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCTensor_(data)(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCTensor_(data)(state, gradInput));
else
hipLaunchKernelGGL(( AvePoolBackward<real, accreal, false>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count,
THCTensor_(data)(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCTensor_(data)(state, gradInput));
THCudaCheck(hipGetLastError());
// clean
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
| 40bb0fb5b431f4b39165946210a1684625ac54ac.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialAveragePooling.cu"
#else
#include "../common.h"
static inline void THNN_(SpatialAveragePooling_shapeCheck)(
THCState *state,
THCTensor *input, THCTensor *gradOutput,
int kH, int kW, int dH, int dW, int padH, int padW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THCUNN_argCheck(state, !input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t nInputRows = input->size(dimh);
int64_t nInputCols = input->size(dimw);
int64_t nOutputRows, nOutputCols;
int64_t nOutputPlane = nInputPlane;
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
if (nOutputCols < 1 || nOutputRows < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane);
THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols);
}
}
void THNN_(SpatialAveragePooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THCUNN_assertSameGPU(state, 2, input, output);
THNN_(SpatialAveragePooling_shapeCheck)
(state, input, NULL, kH, kW, dH, dW,
padH, padW, ceil_mode);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
if (input->dim() == 3) {
nInputCols = input->size(2);
nInputRows = input->size(1);
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
nInputCols = input->size(3);
nInputRows = input->size(2);
nInputPlane = input->size(1);
batchSize = input->size(0);
}
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input = THCTensor_(newContiguous)(state, input);
real* input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols);
real* output_data = THCTensor_(data)(state, output);
int count = THCTensor_(nElement)(state, output);
if(count_include_pad)
AvePoolForward<real, accreal, true>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
else
AvePoolForward<real, accreal, false>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>(
count, input_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW, output_data);
THCudaCheck(cudaGetLastError());
if(input->dim() == 3)
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
THCTensor_(free)(state, input);
}
void THNN_(SpatialAveragePooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int kW, int kH,
int dW, int dH,
int padW, int padH,
bool ceil_mode,
bool count_include_pad)
{
THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput);
THNN_(SpatialAveragePooling_shapeCheck)
(state, input, gradOutput, kH, kW, dH, dW,
padH, padW, ceil_mode);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int64_t nInputCols, nInputRows, nInputPlane, batchSize;
int64_t nOutputCols, nOutputRows;
int dimCol = 2;
int dimRow = 1;
if (input->dim() == 3) {
nInputPlane = input->size(0);
batchSize = 1;
}
else
{
dimCol = 3;
dimRow = 2;
nInputPlane = input->size(1);
batchSize = input->size(0);
}
nInputCols = input->size(dimCol);
nInputRows = input->size(dimRow);
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
THCUNN_check_dim_size(state, gradOutput, input->dim(), dimRow, nOutputRows);
THCUNN_check_dim_size(state, gradOutput, input->dim(), dimCol, nOutputCols);
THCTensor_(resizeAs)(state, gradInput, input);
int count = THCTensor_(nElement)(state, input);
if(count_include_pad)
AvePoolBackward<real, accreal, true>
<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count,
THCTensor_(data)(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCTensor_(data)(state, gradInput));
else
AvePoolBackward<real, accreal, false>
<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count,
THCTensor_(data)(state, gradOutput),
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW,
THCTensor_(data)(state, gradInput));
THCudaCheck(cudaGetLastError());
// clean
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
#endif
|
3001a63932f6f53b84647f0f7c15c2b13a74e545.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rectified_linear_kernel( float4 * __restrict output, const float4 * __restrict input, float negative_slope, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input[elem_id];
if (val.x < 0.0F)
val.x *= negative_slope;
if (val.y < 0.0F)
val.y *= negative_slope;
if (val.z < 0.0F)
val.z *= negative_slope;
if (val.w < 0.0F)
val.w *= negative_slope;
output[elem_id] = val;
}
} | 3001a63932f6f53b84647f0f7c15c2b13a74e545.cu | #include "includes.h"
// Applies a leaky rectifier element-wise to float4-packed data: negative
// components are multiplied by negative_slope, non-negative components pass
// through unchanged. One thread handles one float4; elem_count is the number
// of float4 elements.
__global__ void rectified_linear_kernel( float4 * __restrict output, const float4 * __restrict input, float negative_slope, int elem_count)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= elem_count)
        return;                 // grid tail: nothing to do
    float4 v = input[idx];
    v.x = (v.x < 0.0F) ? v.x * negative_slope : v.x;
    v.y = (v.y < 0.0F) ? v.y * negative_slope : v.y;
    v.z = (v.z < 0.0F) ? v.z * negative_slope : v.z;
    v.w = (v.w < 0.0F) ? v.w * negative_slope : v.w;
    output[idx] = v;
}
51c1da64253917ef54a38a45794f03e50a40477e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <bits/stdc++.h>
# include <hip/hip_runtime.h>
#define TILE_WIDTH 32 //(TITLE_WIDTH = BLOCKSIZE)
using namespace std;
// ::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
// Naive dense GEMM: Mat3 (m x p) = Mat1 (m x n) * Mat2 (n x p), row-major.
// One thread computes one output element; launch a 2-D grid covering p
// columns (x) by m rows (y).
__global__ void KernelNormalMul(float *Mat1,float *Mat2,float *Mat3,int m,int n,int p){
int j = threadIdx.y + blockDim.y * blockIdx.y; // row
int i = threadIdx.x + blockDim.x * blockIdx.x; // col
if((j<m) && (i<p)){
float value=0.0;
for(int k=0;k<n;++k){
value+=Mat1[n*j+k]*Mat2[p*k+i];
}
Mat3[p*j+i]=value;
}
}
// Tiled GEMM using TILE_WIDTH x TILE_WIDTH shared-memory staging tiles:
// Mat3 (rowM1 x colM2) = Mat1 (rowM1 x colM1) * Mat2 (colM1 x colM2).
// Out-of-range tile cells are zero-filled so edge blocks accumulate
// correctly. Requires blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void KernelTilesMul(float *Mat1,float *Mat2,float *Mat3,int rowM1,int colM1,int colM2){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0.0;
// Outer k walks the tiles along the shared dimension (ceil-divided).
for(int k = 0; k < (colM1+TILE_WIDTH-1)/(TILE_WIDTH); ++k){
if(k*TILE_WIDTH + tx < colM1 && row < rowM1){
Mds[ty][tx] = Mat1[row*colM1 + k*TILE_WIDTH + tx];
}else{
Mds[ty][tx] = 0.0; // zero-pad partial edge tiles
}
if(k*TILE_WIDTH + ty < colM1 && col < colM2){
Nds[ty][tx] = Mat2[(k*TILE_WIDTH + ty) * colM2 + col];
}else{
Nds[ty][tx] =0.0;
}
__syncthreads(); // tiles fully staged before any thread reads them
// NOTE: this inner k intentionally shadows the outer tile counter.
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads(); // all reads done before the next tile overwrites shared mem
}
if (row < rowM1 && col < colM2){
Mat3[row*colM2+col] = Pvalue;
}
}
// Multiplies Mat1 (rowM1 x colM1) by Mat2 (colM1 x colM2) on the GPU and
// copies the product into Mat3 (rowM1 x colM2); all arguments are row-major
// host buffers. op == 1 selects the naive kernel, anything else the
// shared-memory tiled kernel.
//
// Fixes vs. the original:
//  - the op==1 launch line was a malformed hipify artifact
//    ("hipLaunchKernelGGL(({KernelNormalMul), ...") and did not compile;
//  - d_Mat1 and d_Mat2 were never freed, leaking device memory per call.
void d_MatrixMult(float *Mat1,float *Mat2,float *Mat3,int rowM1,int colM1,int colM2, int op ){
    float *d_Mat1, *d_Mat2, *d_Mat3;
    const int Blocksize = TILE_WIDTH;              // 2-D blocks of TILE_WIDTH x TILE_WIDTH threads
    size_t size1 = (size_t)rowM1 * colM1 * sizeof(float);
    size_t size2 = (size_t)colM1 * colM2 * sizeof(float);
    size_t size3 = (size_t)rowM1 * colM2 * sizeof(float);
    // 1. Device allocations.
    hipMalloc(&d_Mat1, size1);
    hipMalloc(&d_Mat2, size2);
    hipMalloc(&d_Mat3, size3);
    // 2. Upload the operands.
    hipMemcpy(d_Mat1, Mat1, size1, hipMemcpyHostToDevice);
    hipMemcpy(d_Mat2, Mat2, size2, hipMemcpyHostToDevice);
    // 3. Integer ceil-division so partial tiles at the matrix edges are covered.
    dim3 dimGrid((colM2 + Blocksize - 1) / Blocksize, (rowM1 + Blocksize - 1) / Blocksize, 1);
    dim3 dimBlock(Blocksize, Blocksize, 1);
    // 4. Launch the requested kernel.
    if (op == 1) {
        hipLaunchKernelGGL(KernelNormalMul, dimGrid, dimBlock, 0, 0, d_Mat1, d_Mat2, d_Mat3, rowM1, colM1, colM2);
    } else {
        hipLaunchKernelGGL(KernelTilesMul, dimGrid, dimBlock, 0, 0, d_Mat1, d_Mat2, d_Mat3, rowM1, colM1, colM2);
    }
    // 5. Download the result (the blocking copy also synchronizes the kernel).
    hipMemcpy(Mat3, d_Mat3, size3, hipMemcpyDeviceToHost);
    // 6. Release every device buffer, not just d_Mat3.
    hipFree(d_Mat1);
    hipFree(d_Mat2);
    hipFree(d_Mat3);
}
// :::::::::::::::::::::::::::::::::::::::Normal::::::::::::::::::::::::::::::::
// CPU reference multiply: Mat3 (m x p) = Mat1 (m x n) * Mat2 (n x p),
// all flat row-major arrays. Used to validate the GPU kernels.
void h_Mul_Mat(float *Mat1,float *Mat2, float *Mat3,int m,int n,int p){
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < p; ++col) {
            float acc = 0.0f;
            for (int k = 0; k < n; ++k) {
                acc += Mat1[row * n + k] * Mat2[k * p + col];
            }
            Mat3[row * p + col] = acc;
        }
    }
}
// Fills an m x n matrix, stored as one flat array, with a constant value.
void llena_mat(float *Mat, float Value,int m,int n){
    const int total = m * n;
    int idx = 0;
    while (idx < total) {
        Mat[idx] = Value;
        ++idx;
    }
}
// Prints an m x n matrix, one row per line, each element wrapped in
// brackets. A newline is emitted before every row (including the first) and
// a trailing newline closes the output.
void mostrar_mat(float *Mat,int m,int n){
    const int total = m * n;
    for (int idx = 0; idx < total; ++idx) {
        if (n != 0 && idx % n == 0)
            std::cout << std::endl;     // row break
        std::cout << "[" << Mat[idx] << "] ";
    }
    std::cout << std::endl;
}
// Compares two m x p matrices element-wise for exact equality and prints a
// verdict. Returns 0 in both cases (the in-file callers ignore the result).
int check_mat(float *Mat1,float *Mat2,int m,int p){
    const int total = m * p;
    for (int idx = 0; idx < total; ++idx) {
        if (Mat1[idx] != Mat2[idx]) {
            std::cout << "Error, Las matrices no son iguales" << std::endl;
            return 0;
        }
    }
    std::cout << "Las Matrices son iguales" << std::endl;
    return 0;
}
// Compares two m x p matrices element-wise with a 0.1 absolute tolerance
// (accounts for GPU/CPU float rounding differences) and prints a verdict.
// Returns 0 in both cases (the in-file callers ignore the result).
int check_mat_float(float *Mat1,float *Mat2,int m,int p){
    const int total = m * p;
    for (int idx = 0; idx < total; ++idx) {
        float d = Mat1[idx] - Mat2[idx];
        if (fabs(d) > 0.1) {
            std::cout << "Error, Las matrices no son iguales" << std::endl;
            return 0;
        }
    }
    std::cout << "Las Matrices son iguales" << std::endl;
    return 0;
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
// Converts two clock() samples into elapsed seconds.
// (CLOCKS_PER_SEC/1 keeps the original unit; divide further for ms.)
double diffclock(clock_t clock1,clock_t clock2){
    double ticks = clock2 - clock1;
    return ticks / (CLOCKS_PER_SEC / 1);
}
// :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::.
// Benchmark driver: multiplies a 2048x1024 ones-matrix by a 1024x1200
// twos-matrix three ways (CPU reference, naive GPU kernel, tiled GPU
// kernel), times each with clock(), and cross-checks the results with a
// 0.1 tolerance. Note the GPU timings include alloc/copy overhead inside
// d_MatrixMult, not just kernel time.
int main(){
double T1,T2,T3; // variables de tiempo
int rowM1=2048;
int colM1=1024;
int colM2=1200;
float *Mat1 = (float*)malloc((rowM1*colM1)*sizeof(float));
float *Mat2 = (float*)malloc((colM1*colM2)*sizeof(float));
float *Mat3 = (float*)malloc((rowM1*colM2)*sizeof(float));
float *Mat4 = (float*)malloc((rowM1*colM2)*sizeof(float));
float *Mat5 = (float*)malloc((rowM1*colM2)*sizeof(float));
llena_mat(Mat1,1.0,rowM1,colM1);
llena_mat(Mat2,2.0,colM1,colM2);
llena_mat(Mat3,0.0,rowM1,colM2);
llena_mat(Mat4,0.0,rowM1,colM2);
llena_mat(Mat5,0.0,rowM1,colM2);
// CPU reference run.
clock_t start = clock();
h_Mul_Mat(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
clock_t end = clock();
T1=diffclock(start,end);
cout <<"Tiempo secuencial: "<<T1<<endl;
//mostrar_mat(Mat3,rowM1,colM2);
// GPU naive kernel run.
clock_t start2 = clock();
d_MatrixMult(Mat1,Mat2,Mat4,rowM1,colM1,colM2,1); // paralelo
clock_t end2 = clock();
//mostrar_mat(Mat4,rowM1,colM2);
T2=diffclock(start2,end2);
cout <<"Tiempo Paralelo: "<<T2<<endl;
cout<<"Aceleracin lograda: "<<T1/T2<<endl;
check_mat_float(Mat3,Mat4,rowM1,colM2);
// GPU tiled kernel run.
clock_t start3 = clock();
d_MatrixMult(Mat1,Mat2,Mat5,rowM1,colM1,colM2,2); // tiles
//mostrar_mat(Mat5,rowM1,colM2);
clock_t end3 = clock();
T3=diffclock(start3,end3);
cout <<"Tiempo Paralelo con Tiles: "<<T3<<endl;
cout<<"Aceleracin lograda Respecto a el tiempo paralelo: "<<T2/T3<<endl;
check_mat_float(Mat4,Mat5,rowM1,colM2);
free(Mat1);
free(Mat2);
free(Mat3);
free(Mat4);
free(Mat5);
return 0;
}
// http://www.techdarting.com/2014/03/matrix-multiplication-in-cuda-using.html
| 51c1da64253917ef54a38a45794f03e50a40477e.cu | # include <bits/stdc++.h>
# include <cuda.h>
#define TILE_WIDTH 32 //(TITLE_WIDTH = BLOCKSIZE)
using namespace std;
// ::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
// Naive dense GEMM: Mat3 (m x p) = Mat1 (m x n) * Mat2 (n x p), row-major.
// One thread computes one output element; launch a 2-D grid covering p
// columns (x) by m rows (y).
__global__ void KernelNormalMul(float *Mat1,float *Mat2,float *Mat3,int m,int n,int p){
int j = threadIdx.y + blockDim.y * blockIdx.y; // row
int i = threadIdx.x + blockDim.x * blockIdx.x; // col
if((j<m) && (i<p)){
float value=0.0;
for(int k=0;k<n;++k){
value+=Mat1[n*j+k]*Mat2[p*k+i];
}
Mat3[p*j+i]=value;
}
}
// Tiled GEMM using TILE_WIDTH x TILE_WIDTH shared-memory staging tiles:
// Mat3 (rowM1 x colM2) = Mat1 (rowM1 x colM1) * Mat2 (colM1 x colM2).
// Out-of-range tile cells are zero-filled so edge blocks accumulate
// correctly. Requires blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void KernelTilesMul(float *Mat1,float *Mat2,float *Mat3,int rowM1,int colM1,int colM2){
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
float Pvalue = 0.0;
// Outer k walks the tiles along the shared dimension (ceil-divided).
for(int k = 0; k < (colM1+TILE_WIDTH-1)/(TILE_WIDTH); ++k){
if(k*TILE_WIDTH + tx < colM1 && row < rowM1){
Mds[ty][tx] = Mat1[row*colM1 + k*TILE_WIDTH + tx];
}else{
Mds[ty][tx] = 0.0; // zero-pad partial edge tiles
}
if(k*TILE_WIDTH + ty < colM1 && col < colM2){
Nds[ty][tx] = Mat2[(k*TILE_WIDTH + ty) * colM2 + col];
}else{
Nds[ty][tx] =0.0;
}
__syncthreads(); // tiles fully staged before any thread reads them
// NOTE: this inner k intentionally shadows the outer tile counter.
for(int k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads(); // all reads done before the next tile overwrites shared mem
}
if (row < rowM1 && col < colM2){
Mat3[row*colM2+col] = Pvalue;
}
}
// Multiplies Mat1 (rowM1 x colM1) by Mat2 (colM1 x colM2) on the GPU and
// copies the product into Mat3 (rowM1 x colM2); all arguments are row-major
// host buffers. op == 1 selects the naive kernel, anything else the
// shared-memory tiled kernel.
//
// Fix vs. the original: d_Mat1 and d_Mat2 were never freed, leaking device
// memory on every call.
void d_MatrixMult(float *Mat1,float *Mat2,float *Mat3,int rowM1,int colM1,int colM2, int op ){
    float *d_Mat1, *d_Mat2, *d_Mat3;
    const int Blocksize = TILE_WIDTH;              // 2-D blocks of TILE_WIDTH x TILE_WIDTH threads
    size_t size1 = (size_t)rowM1 * colM1 * sizeof(float);
    size_t size2 = (size_t)colM1 * colM2 * sizeof(float);
    size_t size3 = (size_t)rowM1 * colM2 * sizeof(float);
    // 1. Device allocations.
    cudaMalloc(&d_Mat1, size1);
    cudaMalloc(&d_Mat2, size2);
    cudaMalloc(&d_Mat3, size3);
    // 2. Upload the operands.
    cudaMemcpy(d_Mat1, Mat1, size1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Mat2, Mat2, size2, cudaMemcpyHostToDevice);
    // 3. Integer ceil-division so partial tiles at the matrix edges are covered.
    dim3 dimGrid((colM2 + Blocksize - 1) / Blocksize, (rowM1 + Blocksize - 1) / Blocksize, 1);
    dim3 dimBlock(Blocksize, Blocksize, 1);
    // 4. Launch the requested kernel.
    if (op == 1) {
        KernelNormalMul<<<dimGrid, dimBlock>>>(d_Mat1, d_Mat2, d_Mat3, rowM1, colM1, colM2);
    } else {
        KernelTilesMul<<<dimGrid, dimBlock>>>(d_Mat1, d_Mat2, d_Mat3, rowM1, colM1, colM2);
    }
    // 5. Download the result (the blocking copy also synchronizes the kernel).
    cudaMemcpy(Mat3, d_Mat3, size3, cudaMemcpyDeviceToHost);
    // 6. Release every device buffer, not just d_Mat3.
    cudaFree(d_Mat1);
    cudaFree(d_Mat2);
    cudaFree(d_Mat3);
}
// :::::::::::::::::::::::::::::::::::::::Normal::::::::::::::::::::::::::::::::
// CPU reference multiply: Mat3 (m x p) = Mat1 (m x n) * Mat2 (n x p),
// all flat row-major arrays. Used to validate the GPU kernels.
void h_Mul_Mat(float *Mat1,float *Mat2, float *Mat3,int m,int n,int p){
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < p; ++col) {
            float acc = 0.0f;
            for (int k = 0; k < n; ++k) {
                acc += Mat1[row * n + k] * Mat2[k * p + col];
            }
            Mat3[row * p + col] = acc;
        }
    }
}
// Fills an m x n matrix, stored as one flat array, with a constant value.
void llena_mat(float *Mat, float Value,int m,int n){
    const int total = m * n;
    int idx = 0;
    while (idx < total) {
        Mat[idx] = Value;
        ++idx;
    }
}
// Prints an m x n matrix, one row per line, each element wrapped in
// brackets. A newline is emitted before every row (including the first) and
// a trailing newline closes the output.
void mostrar_mat(float *Mat,int m,int n){
    const int total = m * n;
    for (int idx = 0; idx < total; ++idx) {
        if (n != 0 && idx % n == 0)
            std::cout << std::endl;     // row break
        std::cout << "[" << Mat[idx] << "] ";
    }
    std::cout << std::endl;
}
// Compares two m x p matrices element-wise for exact equality and prints a
// verdict. Returns 0 in both cases (the in-file callers ignore the result).
int check_mat(float *Mat1,float *Mat2,int m,int p){
    const int total = m * p;
    for (int idx = 0; idx < total; ++idx) {
        if (Mat1[idx] != Mat2[idx]) {
            std::cout << "Error, Las matrices no son iguales" << std::endl;
            return 0;
        }
    }
    std::cout << "Las Matrices son iguales" << std::endl;
    return 0;
}
// Compares two m x p matrices element-wise with a 0.1 absolute tolerance
// (accounts for GPU/CPU float rounding differences) and prints a verdict.
// Returns 0 in both cases (the in-file callers ignore the result).
int check_mat_float(float *Mat1,float *Mat2,int m,int p){
    const int total = m * p;
    for (int idx = 0; idx < total; ++idx) {
        float d = Mat1[idx] - Mat2[idx];
        if (fabs(d) > 0.1) {
            std::cout << "Error, Las matrices no son iguales" << std::endl;
            return 0;
        }
    }
    std::cout << "Las Matrices son iguales" << std::endl;
    return 0;
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
// Converts two clock() samples into elapsed seconds.
// (CLOCKS_PER_SEC/1 keeps the original unit; divide further for ms.)
double diffclock(clock_t clock1,clock_t clock2){
    double ticks = clock2 - clock1;
    return ticks / (CLOCKS_PER_SEC / 1);
}
// :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::.
// Benchmark driver: multiplies a 2048x1024 ones-matrix by a 1024x1200
// twos-matrix three ways (CPU reference, naive GPU kernel, tiled GPU
// kernel), times each with clock(), and cross-checks the results with a
// 0.1 tolerance. Note the GPU timings include alloc/copy overhead inside
// d_MatrixMult, not just kernel time.
int main(){
double T1,T2,T3; // variables de tiempo
int rowM1=2048;
int colM1=1024;
int colM2=1200;
float *Mat1 = (float*)malloc((rowM1*colM1)*sizeof(float));
float *Mat2 = (float*)malloc((colM1*colM2)*sizeof(float));
float *Mat3 = (float*)malloc((rowM1*colM2)*sizeof(float));
float *Mat4 = (float*)malloc((rowM1*colM2)*sizeof(float));
float *Mat5 = (float*)malloc((rowM1*colM2)*sizeof(float));
llena_mat(Mat1,1.0,rowM1,colM1);
llena_mat(Mat2,2.0,colM1,colM2);
llena_mat(Mat3,0.0,rowM1,colM2);
llena_mat(Mat4,0.0,rowM1,colM2);
llena_mat(Mat5,0.0,rowM1,colM2);
// CPU reference run.
clock_t start = clock();
h_Mul_Mat(Mat1,Mat2,Mat3,rowM1,colM1,colM2);
clock_t end = clock();
T1=diffclock(start,end);
cout <<"Tiempo secuencial: "<<T1<<endl;
//mostrar_mat(Mat3,rowM1,colM2);
// GPU naive kernel run.
clock_t start2 = clock();
d_MatrixMult(Mat1,Mat2,Mat4,rowM1,colM1,colM2,1); // paralelo
clock_t end2 = clock();
//mostrar_mat(Mat4,rowM1,colM2);
T2=diffclock(start2,end2);
cout <<"Tiempo Paralelo: "<<T2<<endl;
cout<<"Aceleración lograda: "<<T1/T2<<endl;
check_mat_float(Mat3,Mat4,rowM1,colM2);
// GPU tiled kernel run.
clock_t start3 = clock();
d_MatrixMult(Mat1,Mat2,Mat5,rowM1,colM1,colM2,2); // tiles
//mostrar_mat(Mat5,rowM1,colM2);
clock_t end3 = clock();
T3=diffclock(start3,end3);
cout <<"Tiempo Paralelo con Tiles: "<<T3<<endl;
cout<<"Aceleración lograda Respecto a el tiempo paralelo: "<<T2/T3<<endl;
check_mat_float(Mat4,Mat5,rowM1,colM2);
free(Mat1);
free(Mat2);
free(Mat3);
free(Mat4);
free(Mat5);
return 0;
}
// http://www.techdarting.com/2014/03/matrix-multiplication-in-cuda-using.html
|
93903c8ef840a14e31f56257be16f800701f669d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include "aes2.h"
#define caes_mul(a, b) ((a)&&(b)?CiLogTable[(ClogTable[(a)]+ClogTable[(b)])%0xff]:0)
#define GET(M,X,Y) ((M)[((Y) << 2) + (X)])
int const THREADS = 512;
// AES SubBytes: each thread substitutes its own state byte through the S-box.
__device__ void SubBytes(uint8_t *estado) {
estado[threadIdx.x] = Sbox[estado[threadIdx.x]];
}
// Inverse SubBytes via the inverse S-box.
__device__ void InvSubBytes(uint8_t *estado) {
estado[threadIdx.x] = InvSbox[estado[threadIdx.x]];
}
// AES ShiftRows over a shared buffer holding one 16-byte block per 16
// consecutive threads: ((idx >> 4) << 4) is the base of this thread's block
// and the %16 term is the rotated byte offset inside it. Read-then-barrier-
// then-write so every thread reads the pre-shift state.
__device__ void ShiftRows(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int row = idx % 4;
uint8_t t;
t = estado[((idx + 4*row) % 16) + ((idx >> 4 ) << 4)];
__syncthreads();
estado[idx] = t;
}
// Inverse ShiftRows. Subtle but correct: idx is unsigned, so idx - 4*row
// wraps modulo 2^32 when the difference is negative, and since 2^32 is a
// multiple of 16 the subsequent % 16 still yields the intended offset.
__device__ void InvShiftRows(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int row = idx % 4;
uint8_t t;
t = estado[((idx - 4*row) % 16) + ((idx >> 4 ) << 4)];
__syncthreads();
estado[idx] = t;
}
// AES MixColumns: each thread recomputes its own byte from the four bytes of
// its column using GF(2^8) multiplies (caes_mul log tables). All threads
// read the old state, hit the barrier, then write.
__device__ void MixColumns(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int base = idx % 4;
uint8_t t;
if(base == 0) t = caes_mul(0x02, estado[idx]) ^ caes_mul(0x03, estado[idx+1]) ^ estado[idx+2] ^ estado[idx+3];
if(base == 1) t = estado[idx-1] ^ caes_mul(0x02, estado[idx]) ^ caes_mul(0x03, estado[idx+1]) ^ estado[idx+2];
if(base == 2) t = estado[idx-2] ^ estado[idx-1] ^ caes_mul(0x02, estado[idx]) ^ caes_mul(0x03, estado[idx+1]);
if(base == 3) t = caes_mul(0x03, estado[idx-3]) ^ estado[idx-2] ^ estado[idx-1] ^ caes_mul(0x02, estado[idx]);
__syncthreads();
estado[idx] = t;
}
// Inverse MixColumns with the decryption coefficients 0x0e/0x0b/0x0d/0x09.
__device__ void InvMixColumns(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int base = idx % 4;
uint8_t t;
if(base == 0) t = caes_mul(0x0e, estado[idx]) ^ caes_mul(0x0b, estado[idx+1]) ^ caes_mul(0x0d, estado[idx+2]) ^ caes_mul(0x09, estado[idx+3]);
if(base == 1) t = caes_mul(0x09, estado[idx-1]) ^ caes_mul(0x0e, estado[idx]) ^ caes_mul(0x0b, estado[idx+1]) ^ caes_mul(0x0d, estado[idx+2]);
if(base == 2) t = caes_mul(0x0d, estado[idx-2]) ^ caes_mul(0x09, estado[idx-1]) ^ caes_mul(0x0e, estado[idx]) ^ caes_mul(0x0b, estado[idx+1]);
if(base == 3) t = caes_mul(0x0b, estado[idx-3]) ^ caes_mul(0x0d, estado[idx-2]) ^ caes_mul(0x09, estado[idx-1]) ^ caes_mul(0x0e, estado[idx]);
__syncthreads();
estado[idx] = t;
}
// XORs the 16-byte round key (chave) into every 16-byte block of the state.
__device__ void AddRoundKey(uint8_t *estado, uint8_t *chave) {
estado[threadIdx.x] ^= chave[threadIdx.x % 16];
}
// AES decryption kernel: each thread owns one ciphertext byte; a block
// stages blockDim.x bytes (up to THREADS) in shared memory and runs Nr
// inverse rounds. cW is the expanded key schedule ((i << 4) selects round
// key i). The helper calls contain __syncthreads(), so they are invoked
// unconditionally by all threads.
__global__ void InvAes(uint8_t *cp, uint8_t *cW, uint8_t Nr) {
__shared__ uint8_t estado[THREADS];
register int i;
estado[threadIdx.x] = cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x];
__syncthreads();
AddRoundKey(estado, cW+(Nr << 4));
for(i=Nr; i>1; i--) {
InvShiftRows(estado);
InvSubBytes(estado);
AddRoundKey(estado, cW+((i-1) << 4));
InvMixColumns(estado);
}
// Final round omits InvMixColumns.
InvShiftRows(estado);
InvSubBytes(estado);
AddRoundKey(estado, cW);
__syncthreads();
cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x] = estado[threadIdx.x];
}
// AES encryption kernel, mirror image of InvAes: initial AddRoundKey,
// Nr-1 full rounds, then a final round without MixColumns (i == Nr after
// the loop, so cW+(i << 4) is the last round key).
__global__ void Aes(uint8_t *cp, uint8_t *cW, uint8_t Nr) {
__shared__ uint8_t estado[THREADS];
register int i;
estado[threadIdx.x] = cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x];
__syncthreads();
AddRoundKey(estado, cW);
for(i=1; i<Nr; i++) {
SubBytes(estado);
ShiftRows(estado);
MixColumns(estado);
AddRoundKey(estado, cW+(i << 4));
}
SubBytes(estado);
ShiftRows(estado);
AddRoundKey(estado, cW+(i << 4));
__syncthreads();
cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x] = estado[threadIdx.x];
}
// Host-side AES key expansion: derives the full round-key schedule W
// (4*(Nr+1) 4-byte words, accessed column-wise via the GET macro) from the
// raw key. keysize is in bits; Nk is the key length in words; the Nk > 6
// branch adds the extra SubWord step AES-256 requires at i % Nk == 4.
void ExpandKeys(uint8_t *key, uint8_t keysize, uint8_t *W, uint8_t Nk, uint8_t Nr) {
uint8_t i, j, cols, temp, tmp[4];
cols = (Nr + 1) << 2;
memcpy(W, key, (keysize >> 3)*sizeof(uint8_t));
for(i=Nk; i<cols; i++) {
// tmp = previous word.
for(j=0; j<4; j++)
tmp[j] = GET(W, j, i-1);
if(Nk > 6) {
if(i % Nk == 0) {
// RotWord + SubWord + Rcon for the first word of each key group.
temp = KeySbox[tmp[0]] ^ (Rcon[i/Nk] & 0x000000ff);
tmp[0] = KeySbox[tmp[1]] ^ ((Rcon[i/Nk] & 0xff000000) >> 24);
tmp[1] = KeySbox[tmp[2]] ^ ((Rcon[i/Nk] & 0x00ff0000) >> 16);
tmp[2] = KeySbox[tmp[3]] ^ ((Rcon[i/Nk] & 0x0000ff00) >> 8);
tmp[3] = temp;
} else if(i % Nk == 4) {
// AES-256 only: plain SubWord mid-group.
tmp[0] = KeySbox[tmp[0]];
tmp[1] = KeySbox[tmp[1]];
tmp[2] = KeySbox[tmp[2]];
tmp[3] = KeySbox[tmp[3]];
}
} else {
if(i % Nk == 0) {
temp = KeySbox[tmp[0]] ^ (Rcon[i/Nk] & 0x000000ff);
tmp[0] = KeySbox[tmp[1]] ^ ((Rcon[i/Nk] & 0xff000000) >> 24);
tmp[1] = KeySbox[tmp[2]] ^ ((Rcon[i/Nk] & 0x00ff0000) >> 16);
tmp[2] = KeySbox[tmp[3]] ^ ((Rcon[i/Nk] & 0x0000ff00) >> 8);
tmp[3] = temp;
}
}
// W[i] = W[i-Nk] XOR transformed previous word.
for(j=0; j<4; j++)
GET(W, j, i) = GET(W, j, i-Nk) ^ tmp[j];
}
}
// Host wrapper: expands the key, uploads the schedule and the data, runs the
// AES kernel over `offset` 16-byte blocks, and copies the result into `out`.
// tamanhoChave is the key size in bits; acao != 0 encrypts, 0 decrypts.
//
// Fixes vs. the original:
//  - hipFree(&cW)/hipFree(&cp) passed the ADDRESS of the pointer variables,
//    so hipFree received an invalid pointer and the device buffers leaked;
//    they now receive the pointers themselves;
//  - the host-side key schedule W was never freed.
void aes_cuda(uint8_t *in, uint8_t *chave, uint8_t *out, uint8_t tamanhoChave, uint64_t offset, dim3 numeroBlocos, uint8_t acao) {
    uint8_t *cp, *W, *cW, Nk, Nr;
    Nk = tamanhoChave >> 5;                         // key length in 32-bit words
    Nr = Nk + 6;                                    // AES round count (10/12/14)
    long size = 4*4*offset*sizeof(uint8_t);         // payload bytes (16 per block)
    uint64_t s = ((Nr+1) * sizeof(uint8_t)) << 4;   // key-schedule bytes: 16*(Nr+1)
    W = (uint8_t *)malloc(s);
    hipMalloc((void**)&cW, s);
    ExpandKeys(chave, tamanhoChave, W, Nk, Nr);
    hipMemcpyAsync(cW, W, s, hipMemcpyHostToDevice);
    hipMalloc((void**)&cp, size);
    hipMemcpyAsync(cp, in, size, hipMemcpyHostToDevice);
    if (acao) {
        hipLaunchKernelGGL(Aes, numeroBlocos, dim3(THREADS), 0, 0, cp, cW, Nr);
    } else {
        hipLaunchKernelGGL(InvAes, numeroBlocos, dim3(THREADS), 0, 0, cp, cW, Nr);
    }
    // Blocking copy also synchronizes with the kernel before we read back.
    hipMemcpy(out, cp, size, hipMemcpyDeviceToHost);
    free(W);
    hipFree(cW);
    hipFree(cp);
}
// Parses a hex string ("0A1B...") into a freshly malloc'd byte array stored
// through `array`; returns the byte count (strlen/2). Caller frees.
// Fix: "%02X" requires an unsigned int* destination, so scanning straight
// into a uint8_t slot wrote 4 bytes per field and clobbered the neighbouring
// bytes (undefined behavior); the "hh" length modifier scans directly into
// an unsigned char.
uint8_t stringToByteArray(char *str, uint8_t *array[]) {
    uint8_t len = strlen(str) >> 1;   // NOTE(review): truncates for inputs longer than 510 chars
    *array = (uint8_t *)malloc(len * sizeof(uint8_t));
    for (uint8_t i = 0; i < len; i++)
        sscanf(str + i * 2, "%02hhX", *array + i);
    return len;
}
// Prints `size` bytes as two-digit uppercase hex followed by a newline.
// Fix: the loop counter was a uint8_t while size is uint64_t, so any size
// above 255 wrapped the counter and looped forever; the counter is now
// uint64_t to match.
void printHexArray(uint8_t *array, uint64_t size) {
    for (uint64_t i = 0; i < size; i++)
        printf("%02X", array[i]);
    printf("\n");
}
//Popula uma entrada alearia
// Fills `entrada` with `size` pseudo-random bytes in [0, 0xfe] drawn from
// rand() (seed with srand() beforehand for reproducibility).
void aleatorio(uint8_t *entrada, uint64_t size) {
    for (uint64_t k = 0; k < size; ++k)
        entrada[k] = (uint8_t)(rand() % 0xff);
}
// Entry point. Usage: aes BLOCOS THREADSPORBLOCO TAMANHOCHAVE TAMANHOENTRADA.
// Encrypts a buffer (random, or a hard-coded test vector when the size
// argument is 0) on the GPU, decrypts it again, and checks the round trip.
//
// Fixes vs. the original:
//  - argv[4] is read, so the argument check must be argc < 5 (with exactly
//    three arguments atoi(argv[4]) dereferenced the NULL argv terminator);
//  - "dim3 numeroBlocos = (a, b);" used the C comma operator, silently
//    discarding a; it is now a proper two-argument dim3 construction;
//  - uint64_t values were printed with %d; they now use %llu with a cast.
int main(int argc, char **argv){
    clock_t passo;
    passo = clock();
    uint8_t *chave, *out, *in;
    uint64_t blocos;
    if(argc < 5) {
        printf("Nmero de parmetros errados\nUse: aes BLOCOS THREADSPORBLOCO TAMANHOCHAVE TAMANHOENTRADA\n");
        return 1;
    }
    dim3 numeroBlocos(atoi(argv[1]), atoi(argv[2]));
    uint8_t tamanhoChave = atoi(argv[3]);        // key size in BYTES (16/24/32)
    uint64_t tamanhoIn = atoi(argv[4]);          // payload size in bytes
    if(tamanhoChave != 16 && tamanhoChave != 24 && tamanhoChave != 32) {
        printf("Tamanho da chave invlido\n");
        return 1;
    }
    if(tamanhoIn == 0) {
        // Fixed key/plaintext pair; sizes are recomputed from the strings.
        char *chavein = "000102030405060708090a0b0c0d0e0f";
        char *inin = "3243f6a8885a308d313198a2e037073400112233445566778899aabbccddeeff";
        tamanhoChave = stringToByteArray(chavein, &chave);
        tamanhoIn = stringToByteArray(inin, &in);
    } else {
        if(tamanhoIn % 16 != 0) {
            printf("Tamanho de bloco invlido\n Deve ser mltiplo de 16\n resto: %llu \n", (unsigned long long)(tamanhoIn % 16));
            return 1;
        } else {
            srand(time(NULL));
            chave = (uint8_t *)malloc(tamanhoChave * sizeof(uint8_t));
            in = (uint8_t *)malloc(tamanhoIn * sizeof(uint8_t));
            aleatorio(chave, tamanhoChave);
            aleatorio(in, tamanhoIn);
        }
    }
    blocos = tamanhoIn / 16;    // number of 16-byte AES blocks
    printf("%llu\n", (unsigned long long)tamanhoIn);
    printf("Entrada : ");
    // NOTE(review): always dumps 32 bytes even when tamanhoIn is 16 — confirm.
    printHexArray(in, 32);
    out = (uint8_t *)malloc(tamanhoIn * sizeof(uint8_t));
    memset(out, 0, tamanhoIn);
    // NOTE(review): dividing by 1000 makes this seconds/1000, not ms — confirm intent.
    printf("Tempo de inicializao em ms %f\n", (clock() - passo) / (double)CLOCKS_PER_SEC/1000);
    printf("Criptografa CUDA\n");
    passo = clock();
    aes_cuda(in, chave, out, tamanhoChave << 3, blocos, numeroBlocos, 1);
    printf("Tempo em ms %f\n", (clock() - passo) / (double)CLOCKS_PER_SEC);
    printHexArray(out, 32);
    // Ciphertext should differ from the plaintext.
    !memcmp(in, out, tamanhoIn)?printf("Falha\n"):printf("Ok\n");
    printf("Descriptografa CUDA\n");
    passo = clock();
    aes_cuda(out, chave, out, tamanhoChave << 3, blocos, numeroBlocos, 0);
    printf("Tempo em ms %f\n", (clock() - passo) / (double)CLOCKS_PER_SEC);
    printHexArray(out, 32);
    printf("Verificando algoritmo CUDA: ");
    !memcmp(in, out, tamanhoIn)?printf("OK\n"):printf("Falha. Verifique o algoritmo\n");
    printf("\n");
    return EXIT_SUCCESS;
}
| 93903c8ef840a14e31f56257be16f800701f669d.cu | #include <stdio.h>
#include <time.h>
#include "aes2.h"
#define caes_mul(a, b) ((a)&&(b)?CiLogTable[(ClogTable[(a)]+ClogTable[(b)])%0xff]:0)
#define GET(M,X,Y) ((M)[((Y) << 2) + (X)])
int const THREADS = 512;
// AES SubBytes: each thread substitutes its own state byte through the S-box.
__device__ void SubBytes(uint8_t *estado) {
estado[threadIdx.x] = Sbox[estado[threadIdx.x]];
}
// Inverse SubBytes via the inverse S-box.
__device__ void InvSubBytes(uint8_t *estado) {
estado[threadIdx.x] = InvSbox[estado[threadIdx.x]];
}
// AES ShiftRows over a shared buffer holding one 16-byte block per 16
// consecutive threads: ((idx >> 4) << 4) is the base of this thread's block
// and the %16 term is the rotated byte offset inside it. Read-then-barrier-
// then-write so every thread reads the pre-shift state.
__device__ void ShiftRows(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int row = idx % 4;
uint8_t t;
t = estado[((idx + 4*row) % 16) + ((idx >> 4 ) << 4)];
__syncthreads();
estado[idx] = t;
}
// Inverse ShiftRows. Subtle but correct: idx is unsigned, so idx - 4*row
// wraps modulo 2^32 when the difference is negative, and since 2^32 is a
// multiple of 16 the subsequent % 16 still yields the intended offset.
__device__ void InvShiftRows(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int row = idx % 4;
uint8_t t;
t = estado[((idx - 4*row) % 16) + ((idx >> 4 ) << 4)];
__syncthreads();
estado[idx] = t;
}
// AES MixColumns: each thread recomputes its own byte from the four bytes of
// its column using GF(2^8) multiplies (caes_mul log tables). All threads
// read the old state, hit the barrier, then write.
__device__ void MixColumns(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int base = idx % 4;
uint8_t t;
if(base == 0) t = caes_mul(0x02, estado[idx]) ^ caes_mul(0x03, estado[idx+1]) ^ estado[idx+2] ^ estado[idx+3];
if(base == 1) t = estado[idx-1] ^ caes_mul(0x02, estado[idx]) ^ caes_mul(0x03, estado[idx+1]) ^ estado[idx+2];
if(base == 2) t = estado[idx-2] ^ estado[idx-1] ^ caes_mul(0x02, estado[idx]) ^ caes_mul(0x03, estado[idx+1]);
if(base == 3) t = caes_mul(0x03, estado[idx-3]) ^ estado[idx-2] ^ estado[idx-1] ^ caes_mul(0x02, estado[idx]);
__syncthreads();
estado[idx] = t;
}
// Inverse MixColumns with the decryption coefficients 0x0e/0x0b/0x0d/0x09.
__device__ void InvMixColumns(uint8_t *estado) {
unsigned int idx = threadIdx.x;
int base = idx % 4;
uint8_t t;
if(base == 0) t = caes_mul(0x0e, estado[idx]) ^ caes_mul(0x0b, estado[idx+1]) ^ caes_mul(0x0d, estado[idx+2]) ^ caes_mul(0x09, estado[idx+3]);
if(base == 1) t = caes_mul(0x09, estado[idx-1]) ^ caes_mul(0x0e, estado[idx]) ^ caes_mul(0x0b, estado[idx+1]) ^ caes_mul(0x0d, estado[idx+2]);
if(base == 2) t = caes_mul(0x0d, estado[idx-2]) ^ caes_mul(0x09, estado[idx-1]) ^ caes_mul(0x0e, estado[idx]) ^ caes_mul(0x0b, estado[idx+1]);
if(base == 3) t = caes_mul(0x0b, estado[idx-3]) ^ caes_mul(0x0d, estado[idx-2]) ^ caes_mul(0x09, estado[idx-1]) ^ caes_mul(0x0e, estado[idx]);
__syncthreads();
estado[idx] = t;
}
// XORs the 16-byte round key (chave) into every 16-byte block of the state.
__device__ void AddRoundKey(uint8_t *estado, uint8_t *chave) {
estado[threadIdx.x] ^= chave[threadIdx.x % 16];
}
// AES decryption kernel: each thread owns one ciphertext byte; a block
// stages blockDim.x bytes (up to THREADS) in shared memory and runs Nr
// inverse rounds. cW is the expanded key schedule ((i << 4) selects round
// key i). The helper calls contain __syncthreads(), so they are invoked
// unconditionally by all threads.
__global__ void InvAes(uint8_t *cp, uint8_t *cW, uint8_t Nr) {
__shared__ uint8_t estado[THREADS];
register int i;
estado[threadIdx.x] = cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x];
__syncthreads();
AddRoundKey(estado, cW+(Nr << 4));
for(i=Nr; i>1; i--) {
InvShiftRows(estado);
InvSubBytes(estado);
AddRoundKey(estado, cW+((i-1) << 4));
InvMixColumns(estado);
}
// Final round omits InvMixColumns.
InvShiftRows(estado);
InvSubBytes(estado);
AddRoundKey(estado, cW);
__syncthreads();
cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x] = estado[threadIdx.x];
}
// AES encryption kernel, mirror image of InvAes: initial AddRoundKey,
// Nr-1 full rounds, then a final round without MixColumns (i == Nr after
// the loop, so cW+(i << 4) is the last round key).
__global__ void Aes(uint8_t *cp, uint8_t *cW, uint8_t Nr) {
__shared__ uint8_t estado[THREADS];
register int i;
estado[threadIdx.x] = cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x];
__syncthreads();
AddRoundKey(estado, cW);
for(i=1; i<Nr; i++) {
SubBytes(estado);
ShiftRows(estado);
MixColumns(estado);
AddRoundKey(estado, cW+(i << 4));
}
SubBytes(estado);
ShiftRows(estado);
AddRoundKey(estado, cW+(i << 4));
__syncthreads();
cp[(blockIdx.x*blockDim.x)+(blockIdx.y*blockDim.x*gridDim.x)+threadIdx.x] = estado[threadIdx.x];
}
// Host-side AES key expansion: derives the full round-key schedule W
// (4*(Nr+1) 4-byte words, accessed column-wise via the GET macro) from the
// raw key. keysize is in bits; Nk is the key length in words; the Nk > 6
// branch adds the extra SubWord step AES-256 requires at i % Nk == 4.
void ExpandKeys(uint8_t *key, uint8_t keysize, uint8_t *W, uint8_t Nk, uint8_t Nr) {
uint8_t i, j, cols, temp, tmp[4];
cols = (Nr + 1) << 2;
memcpy(W, key, (keysize >> 3)*sizeof(uint8_t));
for(i=Nk; i<cols; i++) {
// tmp = previous word.
for(j=0; j<4; j++)
tmp[j] = GET(W, j, i-1);
if(Nk > 6) {
if(i % Nk == 0) {
// RotWord + SubWord + Rcon for the first word of each key group.
temp = KeySbox[tmp[0]] ^ (Rcon[i/Nk] & 0x000000ff);
tmp[0] = KeySbox[tmp[1]] ^ ((Rcon[i/Nk] & 0xff000000) >> 24);
tmp[1] = KeySbox[tmp[2]] ^ ((Rcon[i/Nk] & 0x00ff0000) >> 16);
tmp[2] = KeySbox[tmp[3]] ^ ((Rcon[i/Nk] & 0x0000ff00) >> 8);
tmp[3] = temp;
} else if(i % Nk == 4) {
// AES-256 only: plain SubWord mid-group.
tmp[0] = KeySbox[tmp[0]];
tmp[1] = KeySbox[tmp[1]];
tmp[2] = KeySbox[tmp[2]];
tmp[3] = KeySbox[tmp[3]];
}
} else {
if(i % Nk == 0) {
temp = KeySbox[tmp[0]] ^ (Rcon[i/Nk] & 0x000000ff);
tmp[0] = KeySbox[tmp[1]] ^ ((Rcon[i/Nk] & 0xff000000) >> 24);
tmp[1] = KeySbox[tmp[2]] ^ ((Rcon[i/Nk] & 0x00ff0000) >> 16);
tmp[2] = KeySbox[tmp[3]] ^ ((Rcon[i/Nk] & 0x0000ff00) >> 8);
tmp[3] = temp;
}
}
// W[i] = W[i-Nk] XOR transformed previous word.
for(j=0; j<4; j++)
GET(W, j, i) = GET(W, j, i-Nk) ^ tmp[j];
}
}
// Host wrapper: expands the key, uploads the schedule and the data, runs the
// AES kernel over `offset` 16-byte blocks, and copies the result into `out`.
// tamanhoChave is the key size in bits; acao != 0 encrypts, 0 decrypts.
//
// Fixes vs. the original:
//  - cudaFree(&cW)/cudaFree(&cp) passed the ADDRESS of the pointer
//    variables, so cudaFree received an invalid pointer and the device
//    buffers leaked; they now receive the pointers themselves;
//  - the host-side key schedule W was never freed.
void aes_cuda(uint8_t *in, uint8_t *chave, uint8_t *out, uint8_t tamanhoChave, uint64_t offset, dim3 numeroBlocos, uint8_t acao) {
    uint8_t *cp, *W, *cW, Nk, Nr;
    Nk = tamanhoChave >> 5;                         // key length in 32-bit words
    Nr = Nk + 6;                                    // AES round count (10/12/14)
    long size = 4*4*offset*sizeof(uint8_t);         // payload bytes (16 per block)
    uint64_t s = ((Nr+1) * sizeof(uint8_t)) << 4;   // key-schedule bytes: 16*(Nr+1)
    W = (uint8_t *)malloc(s);
    cudaMalloc((void**)&cW, s);
    ExpandKeys(chave, tamanhoChave, W, Nk, Nr);
    cudaMemcpyAsync(cW, W, s, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&cp, size);
    cudaMemcpyAsync(cp, in, size, cudaMemcpyHostToDevice);
    if (acao) {
        Aes<<<numeroBlocos, THREADS>>>(cp, cW, Nr);
    } else {
        InvAes<<<numeroBlocos, THREADS>>>(cp, cW, Nr);
    }
    // Blocking copy also synchronizes with the kernel before we read back.
    cudaMemcpy(out, cp, size, cudaMemcpyDeviceToHost);
    free(W);
    cudaFree(cW);
    cudaFree(cp);
}
// Parses a hex string ("0A1B...") into a freshly malloc'd byte array stored
// through `array`; returns the byte count (strlen/2). Caller frees.
// Fix: "%02X" requires an unsigned int* destination, so scanning straight
// into a uint8_t slot wrote 4 bytes per field and clobbered the neighbouring
// bytes (undefined behavior); the "hh" length modifier scans directly into
// an unsigned char.
uint8_t stringToByteArray(char *str, uint8_t *array[]) {
    uint8_t len = strlen(str) >> 1;   // NOTE(review): truncates for inputs longer than 510 chars
    *array = (uint8_t *)malloc(len * sizeof(uint8_t));
    for (uint8_t i = 0; i < len; i++)
        sscanf(str + i * 2, "%02hhX", *array + i);
    return len;
}
// Prints `size` bytes as two-digit uppercase hex followed by a newline.
// Fix: the loop counter was a uint8_t while size is uint64_t, so any size
// above 255 wrapped the counter and looped forever; the counter is now
// uint64_t to match.
void printHexArray(uint8_t *array, uint64_t size) {
    for (uint64_t i = 0; i < size; i++)
        printf("%02X", array[i]);
    printf("\n");
}
//Popula uma entrada aleaória
// Fills `entrada` with `size` pseudo-random bytes in [0, 0xfe] drawn from
// rand() (seed with srand() beforehand for reproducibility).
void aleatorio(uint8_t *entrada, uint64_t size) {
    for (uint64_t k = 0; k < size; ++k)
        entrada[k] = (uint8_t)(rand() % 0xff);
}
// Entry point. Usage: aes BLOCOS THREADSPORBLOCO TAMANHOCHAVE TAMANHOENTRADA.
// Encrypts a buffer (random, or a hard-coded test vector when the size
// argument is 0) on the GPU, decrypts it again, and checks the round trip.
//
// Fixes vs. the original:
//  - argv[4] is read, so the argument check must be argc < 5 (with exactly
//    three arguments atoi(argv[4]) dereferenced the NULL argv terminator);
//  - "dim3 numeroBlocos = (a, b);" used the C comma operator, silently
//    discarding a; it is now a proper two-argument dim3 construction;
//  - uint64_t values were printed with %d; they now use %llu with a cast.
int main(int argc, char **argv){
    clock_t passo;
    passo = clock();
    uint8_t *chave, *out, *in;
    uint64_t blocos;
    if(argc < 5) {
        printf("Número de parâmetros errados\nUse: aes BLOCOS THREADSPORBLOCO TAMANHOCHAVE TAMANHOENTRADA\n");
        return 1;
    }
    dim3 numeroBlocos(atoi(argv[1]), atoi(argv[2]));
    uint8_t tamanhoChave = atoi(argv[3]);        // key size in BYTES (16/24/32)
    uint64_t tamanhoIn = atoi(argv[4]);          // payload size in bytes
    if(tamanhoChave != 16 && tamanhoChave != 24 && tamanhoChave != 32) {
        printf("Tamanho da chave inválido\n");
        return 1;
    }
    if(tamanhoIn == 0) {
        // Fixed key/plaintext pair; sizes are recomputed from the strings.
        char *chavein = "000102030405060708090a0b0c0d0e0f";
        char *inin = "3243f6a8885a308d313198a2e037073400112233445566778899aabbccddeeff";
        tamanhoChave = stringToByteArray(chavein, &chave);
        tamanhoIn = stringToByteArray(inin, &in);
    } else {
        if(tamanhoIn % 16 != 0) {
            printf("Tamanho de bloco inválido\n Deve ser múltiplo de 16\n resto: %llu \n", (unsigned long long)(tamanhoIn % 16));
            return 1;
        } else {
            srand(time(NULL));
            chave = (uint8_t *)malloc(tamanhoChave * sizeof(uint8_t));
            in = (uint8_t *)malloc(tamanhoIn * sizeof(uint8_t));
            aleatorio(chave, tamanhoChave);
            aleatorio(in, tamanhoIn);
        }
    }
    blocos = tamanhoIn / 16;    // number of 16-byte AES blocks
    printf("%llu\n", (unsigned long long)tamanhoIn);
    printf("Entrada : ");
    // NOTE(review): always dumps 32 bytes even when tamanhoIn is 16 — confirm.
    printHexArray(in, 32);
    out = (uint8_t *)malloc(tamanhoIn * sizeof(uint8_t));
    memset(out, 0, tamanhoIn);
    // NOTE(review): dividing by 1000 makes this seconds/1000, not ms — confirm intent.
    printf("Tempo de inicialização em ms %f\n", (clock() - passo) / (double)CLOCKS_PER_SEC/1000);
    printf("Criptografa CUDA\n");
    passo = clock();
    aes_cuda(in, chave, out, tamanhoChave << 3, blocos, numeroBlocos, 1);
    printf("Tempo em ms %f\n", (clock() - passo) / (double)CLOCKS_PER_SEC);
    printHexArray(out, 32);
    // Ciphertext should differ from the plaintext.
    !memcmp(in, out, tamanhoIn)?printf("Falha\n"):printf("Ok\n");
    printf("Descriptografa CUDA\n");
    passo = clock();
    aes_cuda(out, chave, out, tamanhoChave << 3, blocos, numeroBlocos, 0);
    printf("Tempo em ms %f\n", (clock() - passo) / (double)CLOCKS_PER_SEC);
    printHexArray(out, 32);
    printf("Verificando algoritmo CUDA: ");
    !memcmp(in, out, tamanhoIn)?printf("OK\n"):printf("Falha. Verifique o algoritmo\n");
    printf("\n");
    return EXIT_SUCCESS;
}
|
509eaa3e1b8240181b223b35172fe216cd23ffe6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"
//GLOBALS
// Simulation-wide state shared by the host driver code in this file.
const glm::vec3 gravity(0, -9.8, 0);
int dimx, dimz;                      // cloth grid resolution
dim3 threadsPerBlock;
dim3 fullBlocksPerGridV; //for parallel vertex operations
dim3 fullBlocksPerGridT; //for parallel triangle operations
int threadsPerBlockE; //for parallel edge operations
int fullBlocksPerGridE; //for parallel edge operations
int vertexCount;
int triangleCount;
int edgeCount;
glm::vec3* pos;                      // per-vertex buffers used by the kernels below
glm::vec3* predicted_pos;
glm::vec3* normals;
glm::vec3* vel;
bool* lock_pos;
float* inv_mass;
//TEMP
//glm::vec3* acc;
//float Ks = 0.01;
//float Kd = 0;
int* triangles;
Edge* edges;
int solver_iterations = 10;
float fp_stiff = 1;
// Fix: resolved an unmerged VCS conflict (<<<<<<< HEAD ... >>>>>>> 064d510...)
// that made this file uncompilable. Kept the HEAD value; the other branch
// had 0.5 — NOTE(review): confirm which stretch stiffness is intended.
float st_stiff = 0.9;
float b_stiff = 0.05;
float col_stiff = 1;
FixedPointConstraint* fp_constraints;
StretchConstraint* st_constraints;
BendConstraint* b_constraints;
CollisionConstraint* col_onstraints;
//SelfCollisionConstraint* sc_constraints;
// Reports the most recent CUDA/HIP error (if any) to stderr, prefixed with
// an optional source line number. Does not abort; execution continues after
// the report.
void checkCUDAError(const char *msg, int line = -1)
{
    hipError_t err = hipGetLastError();
    if (err == hipSuccess)
        return;
    if (line >= 0)
        fprintf(stderr, "Line %d: ", line);
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
    //exit(EXIT_FAILURE);
}
// Initialize vertex attributes
// Initializes per-vertex state for a dimx x dimz cloth grid lying in the
// y = y0 plane with spacing (dx, dz). Launch a 2-D grid covering
// dimx x dimz; each thread writes exactly one vertex (index = j + i*dimz).
__global__ void initVertexAttributes(glm::vec3* pos, glm::vec3* predicted_pos, glm::vec3* vel,
glm::vec3* normals, bool* lock_pos, float* inv_mass,
int dimx, int dimz, float dx, float dz, float y0, float mass)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
pos[index] = glm::vec3(i * dx, y0, j * dz);
predicted_pos[index] = glm::vec3(i * dx, y0, j * dz);
vel[index] = glm::vec3(0, 0, 0);
normals[index] = glm::vec3(0, 1, 0);
lock_pos[index] = false;
inv_mass[index] = 1/mass;
}
}
// Builds the triangle index list (two triangles per grid cell, 6 ints per
// cell) and the edge list (3 interior edges per cell plus border edges
// appended at offset 3*(dimx-1)*(dimz-1)). Each Edge records its two
// vertices and the two adjacent triangle slots; tri2 = -1 marks a border.
// NOTE(review): the neighbour-triangle indices use strides dimz
// ((i-1)*dimz+j, i*dimz+j-1) while cell indices use stride (dimz-1)
// (index = j + i*(dimz-1)) — verify these refer to the intended cells.
// NOTE(review): the j == dimz-2 border slot is 3*(dimx-1)*(dimz-1)+dimz-1+i;
// confirm this matches how edgeCount is computed by the caller.
__global__ void initTriangles(int* triangles, Edge* edges, int dimx, int dimz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx-1 && j < dimz-1) {
int index = j + i * (dimz-1);
triangles[6*index ] = dimz*i + j;
triangles[6*index+1] = dimz*i + j + 1;
triangles[6*index+2] = dimz*(i+1) + j;
triangles[6*index+3] = dimz*(i+1) + j;
triangles[6*index+4] = dimz*i + j + 1;
triangles[6*index+5] = dimz*(i+1) + j + 1;
edges[3*index ] = Edge(dimz*i+j, dimz*i+j+1, 6*index, 6*((i-1)*dimz+j)+3);
edges[3*index+1] = Edge(dimz*i+j+1, dimz*(i+1)+j, 6*index, 6*index+3);
edges[3*index+2] = Edge(dimz*(i+1)+j, dimz*i+j, 6*index, 6*(i*dimz+j-1)+3);
if (i == 0) {
edges[3*index].tri2 = -1; //no triangle on the other side
}
if (j == 0) {
edges[3*index+2].tri2 = -1; //no triangle on the other side
}
if (i == dimx-2) {
edges[3*(dimx-1)*(dimz-1)+j] = Edge(dimz*(i+1)+j+1, dimz*(i+1)+j, 6*index+3, -1);
}
if (j == dimz-2) {
edges[3*(dimx-1)*(dimz-1)+dimz-1+i] = Edge(dimz*i+j+1, dimz*(i+1)+j+1, 6*index+3, -1);
}
}
}
// Apply external force, damp velocity and compute predicted positions
// PBD pre-solve step: integrates the external force into the velocity and
// advances predicted_pos by one explicit Euler step. One thread per vertex
// (index = j + i*dimz).
__global__ void preConstraintsUpdate(glm::vec3* predicted_pos, glm::vec3* vel, float* inv_mass,
int dimx, int dimz, glm::vec3 force, float dt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
vel[index] += force * inv_mass[index] * dt;
// no damp velocity for now
predicted_pos[index] += vel[index] * dt;
}
}
// Builds one FixedPointConstraint slot per vertex: the whole i == 0 column
// is pinned to its rest position (0, y0, dz*j); every other slot is marked
// inactive with v0 = -1. (Earlier two-corner pinning kept for reference.)
__global__ void generateFPConstraints(FixedPointConstraint* fp_constraints,
int dimx, int dimz, float dx, float dz, float y0)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
//if (i == 0 && j == 0) {
// glm::vec3 fp(0, y0, 0);
// fp_constraints[index] = FixedPointConstraint(index, fp);
//}
//else if (i == 0 && j == dimz-1) {
// glm::vec3 fp(0, y0, (dimz-1)*dz);
// fp_constraints[index] = FixedPointConstraint(index, fp);
//}
if (i == 0) {
glm::vec3 fp(0, y0, dz*j);
fp_constraints[index] = FixedPointConstraint(index, fp);
}
else {
fp_constraints[index] = FixedPointConstraint(-1, glm::vec3(0));
}
}
}
// Builds one stretch constraint per edge, using the current vertex distance
// as the rest length. NOTE: b_constraints and triangles are accepted but
// never used here — bend-constraint generation is not implemented.
__global__ void generateStretchBendConstraints(StretchConstraint* st_constraints, BendConstraint* b_constraints,
glm::vec3* pos, Edge* edges, int* triangles, int edgeCount)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < edgeCount) {
// generate stretch constraints
int v1 = edges[index].v1;
int v2 = edges[index].v2;
glm::vec3 p1 = pos[v1];
glm::vec3 p2 = pos[v2];
float l = glm::length(p1 - p2);
st_constraints[index] = StretchConstraint(v1, v2, l);
}
}
// Project fixed point constraints: pull each pinned vertex toward its anchor
// by `stiff`, and mark it in lock_pos so the stretch solver treats it as
// immovable. One thread per vertex on a 2D dimx x dimz launch.
__global__ void resolveFPConstraints(FixedPointConstraint* fp_constraints, glm::vec3* predicted_pos,
                                     bool* lock_pos, float stiff, int dimx, int dimz)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= dimx || j >= dimz)
        return;

    FixedPointConstraint fpc = fp_constraints[j + i * dimz];
    if (fpc.v0 == -1)
        return;                      // sentinel: no constraint stored in this slot

    lock_pos[fpc.v0] = true;
    predicted_pos[fpc.v0] += (fpc.fixed_point - predicted_pos[fpc.v0]) * stiff;
}
// Project all stretch constraints for one Jacobi-style PBD solver pass.
// One thread per edge; position corrections are accumulated with per-component
// atomicAdd because several edges share each vertex.
// NOTE: predicted positions are read non-atomically while other threads add
// corrections, so a pass may see slightly stale data -- acceptable for an
// iterative solver.
// NOTE(review): b_constraints/b_stiff are accepted but bend constraints are
// not projected here.
__global__ void resolveStretchBendConstraints(StretchConstraint* st_constraints, BendConstraint* b_constraints,
                                              glm::vec3* predicted_pos, float* inv_mass, bool* lock_pos,
                                              float st_stiff, float b_stiff, int edgeCount)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < edgeCount) {
        // project stretch constraints
        StretchConstraint stc = st_constraints[index];
        glm::vec3 sp1 = predicted_pos[stc.v1];
        glm::vec3 sp2 = predicted_pos[stc.v2];
        float l = glm::length(sp1 - sp2);
        // Locked vertices are treated as infinitely heavy (weight 0).
        float sw1 = lock_pos[stc.v1] ? 0 : inv_mass[stc.v1];
        float sw2 = lock_pos[stc.v2] ? 0 : inv_mass[stc.v2];
        // Skip when both endpoints are immovable, or the edge is degenerate:
        // normalizing a ~zero vector would inject NaNs that poison the mesh.
        if (!(sw1 < FLT_EPSILON && sw2 < FLT_EPSILON) && l > FLT_EPSILON) {
            glm::vec3 dir = (sp1 - sp2) / l;                 // unit edge direction, computed once
            float scale = (l - stc.rest_length) / (sw1 + sw2);
            glm::vec3 dist1 = -sw1 * scale * dir;            // correction for v1
            glm::vec3 dist2 =  sw2 * scale * dir;            // correction for v2
            atomicAdd(&(predicted_pos[stc.v1].x), dist1.x * st_stiff);
            atomicAdd(&(predicted_pos[stc.v1].y), dist1.y * st_stiff);
            atomicAdd(&(predicted_pos[stc.v1].z), dist1.z * st_stiff);
            atomicAdd(&(predicted_pos[stc.v2].x), dist2.x * st_stiff);
            atomicAdd(&(predicted_pos[stc.v2].y), dist2.y * st_stiff);
            atomicAdd(&(predicted_pos[stc.v2].z), dist2.z * st_stiff);
        }
    }
}
//__global__ void resetAcc(glm::vec3* acc, int dimx, int dimz)
//{
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// int j = blockIdx.y * blockDim.y + threadIdx.y;
//
// if (i < dimx && j < dimz) {
// int index = j + i * dimz;
//
// acc[index] = glm::vec3(0, 0, 0);
// }
//}
//
//__global__ void computeAcc(glm::vec3* acc, StretchConstraint* st_constraints, glm::vec3* predicted_pos,
// glm::vec3* vel, float* inv_mass, float Ks, float Kd, int edgeCount)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
//
// if (index < edgeCount) {
// // project stretch constraints
// StretchConstraint stc = st_constraints[index];
// glm::vec3 sp1 = predicted_pos[stc.v1];
// glm::vec3 sp2 = predicted_pos[stc.v2];
// glm::vec3 sv1 = vel[stc.v1];
// glm::vec3 sv2 = vel[stc.v2];
// float l = glm::length(sp1 - sp2);
//
// glm::vec3 acc1 = -Ks * (l-stc.rest_length) * glm::normalize(sp1-sp2) - Kd * (sv1-sv2)
// * glm::normalize(sp1-sp2) * glm::normalize(sp1-sp2);
// glm::vec3 acc2 = -Ks * (l-stc.rest_length) * glm::normalize(sp2-sp1) - Kd * (sv2-sv1)
// * glm::normalize(sp2-sp1) * glm::normalize(sp2-sp1);
// acc1 = acc1 * inv_mass[stc.v1];
// acc2 = acc2 * inv_mass[stc.v2];
//
// atomicAdd(&(acc[stc.v1].x), acc1.x);
// atomicAdd(&(acc[stc.v1].y), acc1.y);
// atomicAdd(&(acc[stc.v1].z), acc1.z);
// atomicAdd(&(acc[stc.v2].x), acc2.x);
// atomicAdd(&(acc[stc.v2].y), acc2.y);
// atomicAdd(&(acc[stc.v2].z), acc2.z);
// }
//}
//
//__global__ void applyAcc(glm::vec3* acc, glm::vec3* vel, glm::vec3* predicted_pos, bool* lock_pos,
// float dt, int dimx, int dimz)
//{
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// int j = blockIdx.y * blockDim.y + threadIdx.y;
//
// if (i < dimx && j < dimz) {
// int index = j + i * dimz;
//
// if (!lock_pos[index]) {
// predicted_pos[index] += (vel[index] + acc[index]*dt) * dt;
// }
// }
//}
// Commit the solver result (standard PBD integration step):
// v = (p_predicted - p) / dt, then p = p_predicted.
// One thread per vertex on a 2D dimx x dimz launch.
__global__ void integrate(glm::vec3* pos, glm::vec3* predicted_pos, glm::vec3* vel, float dt, int dimx, int dimz)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= dimx || j >= dimz)
        return;

    int idx = j + i * dimz;
    glm::vec3 committed = predicted_pos[idx];
    vel[idx] = (committed - pos[idx]) / dt;
    pos[idx] = committed;
}
// Zero every per-vertex normal before face-normal accumulation.
__global__ void resetNormal(glm::vec3* normals, int dimx, int dimz)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < dimx && j < dimz)
        normals[j + i * dimz] = glm::vec3(0, 0, 0);
}
// Accumulate each triangle's (unit) face normal onto its three vertices.
// One thread per triangle; atomicAdd per component because vertices are
// shared between triangles and glm::vec3 has no atomic overload.
__global__ void computeNormal(glm::vec3* pos, glm::vec3* normals, int* triangles, int triangleCount)
{
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t >= triangleCount)
        return;

    int v[3] = { triangles[3*t], triangles[3*t + 1], triangles[3*t + 2] };
    glm::vec3 p0 = pos[v[0]];
    glm::vec3 p1 = pos[v[1]];
    glm::vec3 p2 = pos[v[2]];
    // Face normal from two edge vectors; winding determines its sign.
    glm::vec3 n = glm::normalize(glm::cross(p1 - p0, p2 - p1));

    for (int k = 0; k < 3; ++k) {
        atomicAdd(&(normals[v[k]].x), n.x);
        atomicAdd(&(normals[v[k]].y), n.y);
        atomicAdd(&(normals[v[k]].z), n.z);
    }
}
// Renormalize the accumulated vertex normals to unit length.
__global__ void resizeNormal(glm::vec3* normals, int dimx, int dimz)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= dimx || j >= dimz)
        return;
    int idx = j + i * dimz;
    normals[idx] = glm::normalize(normals[idx]);
}
// Copy positions and normals into the mapped vertex/normal buffer objects.
// Layout is vec4 per vertex: positions carry w = 1 (points), normals w = 0
// (directions). One thread per vertex on a 2D dimx x dimz launch.
__global__ void sendToVAO(glm::vec3* pos, glm::vec3* normals, float* vbo, float* nbo,
                          int dimx, int dimz)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= dimx || j >= dimz)
        return;

    int idx = j + i * dimz;
    glm::vec3 p = pos[idx];
    glm::vec3 n = normals[idx];
    float* v = vbo + 4 * idx;
    float* m = nbo + 4 * idx;
    v[0] = p.x; v[1] = p.y; v[2] = p.z; v[3] = 1;   // position (point, w = 1)
    m[0] = n.x; m[1] = n.y; m[2] = n.z; m[3] = 0;   // normal (direction, w = 0)
}
//Initialize positions and other attributes of vertices
// Allocates all device buffers for a dimx x dimz cloth, seeds the vertex
// attributes, builds the triangle/edge topology, and generates the fixed-point
// and stretch constraint sets. Must be called before update()/cudaUpdateVAO().
// NOTE(review): the "Kernel failed!" message is reused after plain allocation
// calls too, so a reported failure may actually be a hipMalloc error.
void initCuda(int xdim, int zdim, float dx, float dz, float y0, float mass)
{
dimx = xdim;
dimz = zdim;
vertexCount = dimx*dimz;
triangleCount = 2*(dimx-1)*(dimz-1);
// 3 edges per grid cell plus the two far boundary rims (dimx-1 and dimz-1).
edgeCount = 3*(dimx-1)*(dimz-1)+dimx-1+dimz-1;
hipMalloc((void**)&pos, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&predicted_pos, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&vel, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&normals, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&lock_pos, vertexCount*sizeof(bool));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&inv_mass, vertexCount*sizeof(float));
checkCUDAErrorWithLine("Kernel failed!");
// TEMP
//hipMalloc((void**)&acc, vertexCount*sizeof(glm::vec3));
//checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&triangles, 3*triangleCount*sizeof(int));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&edges, edgeCount*sizeof(Edge));
checkCUDAErrorWithLine("Kernel failed!");
// 2D launch config for per-vertex kernels.
threadsPerBlock = dim3(blockSize, blockSize);
fullBlocksPerGridV = dim3((int)ceil(float(dimx)/float(blockSize)), (int)ceil(float(dimz)/float(blockSize)));
hipLaunchKernelGGL(( initVertexAttributes), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, pos, predicted_pos, vel, normals,
lock_pos, inv_mass, dimx, dimz, dx, dz, y0, mass);
checkCUDAErrorWithLine("Kernel failed!");
// Per-cell launch: one thread per (dimx-1) x (dimz-1) quad.
dim3 fullBlocksPerGridV1 = dim3((int)ceil(float(dimx-1)/float(blockSize)), (int)ceil(float(dimz-1)/float(blockSize)));
hipLaunchKernelGGL(( initTriangles), dim3(fullBlocksPerGridV1), dim3(threadsPerBlock), 0, 0, triangles, edges, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
// 1D launch config for per-edge / per-triangle kernels.
threadsPerBlockE = blockSize * blockSize;
fullBlocksPerGridE = (int)ceil(float(edgeCount)/float(threadsPerBlockE));
fullBlocksPerGridT = (int)ceil(float(triangleCount)/float(threadsPerBlockE));
// calculate stiffness based on solver iterations
// NOTE(review): the * 6 scaling on st_stiff is an undocumented tuning factor.
st_stiff = (1 - pow(1-st_stiff, 1.0f/solver_iterations)) * 6;
b_stiff = 1 - pow(1-b_stiff, 1.0f/solver_iterations);
hipMalloc((void**)&fp_constraints, vertexCount*sizeof(FixedPointConstraint));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&st_constraints, edgeCount*sizeof(StretchConstraint));
checkCUDAErrorWithLine("Kernel failed!");
hipMalloc((void**)&b_constraints, edgeCount*sizeof(BendConstraint));
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateFPConstraints), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, fp_constraints, dimx, dimz,
dx, dz, y0);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( generateStretchBendConstraints), dim3(fullBlocksPerGridE), dim3(threadsPerBlockE), 0, 0, st_constraints,
b_constraints, pos, edges, triangles, edgeCount);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
// Advance the cloth simulation by one time step dt:
//   1. predictor (apply gravity, integrate velocity into predicted positions)
//   2. fixed-point constraint projection (pins + lock flags)
//   3. solver_iterations Jacobi passes of stretch constraint projection
//   4. commit velocities/positions from the predicted positions
//   5. recompute vertex normals for rendering
void update(float dt)
{
hipLaunchKernelGGL(( preConstraintsUpdate), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, predicted_pos, vel, inv_mass,
dimx, dimz, gravity, dt);
checkCUDAErrorWithLine("Kernel failed!");
// resolve constraints
hipLaunchKernelGGL(( resolveFPConstraints), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, fp_constraints, predicted_pos,
lock_pos, fp_stiff, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
for (int i=0; i<solver_iterations; ++i) {
hipLaunchKernelGGL(( resolveStretchBendConstraints), dim3(fullBlocksPerGridE), dim3(threadsPerBlockE), 0, 0, st_constraints,
b_constraints, predicted_pos, inv_mass, lock_pos, st_stiff, b_stiff, edgeCount);
checkCUDAErrorWithLine("Kernel failed!");
// NOTE(review): kernels on the same stream are already ordered, so this
// per-iteration host sync is likely only useful for debugging/timing.
hipDeviceSynchronize();
}
// TEMP
//resetAcc<<<fullBlocksPerGridV, threadsPerBlock>>>(acc, dimx, dimz);
//computeAcc<<<fullBlocksPerGridE, threadsPerBlockE>>>(acc, st_constraints, predicted_pos, vel,
// inv_mass, Ks, Kd, edgeCount);
//checkCUDAErrorWithLine("Kernel failed!");
//applyAcc<<<fullBlocksPerGridV, threadsPerBlock>>>(acc, vel, predicted_pos, lock_pos, dt, dimx, dimz);
//checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( integrate), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, pos, predicted_pos, vel, dt, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
// compute normals
hipLaunchKernelGGL(( resetNormal), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, normals, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( computeNormal), dim3(fullBlocksPerGridT), dim3(threadsPerBlockE), 0, 0, pos, normals, triangles, triangleCount);
checkCUDAErrorWithLine("Kernel failed!");
hipLaunchKernelGGL(( resizeNormal), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, normals, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
// Copy current positions and normals into the mapped VBO/NBO device pointers
// (vec4 layout: positions w=1, normals w=0), then block until the copy
// finishes so the GL side observes complete data.
void cudaUpdateVAO(float * vbodptr, float * nbodptr)
{
hipLaunchKernelGGL(( sendToVAO), dim3(fullBlocksPerGridV), dim3(threadsPerBlock), 0, 0, pos, normals, vbodptr, nbodptr, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
hipDeviceSynchronize();
}
void freeCuda() {
hipFree(pos);
hipFree(predicted_pos);
hipFree(vel);
hipFree(normals);
hipFree(triangles);
hipFree(edges);
hipFree(lock_pos);
hipFree(inv_mass);
hipFree(fp_constraints);
hipFree(st_constraints);
hipFree(b_constraints);
hipFree(col_onstraints);
} | 509eaa3e1b8240181b223b35172fe216cd23ffe6.cu | #include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "glm/glm.hpp"
#include "utilities.h"
#include "kernel.h"

//GLOBALS
const glm::vec3 gravity(0, -9.8, 0);
int dimx, dimz;
dim3 threadsPerBlock;
dim3 fullBlocksPerGridV; //for parallel vertex operations
dim3 fullBlocksPerGridT; //for parallel triangle operations
int threadsPerBlockE; //for parallel edge operations
int fullBlocksPerGridE; //for parallel edge operations
int vertexCount;
int triangleCount;
int edgeCount;
glm::vec3* pos;
glm::vec3* predicted_pos;
glm::vec3* normals;
glm::vec3* vel;
bool* lock_pos;
float* inv_mass;
//TEMP
//glm::vec3* acc;
//float Ks = 0.01;
//float Kd = 0;
int* triangles;
Edge* edges;
int solver_iterations = 10;
float fp_stiff = 1;
// Resolved an unresolved git merge conflict that was left in the source:
// HEAD had 0.9, branch 064d5102 had 0.5. Keeping HEAD's stiffer value;
// revisit if the softer cloth (0.5) was the intended behavior.
float st_stiff = 0.9;
float b_stiff = 0.05;
float col_stiff = 1;
FixedPointConstraint* fp_constraints;
StretchConstraint* st_constraints;
BendConstraint* b_constraints;
CollisionConstraint* col_onstraints;
//SelfCollisionConstraint* sc_constraints;
// Report the most recent CUDA error on stderr, optionally tagged with a
// source line. Note cudaGetLastError() also clears the sticky error state.
void checkCUDAError(const char *msg, int line = -1)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    if (line >= 0)
        fprintf(stderr, "Line %d: ", line);
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    //exit(EXIT_FAILURE);   // intentionally disabled: log and continue
}
// Initialize vertex attributes: lay the cloth out flat in the y = y0 plane on
// a dimx x dimz grid with spacing (dx, dz); zero velocity, +Y normal,
// unlocked, uniform inverse mass 1/mass. One thread per vertex.
__global__ void initVertexAttributes(glm::vec3* pos, glm::vec3* predicted_pos, glm::vec3* vel,
                                     glm::vec3* normals, bool* lock_pos, float* inv_mass,
                                     int dimx, int dimz, float dx, float dz, float y0, float mass)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= dimx || j >= dimz)
        return;

    int idx = j + i * dimz;
    glm::vec3 rest(i * dx, y0, j * dz);
    pos[idx] = rest;
    predicted_pos[idx] = rest;      // predictor starts at the rest position
    vel[idx] = glm::vec3(0, 0, 0);
    normals[idx] = glm::vec3(0, 1, 0);
    lock_pos[idx] = false;
    inv_mass[idx] = 1/mass;
}
// Populate the triangle list and the edge list for a dimx x dimz grid mesh.
// One thread per grid cell (i in [0, dimx-2], j in [0, dimz-2]); each cell
// emits two triangles (6 vertex indices) and three edges, plus boundary edges
// along the i == dimx-2 / j == dimz-2 rims. An Edge stores its endpoints and
// the base index (into `triangles`) of the triangle on each side; tri2 == -1
// marks a border edge with no opposite triangle.
__global__ void initTriangles(int* triangles, Edge* edges, int dimx, int dimz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx-1 && j < dimz-1) {
// Cells are indexed with a stride of (dimz-1) columns per row.
int index = j + i * (dimz-1);
triangles[6*index ] = dimz*i + j;
triangles[6*index+1] = dimz*i + j + 1;
triangles[6*index+2] = dimz*(i+1) + j;
triangles[6*index+3] = dimz*(i+1) + j;
triangles[6*index+4] = dimz*i + j + 1;
triangles[6*index+5] = dimz*(i+1) + j + 1;
// FIX: neighbor-cell indices must use the same (dimz-1) stride as `index`;
// the original used `dimz`, pointing tri2 at the wrong cell's triangle.
edges[3*index ] = Edge(dimz*i+j, dimz*i+j+1, 6*index, 6*((i-1)*(dimz-1)+j)+3);
edges[3*index+1] = Edge(dimz*i+j+1, dimz*(i+1)+j, 6*index, 6*index+3);
edges[3*index+2] = Edge(dimz*(i+1)+j, dimz*i+j, 6*index, 6*(i*(dimz-1)+j-1)+3);
if (i == 0) {
edges[3*index].tri2 = -1; //no triangle on the other side
}
if (j == 0) {
edges[3*index+2].tri2 = -1; //no triangle on the other side
}
if (i == dimx-2) {
edges[3*(dimx-1)*(dimz-1)+j] = Edge(dimz*(i+1)+j+1, dimz*(i+1)+j, 6*index+3, -1);
}
if (j == dimz-2) {
edges[3*(dimx-1)*(dimz-1)+dimz-1+i] = Edge(dimz*i+j+1, dimz*(i+1)+j+1, 6*index+3, -1);
}
}
}
// Apply external force, damp velocity and compute predicted positions
// Explicit-Euler predictor: one thread per vertex on a 2D dimx x dimz launch;
// `force` is scaled by the per-vertex inverse mass before integration.
__global__ void preConstraintsUpdate(glm::vec3* predicted_pos, glm::vec3* vel, float* inv_mass,
int dimx, int dimz, glm::vec3 force, float dt)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
vel[index] += force * inv_mass[index] * dt;
// no damp velocity for now
predicted_pos[index] += vel[index] * dt;
}
}
// Generate fixed point constraints
// Pins every vertex on the i == 0 edge of the cloth to its rest position;
// every other slot receives a sentinel constraint (vertex id -1 = none).
// One thread per vertex on a 2D dimx x dimz launch.
__global__ void generateFPConstraints(FixedPointConstraint* fp_constraints,
int dimx, int dimz, float dx, float dz, float y0)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
// Earlier variant that pinned only the two corners, kept for reference:
//if (i == 0 && j == 0) {
// glm::vec3 fp(0, y0, 0);
// fp_constraints[index] = FixedPointConstraint(index, fp);
//}
//else if (i == 0 && j == dimz-1) {
// glm::vec3 fp(0, y0, (dimz-1)*dz);
// fp_constraints[index] = FixedPointConstraint(index, fp);
//}
if (i == 0) {
glm::vec3 fp(0, y0, dz*j);
fp_constraints[index] = FixedPointConstraint(index, fp);
}
else {
fp_constraints[index] = FixedPointConstraint(-1, glm::vec3(0));
}
}
}
// Build one stretch constraint per mesh edge, with rest length taken from the
// current (initial) vertex positions. One thread per edge.
// NOTE(review): b_constraints and triangles are accepted but never touched --
// bend constraints appear not to be generated yet, leaving that buffer
// uninitialized. Confirm before consuming b_constraints.
__global__ void generateStretchBendConstraints(StretchConstraint* st_constraints, BendConstraint* b_constraints,
glm::vec3* pos, Edge* edges, int* triangles, int edgeCount)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < edgeCount) {
// generate stretch constraints
int v1 = edges[index].v1;
int v2 = edges[index].v2;
glm::vec3 p1 = pos[v1];
glm::vec3 p2 = pos[v2];
// rest length = edge length at generation time
float l = glm::length(p1 - p2);
st_constraints[index] = StretchConstraint(v1, v2, l);
}
}
// Project fixed point constraints
// Pulls each pinned vertex toward its anchor by `stiff` and marks it in
// lock_pos so the stretch solver treats it as immovable. v0 == -1 marks an
// empty slot. One thread per vertex on a 2D dimx x dimz launch.
__global__ void resolveFPConstraints(FixedPointConstraint* fp_constraints, glm::vec3* predicted_pos,
bool* lock_pos, float stiff, int dimx, int dimz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
FixedPointConstraint fpc = fp_constraints[index];
if (fpc.v0 != -1) {
lock_pos[fpc.v0] = true;
glm::vec3 dist = fpc.fixed_point - predicted_pos[fpc.v0];
predicted_pos[fpc.v0] += dist * stiff;
}
}
}
// Project all stretch constraints for one Jacobi-style PBD solver pass.
// One thread per edge; corrections are accumulated with per-component
// atomicAdd because several edges share each vertex. Positions are read
// non-atomically while other threads write, so a pass may use slightly
// stale data (acceptable for an iterative solver).
// NOTE(review): glm::normalize(sp1-sp2) yields NaN when the two endpoints
// coincide; a degenerate edge would poison the mesh. b_constraints/b_stiff
// are accepted but bend constraints are not projected here.
__global__ void resolveStretchBendConstraints(StretchConstraint* st_constraints, BendConstraint* b_constraints,
glm::vec3* predicted_pos, float* inv_mass, bool* lock_pos,
float st_stiff, float b_stiff, int edgeCount)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < edgeCount) {
// project stretch constraints
StretchConstraint stc = st_constraints[index];
glm::vec3 sp1 = predicted_pos[stc.v1];
glm::vec3 sp2 = predicted_pos[stc.v2];
float l = glm::length(sp1 - sp2);
// Locked vertices are treated as infinitely heavy (weight 0).
float sw1 = lock_pos[stc.v1] ? 0 : inv_mass[stc.v1];
float sw2 = lock_pos[stc.v2] ? 0 : inv_mass[stc.v2];
if (!(sw1 < FLT_EPSILON && sw2 < FLT_EPSILON)) {
glm::vec3 dist1 = -sw1 / (sw1+sw2) * (l-stc.rest_length) * glm::normalize(sp1-sp2);
glm::vec3 dist2 = sw2 / (sw1+sw2) * (l-stc.rest_length) * glm::normalize(sp1-sp2);
atomicAdd(&(predicted_pos[stc.v1].x), dist1.x * st_stiff);
atomicAdd(&(predicted_pos[stc.v1].y), dist1.y * st_stiff);
atomicAdd(&(predicted_pos[stc.v1].z), dist1.z * st_stiff);
atomicAdd(&(predicted_pos[stc.v2].x), dist2.x * st_stiff);
atomicAdd(&(predicted_pos[stc.v2].y), dist2.y * st_stiff);
atomicAdd(&(predicted_pos[stc.v2].z), dist2.z * st_stiff);
}
}
}
//__global__ void resetAcc(glm::vec3* acc, int dimx, int dimz)
//{
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// int j = blockIdx.y * blockDim.y + threadIdx.y;
//
// if (i < dimx && j < dimz) {
// int index = j + i * dimz;
//
// acc[index] = glm::vec3(0, 0, 0);
// }
//}
//
//__global__ void computeAcc(glm::vec3* acc, StretchConstraint* st_constraints, glm::vec3* predicted_pos,
// glm::vec3* vel, float* inv_mass, float Ks, float Kd, int edgeCount)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
//
// if (index < edgeCount) {
// // project stretch constraints
// StretchConstraint stc = st_constraints[index];
// glm::vec3 sp1 = predicted_pos[stc.v1];
// glm::vec3 sp2 = predicted_pos[stc.v2];
// glm::vec3 sv1 = vel[stc.v1];
// glm::vec3 sv2 = vel[stc.v2];
// float l = glm::length(sp1 - sp2);
//
// glm::vec3 acc1 = -Ks * (l-stc.rest_length) * glm::normalize(sp1-sp2) - Kd * (sv1-sv2)
// * glm::normalize(sp1-sp2) * glm::normalize(sp1-sp2);
// glm::vec3 acc2 = -Ks * (l-stc.rest_length) * glm::normalize(sp2-sp1) - Kd * (sv2-sv1)
// * glm::normalize(sp2-sp1) * glm::normalize(sp2-sp1);
// acc1 = acc1 * inv_mass[stc.v1];
// acc2 = acc2 * inv_mass[stc.v2];
//
// atomicAdd(&(acc[stc.v1].x), acc1.x);
// atomicAdd(&(acc[stc.v1].y), acc1.y);
// atomicAdd(&(acc[stc.v1].z), acc1.z);
// atomicAdd(&(acc[stc.v2].x), acc2.x);
// atomicAdd(&(acc[stc.v2].y), acc2.y);
// atomicAdd(&(acc[stc.v2].z), acc2.z);
// }
//}
//
//__global__ void applyAcc(glm::vec3* acc, glm::vec3* vel, glm::vec3* predicted_pos, bool* lock_pos,
// float dt, int dimx, int dimz)
//{
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// int j = blockIdx.y * blockDim.y + threadIdx.y;
//
// if (i < dimx && j < dimz) {
// int index = j + i * dimz;
//
// if (!lock_pos[index]) {
// predicted_pos[index] += (vel[index] + acc[index]*dt) * dt;
// }
// }
//}
// Update velocity and position based on predicted position
// Standard PBD commit: v = (p_predicted - p) / dt, then p = p_predicted.
// One thread per vertex on a 2D dimx x dimz launch.
__global__ void integrate(glm::vec3* pos, glm::vec3* predicted_pos, glm::vec3* vel, float dt, int dimx, int dimz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
vel[index] = (predicted_pos[index] - pos[index]) / dt;
pos[index] = predicted_pos[index];
}
}
// Zero every per-vertex normal before face-normal accumulation.
__global__ void resetNormal(glm::vec3* normals, int dimx, int dimz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
normals[index] = glm::vec3(0, 0, 0);
}
}
// Accumulate each triangle's unit face normal onto its three vertices.
// One thread per triangle; per-component atomicAdd is required because
// vertices are shared between triangles and glm::vec3 has no atomic overload.
__global__ void computeNormal(glm::vec3* pos, glm::vec3* normals, int* triangles, int triangleCount)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < triangleCount) {
int v0 = triangles[3*index];
int v1 = triangles[3*index+1];
int v2 = triangles[3*index+2];
glm::vec3 p0 = pos[v0];
glm::vec3 p1 = pos[v1];
glm::vec3 p2 = pos[v2];
// Face normal from two edge vectors; winding determines its sign.
glm::vec3 n = glm::normalize(glm::cross(p1-p0, p2-p1));
atomicAdd(&(normals[v0].x), n.x);
atomicAdd(&(normals[v0].y), n.y);
atomicAdd(&(normals[v0].z), n.z);
atomicAdd(&(normals[v1].x), n.x);
atomicAdd(&(normals[v1].y), n.y);
atomicAdd(&(normals[v1].z), n.z);
atomicAdd(&(normals[v2].x), n.x);
atomicAdd(&(normals[v2].y), n.y);
atomicAdd(&(normals[v2].z), n.z);
}
}
// Renormalize the accumulated vertex normals to unit length.
__global__ void resizeNormal(glm::vec3* normals, int dimx, int dimz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
normals[index] = glm::normalize(normals[index]);
}
}
// Update vbo and nbo
// Copies positions and normals into the mapped buffer objects using a vec4
// per vertex: positions carry w = 1 (points), normals w = 0 (directions).
__global__ void sendToVAO(glm::vec3* pos, glm::vec3* normals, float* vbo, float* nbo,
int dimx, int dimz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < dimx && j < dimz) {
int index = j + i * dimz;
vbo[4*index ] = pos[index].x;
vbo[4*index+1] = pos[index].y;
vbo[4*index+2] = pos[index].z;
vbo[4*index+3] = 1;
nbo[4*index ] = normals[index].x;
nbo[4*index+1] = normals[index].y;
nbo[4*index+2] = normals[index].z;
nbo[4*index+3] = 0;
}
}
//Initialize positions and other attributes of vertices
// Allocates all device buffers for a dimx x dimz cloth, seeds the vertex
// attributes, builds the triangle/edge topology, and generates the
// fixed-point and stretch constraint sets. Call before update().
// NOTE(review): the "Kernel failed!" message is reused after plain cudaMalloc
// calls too, so a reported failure may actually be an allocation error.
void initCuda(int xdim, int zdim, float dx, float dz, float y0, float mass)
{
dimx = xdim;
dimz = zdim;
vertexCount = dimx*dimz;
triangleCount = 2*(dimx-1)*(dimz-1);
// 3 edges per grid cell plus the two far boundary rims (dimx-1 and dimz-1).
edgeCount = 3*(dimx-1)*(dimz-1)+dimx-1+dimz-1;
cudaMalloc((void**)&pos, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&predicted_pos, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&vel, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&normals, vertexCount*sizeof(glm::vec3));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&lock_pos, vertexCount*sizeof(bool));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&inv_mass, vertexCount*sizeof(float));
checkCUDAErrorWithLine("Kernel failed!");
// TEMP
//cudaMalloc((void**)&acc, vertexCount*sizeof(glm::vec3));
//checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&triangles, 3*triangleCount*sizeof(int));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&edges, edgeCount*sizeof(Edge));
checkCUDAErrorWithLine("Kernel failed!");
// 2D launch config for per-vertex kernels.
threadsPerBlock = dim3(blockSize, blockSize);
fullBlocksPerGridV = dim3((int)ceil(float(dimx)/float(blockSize)), (int)ceil(float(dimz)/float(blockSize)));
initVertexAttributes<<<fullBlocksPerGridV, threadsPerBlock>>>(pos, predicted_pos, vel, normals,
lock_pos, inv_mass, dimx, dimz, dx, dz, y0, mass);
checkCUDAErrorWithLine("Kernel failed!");
// Per-cell launch: one thread per (dimx-1) x (dimz-1) quad.
dim3 fullBlocksPerGridV1 = dim3((int)ceil(float(dimx-1)/float(blockSize)), (int)ceil(float(dimz-1)/float(blockSize)));
initTriangles<<<fullBlocksPerGridV1, threadsPerBlock>>>(triangles, edges, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
// 1D launch config for per-edge / per-triangle kernels.
threadsPerBlockE = blockSize * blockSize;
fullBlocksPerGridE = (int)ceil(float(edgeCount)/float(threadsPerBlockE));
fullBlocksPerGridT = (int)ceil(float(triangleCount)/float(threadsPerBlockE));
// calculate stiffness based on solver iterations
st_stiff = (1 - pow(1-st_stiff, 1.0f/solver_iterations)) * 6;
b_stiff = 1 - pow(1-b_stiff, 1.0f/solver_iterations);
cudaMalloc((void**)&fp_constraints, vertexCount*sizeof(FixedPointConstraint));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&st_constraints, edgeCount*sizeof(StretchConstraint));
checkCUDAErrorWithLine("Kernel failed!");
cudaMalloc((void**)&b_constraints, edgeCount*sizeof(BendConstraint));
checkCUDAErrorWithLine("Kernel failed!");
generateFPConstraints<<<fullBlocksPerGridV, threadsPerBlock>>>(fp_constraints, dimx, dimz,
dx, dz, y0);
checkCUDAErrorWithLine("Kernel failed!");
generateStretchBendConstraints<<<fullBlocksPerGridE, threadsPerBlockE>>>(st_constraints,
b_constraints, pos, edges, triangles, edgeCount);
checkCUDAErrorWithLine("Kernel failed!");
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() replaces it.
cudaDeviceSynchronize();
}
// Advance the cloth simulation by one time step dt:
//   1. predictor (apply gravity, integrate velocity into predicted positions)
//   2. fixed-point constraint projection (pins + lock flags)
//   3. solver_iterations Jacobi passes of stretch constraint projection
//   4. commit velocities/positions from the predicted positions
//   5. recompute vertex normals for rendering
void update(float dt)
{
preConstraintsUpdate<<<fullBlocksPerGridV, threadsPerBlock>>>(predicted_pos, vel, inv_mass,
dimx, dimz, gravity, dt);
checkCUDAErrorWithLine("Kernel failed!");
// resolve constraints
resolveFPConstraints<<<fullBlocksPerGridV, threadsPerBlock>>>(fp_constraints, predicted_pos,
lock_pos, fp_stiff, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
for (int i=0; i<solver_iterations; ++i) {
resolveStretchBendConstraints<<<fullBlocksPerGridE, threadsPerBlockE>>>(st_constraints,
b_constraints, predicted_pos, inv_mass, lock_pos, st_stiff, b_stiff, edgeCount);
checkCUDAErrorWithLine("Kernel failed!");
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() replaces it.
// NOTE(review): kernels on the same stream are already ordered, so this
// per-iteration host sync is likely only useful for debugging/timing.
cudaDeviceSynchronize();
}
// TEMP
//resetAcc<<<fullBlocksPerGridV, threadsPerBlock>>>(acc, dimx, dimz);
//computeAcc<<<fullBlocksPerGridE, threadsPerBlockE>>>(acc, st_constraints, predicted_pos, vel,
// inv_mass, Ks, Kd, edgeCount);
//checkCUDAErrorWithLine("Kernel failed!");
//applyAcc<<<fullBlocksPerGridV, threadsPerBlock>>>(acc, vel, predicted_pos, lock_pos, dt, dimx, dimz);
//checkCUDAErrorWithLine("Kernel failed!");
integrate<<<fullBlocksPerGridV, threadsPerBlock>>>(pos, predicted_pos, vel, dt, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
// compute normals
resetNormal<<<fullBlocksPerGridV, threadsPerBlock>>>(normals, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
computeNormal<<<fullBlocksPerGridT, threadsPerBlockE>>>(pos, normals, triangles, triangleCount);
checkCUDAErrorWithLine("Kernel failed!");
resizeNormal<<<fullBlocksPerGridV, threadsPerBlock>>>(normals, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() replaces it.
cudaDeviceSynchronize();
}
// Copy current positions and normals into the mapped VBO/NBO device pointers
// (vec4 layout: positions w=1, normals w=0), then block until the copy
// finishes so the GL side observes complete data.
void cudaUpdateVAO(float * vbodptr, float * nbodptr)
{
sendToVAO<<<fullBlocksPerGridV, threadsPerBlock>>>(pos, normals, vbodptr, nbodptr, dimx, dimz);
checkCUDAErrorWithLine("Kernel failed!");
// cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() replaces it.
cudaDeviceSynchronize();
}
void freeCuda() {
cudaFree(pos);
cudaFree(predicted_pos);
cudaFree(vel);
cudaFree(normals);
cudaFree(triangles);
cudaFree(edges);
cudaFree(lock_pos);
cudaFree(inv_mass);
cudaFree(fp_constraints);
cudaFree(st_constraints);
cudaFree(b_constraints);
cudaFree(col_onstraints);
} |
920d637d6e41a763fbbceae87be364140b497c6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "gsimcore.cuh"
//#include "boid.cuh"
#ifdef _WIN32
#include <Windows.h>
#else
#include <sys/time.h>
#endif
//#include "test.cuh"
//#include "socialForce.cuh"
#include "SimpleClone.cuh"
// Entry point: initialize the simulation from a config file, run the main
// loop, and report the GPU-timed execution time.
int main(int argc, char *argv[]){
	//argv[1]: config.txt
	//argv[2]: numAgent
	// Guard against a missing config argument (argv[1] was previously
	// dereferenced unconditionally).
	if (argc < 2) {
		printf("usage: %s <config.txt> [numAgent]\n", argv[0]);
		return 1;
	}
	init<SimpleAgentData>(argv[1]);
	SimpleModel *model_h = new SimpleModel();

	/*Main work started here*/
	// Time the whole simulation loop with device events.
	hipEvent_t start, stop;
	float time;
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);

	doLoop(model_h);
	hipDeviceSynchronize();

	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&time, start, stop);
	printf("execution time: %f\n", time);

	// Events are not released automatically; destroy them before exit.
	hipEventDestroy(start);
	hipEventDestroy(stop);
	return 0;
}
| 920d637d6e41a763fbbceae87be364140b497c6a.cu | #include "gsimcore.cuh"
//#include "boid.cuh"
#ifdef _WIN32
#include <Windows.h>
#else
#include <sys/time.h>
#endif
//#include "test.cuh"
//#include "socialForce.cuh"
#include "SimpleClone.cuh"
// Entry point: initialize the simulation from a config file, run the main
// loop, and report the GPU-timed execution time.
int main(int argc, char *argv[]){
	//argv[1]: config.txt
	//argv[2]: numAgent
	// Guard against a missing config argument (argv[1] was previously
	// dereferenced unconditionally).
	if (argc < 2) {
		printf("usage: %s <config.txt> [numAgent]\n", argv[0]);
		return 1;
	}
	init<SimpleAgentData>(argv[1]);
	SimpleModel *model_h = new SimpleModel();

	/*Main work started here*/
	// Time the whole simulation loop with device events.
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);

	doLoop(model_h);
	cudaDeviceSynchronize();

	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	printf("execution time: %f\n", time);

	// Events are not released automatically; destroy them before exit.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return 0;
}
|
8911f4723795c605046911bb121122bdf35cf07a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MedV4D/Imaging/cuda/GraphOperations.h"
#include "MedV4D/Imaging/cuda/GraphDefinitions.h"
#include "MedV4D/Imaging/cuda/detail/GraphMinCut.cuh"
#include "MedV4D/Imaging/cuda/detail/RegionAdjacencyGraph.cuh"
#include "MedV4D/Imaging/cuda/detail/CUDAFiltersUtils.cuh"
// Flag which labeled regions contain user marker voxels.
// One thread per voxel (flat index over a 2D grid of 1D blocks).
// Markers > 240 set aMarkedRegions2; markers in (0, 240] set aMarkedRegions1.
// NOTE(review): the meaning of the 240 threshold (e.g. foreground vs
// background seeds) is not visible here; confirm against the marker producer.
__global__ void
getMarkedRegionsIDsKernel( Buffer3D< uint32 > aLabeledRegions, uint32 aRegionCount, Buffer3D< uint8 > aMarkers, bool *aMarkedRegions1, bool *aMarkedRegions2 )
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
int idx = blockId * blockDim.x + threadIdx.x;
if ( idx < aMarkers.mLength ) {
uint8 val = aMarkers.mData[idx];
if ( val > 0 ) {
// Region label of this voxel indexes the output flag arrays.
uint32 tmp = aLabeledRegions.mData[idx];
if( val > 240 ) {
aMarkedRegions2[tmp] = true;
} else {
aMarkedRegions1[tmp] = true;
}
}
}
}
// Allocate per-region marker flag vectors on the device.
// NOTE(review): this function looks unfinished -- it never launches
// getMarkedRegionsIDsKernel, and the vectors are destroyed on return, so no
// result is produced or returned. Confirm the intended behavior.
void
getMarkedRegionsIDs( const Buffer3D< uint32 > &aLabeledRegions, uint32 aRegionCount, const Buffer3D< uint8 > &aMarkers )
{
thrust::device_vector< bool > markedRegions1( aRegionCount+1, false );
thrust::device_vector< bool > markedRegions2( aRegionCount+1, false );
}
// Build a region-adjacency graph from the labeled volume and run
// push-relabel max-flow / min-cut on it.
// NOTE(review): regionCount is taken as the maximum label, which assumes
// labels are dense starting at 1; source/sink are hard-coded as 1 and
// regionCount-1 (see the inline TODO). The started clock is never reported.
template< typename TEType >
void
pushRelabelMaxFlow( const Buffer3D< uint32 > &aLabeledRegions, const Buffer3D< TEType > &aInput )
{
CheckCudaErrorState( "Before pushRelabelMaxFlow() toplevel code" );
M4D::Common::Clock clock;
thrust::device_ptr<uint32 > res = thrust::max_element(
thrust::device_pointer_cast( aLabeledRegions.mData ),
thrust::device_pointer_cast( aLabeledRegions.mData+aLabeledRegions.mLength )
);
size_t regionCount = *res;
D_PRINT( "Region count " << regionCount );
// 25 candidate edges per region is a capacity heuristic -- presumably sized
// for the neighborhood used by fillEdgeList; confirm against that kernel.
thrust::device_vector< EdgeRecord > edges( regionCount*25 );
thrust::device_vector< float > edgeWeights( edges.size() );
size_t edgeCount = 0;
D_PRINT( "After allocation in " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
fillEdgeList( aLabeledRegions, aInput, edges, edgeWeights, edgeCount );
D_PRINT( "After fillEdgeList() : " << cudaMemoryInfoText() );
//thrust::copy( edgeWeights.begin(), edgeWeights.begin() + edgeCount, std::ostream_iterator<float>(std::cout, "\n"));
pushRelabelMaxFlow( edgeCount, regionCount, edges, edgeWeights, 1, regionCount - 1 ); //TODO - sink source
}
// Convenience overload: copies the image regions into temporary device
// buffers, runs the buffer-based max-flow, then frees the temporaries.
// NOTE(review): the device buffers leak if the inner call throws before the
// hipFree calls run.
template< typename TEType >
void
pushRelabelMaxFlow( M4D::Imaging::ImageRegion< uint32, 3 > aLabeledRegions, M4D::Imaging::ImageRegion< TEType, 3 > aInput )
{
CheckCudaErrorState( "Before pushRelabelMaxFlow() toplevel code" );
M4D::Common::Clock clock;
D_PRINT( "Before " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
Buffer3D< uint32 > labeledRegionsBuffer = CudaBuffer3DFromImageRegionCopy( aLabeledRegions );
Buffer3D< TEType > inputBuffer = CudaBuffer3DFromImageRegionCopy( aInput );
pushRelabelMaxFlow( labeledRegionsBuffer, inputBuffer );
hipFree( labeledRegionsBuffer.mData );
hipFree( inputBuffer.mData );
}
template void pushRelabelMaxFlow<signed char>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<signed char, 3u>);
template void pushRelabelMaxFlow<unsigned char>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned char, 3u>);
template void pushRelabelMaxFlow<short>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<short, 3u>);
template void pushRelabelMaxFlow<unsigned short>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned short, 3u>);
template void pushRelabelMaxFlow<int>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<int, 3u>);
template void pushRelabelMaxFlow<unsigned int>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned int, 3u>);
template void pushRelabelMaxFlow<long long>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<long long, 3u>);
template void pushRelabelMaxFlow<unsigned long long>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned long long, 3u>);
template void pushRelabelMaxFlow<float>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<float, 3u>);
template void pushRelabelMaxFlow<double>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<double, 3u>);
| 8911f4723795c605046911bb121122bdf35cf07a.cu | #include "MedV4D/Imaging/cuda/GraphOperations.h"
#include "MedV4D/Imaging/cuda/GraphDefinitions.h"
#include "MedV4D/Imaging/cuda/detail/GraphMinCut.cuh"
#include "MedV4D/Imaging/cuda/detail/RegionAdjacencyGraph.cuh"
#include "MedV4D/Imaging/cuda/detail/CUDAFiltersUtils.cuh"
// Kernel: scan the marker volume and flag the region IDs that carry markers.
// Marker values above 240 set aMarkedRegions2[regionID]; any other nonzero
// marker sets aMarkedRegions1[regionID]. Both flag arrays must hold at least
// (max region ID + 1) entries.
// Launch: 2-D grid of 1-D blocks, one thread per voxel, guarded by mLength.
// NOTE(review): aRegionCount is not used inside the kernel.
__global__ void
getMarkedRegionsIDsKernel( Buffer3D< uint32 > aLabeledRegions, uint32 aRegionCount, Buffer3D< uint8 > aMarkers, bool *aMarkedRegions1, bool *aMarkedRegions2 )
{
uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; // flatten the 2-D grid
int idx = blockId * blockDim.x + threadIdx.x;
if ( idx < aMarkers.mLength ) {
uint8 val = aMarkers.mData[idx];
if ( val > 0 ) {
uint32 tmp = aLabeledRegions.mData[idx]; // region ID of this voxel
if( val > 240 ) {
aMarkedRegions2[tmp] = true;
} else {
aMarkedRegions1[tmp] = true;
}
}
}
}
// Host helper: intended to collect the IDs of marked regions.
// NOTE(review): the body only allocates the two flag vectors — it never
// launches getMarkedRegionsIDsKernel and returns nothing, so the vectors
// are discarded immediately. Looks unfinished; verify against callers.
void
getMarkedRegionsIDs( const Buffer3D< uint32 > &aLabeledRegions, uint32 aRegionCount, const Buffer3D< uint8 > &aMarkers )
{
thrust::device_vector< bool > markedRegions1( aRegionCount+1, false );
thrust::device_vector< bool > markedRegions2( aRegionCount+1, false );
}
// Builds a region-adjacency graph from the labeled volume and runs
// push-relabel max-flow on it.
// Steps: (1) region count = maximum label found in the volume,
// (2) allocate an edge list sized by a 25-edges-per-region heuristic,
// (3) fillEdgeList() populates edges/weights and sets edgeCount,
// (4) the edge-list overload of pushRelabelMaxFlow() solves the flow.
template< typename TEType >
void
pushRelabelMaxFlow( const Buffer3D< uint32 > &aLabeledRegions, const Buffer3D< TEType > &aInput )
{
CheckCudaErrorState( "Before pushRelabelMaxFlow() toplevel code" );
M4D::Common::Clock clock; // NOTE(review): never read afterwards — presumably leftover timing scaffolding.
thrust::device_ptr<uint32 > res = thrust::max_element(
thrust::device_pointer_cast( aLabeledRegions.mData ),
thrust::device_pointer_cast( aLabeledRegions.mData+aLabeledRegions.mLength )
);
size_t regionCount = *res; // single device->host read of the maximum label
D_PRINT( "Region count " << regionCount );
thrust::device_vector< EdgeRecord > edges( regionCount*25 ); // capacity heuristic: 25 edges per region
thrust::device_vector< float > edgeWeights( edges.size() );
size_t edgeCount = 0;
D_PRINT( "After allocation in " << __FUNCTION__ << ": " << cudaMemoryInfoText() );
fillEdgeList( aLabeledRegions, aInput, edges, edgeWeights, edgeCount );
D_PRINT( "After fillEdgeList() : " << cudaMemoryInfoText() );
//thrust::copy( edgeWeights.begin(), edgeWeights.begin() + edgeCount, std::ostream_iterator<float>(std::cout, "\n"));
pushRelabelMaxFlow( edgeCount, regionCount, edges, edgeWeights, 1, regionCount - 1 ); //TODO - sink source
}
/**
 * Host-side convenience overload: copies the labeled-region image and the
 * intensity image into temporary device buffers, invokes the Buffer3D
 * variant of pushRelabelMaxFlow(), and frees the device memory afterwards.
 */
template< typename TEType >
void
pushRelabelMaxFlow( M4D::Imaging::ImageRegion< uint32, 3 > aLabeledRegions, M4D::Imaging::ImageRegion< TEType, 3 > aInput )
{
	CheckCudaErrorState( "Before pushRelabelMaxFlow() toplevel code" );
	M4D::Common::Clock clock;
	D_PRINT( "Before " << __FUNCTION__ << ": " << cudaMemoryInfoText() );

	// Stage both inputs on the device, run the solver, then clean up.
	Buffer3D< uint32 > deviceLabels = CudaBuffer3DFromImageRegionCopy( aLabeledRegions );
	Buffer3D< TEType > deviceInput = CudaBuffer3DFromImageRegionCopy( aInput );
	pushRelabelMaxFlow( deviceLabels, deviceInput );
	cudaFree( deviceLabels.mData );
	cudaFree( deviceInput.mData );
}
template void pushRelabelMaxFlow<signed char>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<signed char, 3u>);
template void pushRelabelMaxFlow<unsigned char>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned char, 3u>);
template void pushRelabelMaxFlow<short>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<short, 3u>);
template void pushRelabelMaxFlow<unsigned short>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned short, 3u>);
template void pushRelabelMaxFlow<int>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<int, 3u>);
template void pushRelabelMaxFlow<unsigned int>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned int, 3u>);
template void pushRelabelMaxFlow<long long>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<long long, 3u>);
template void pushRelabelMaxFlow<unsigned long long>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<unsigned long long, 3u>);
template void pushRelabelMaxFlow<float>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<float, 3u>);
template void pushRelabelMaxFlow<double>(M4D::Imaging::ImageRegion<unsigned int, 3u>, M4D::Imaging::ImageRegion<double, 3u>);
|
4d6540b31e086201b2a51d24e8b62a7c66ed15b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "digitize.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Benchmark driver for the digitize kernel (auto-generated harness, HIP).
 * argv[1]: number of sizes from matrices_ to sweep. Each size is timed with
 * every launch configuration in blocks_ (10 warm-up launches, then 1000
 * timed launches) and printed as [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 */
int main(int argc, char **argv) {
	hipSetDevice(0);
	if (argc < 2) { // guard: the sweep bound comes from argv[1]
		fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
		return 1;
	}
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			size_t n = (size_t)XSIZE * YSIZE;
			// BUGFIX: the float buffer needs n*sizeof(float) bytes, not n bytes —
			// the original undersized allocation let the kernel write out of bounds.
			float *idat = NULL;
			hipMalloc(&idat, n * sizeof(float));
			uint8_t *udat = NULL;
			hipMalloc(&udat, n * sizeof(uint8_t));
			// Round the launch shape up to whole blocks.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			hipFree(0); // force context creation before timing
			hipLaunchKernelGGL(digitize, dim3(gridBlock), dim3(threadBlock), 0, 0, idat, udat, n);
			hipDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { // warm-up
				hipLaunchKernelGGL(digitize, dim3(gridBlock), dim3(threadBlock), 0, 0, idat, udat, n);
			}
			hipDeviceSynchronize(); // BUGFIX: drain warm-up work before starting the clock
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				hipLaunchKernelGGL(digitize, dim3(gridBlock), dim3(threadBlock), 0, 0, idat, udat, n);
			}
			hipDeviceSynchronize(); // BUGFIX: launches are async — wait before reading the clock
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// BUGFIX: release per-configuration buffers (previously leaked 20x per size).
			hipFree(idat);
			hipFree(udat);
		}
	}
	return 0;
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "digitize.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Benchmark driver for the digitize kernel (auto-generated harness, CUDA).
 * argv[1]: number of sizes from matrices_ to sweep. Each size is timed with
 * every launch configuration in blocks_ (10 warm-up launches, then 1000
 * timed launches) and printed as [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
 */
int main(int argc, char **argv) {
	cudaSetDevice(0);
	if (argc < 2) { // guard: the sweep bound comes from argv[1]
		fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
		return 1;
	}
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			size_t n = (size_t)XSIZE * YSIZE;
			// BUGFIX: the float buffer needs n*sizeof(float) bytes, not n bytes —
			// the original undersized allocation let the kernel write out of bounds.
			float *idat = NULL;
			cudaMalloc(&idat, n * sizeof(float));
			uint8_t *udat = NULL;
			cudaMalloc(&udat, n * sizeof(uint8_t));
			// Round the launch shape up to whole blocks.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0); // force context creation before timing
			digitize<<<gridBlock, threadBlock>>>(idat, udat, n);
			cudaDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { // warm-up
				digitize<<<gridBlock, threadBlock>>>(idat, udat, n);
			}
			cudaDeviceSynchronize(); // BUGFIX: drain warm-up work before starting the clock
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				digitize<<<gridBlock, threadBlock>>>(idat, udat, n);
			}
			cudaDeviceSynchronize(); // BUGFIX: launches are async — wait before reading the clock
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// BUGFIX: release per-configuration buffers (previously leaked 20x per size).
			cudaFree(idat);
			cudaFree(udat);
		}
	}
	return 0;
}
0278eb5310a0bb684b0dd91b7696bfa9be905ff9.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************
* This file tests cuda memory management APIs.
*****************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define initializingMode_ 0
#define dataBlockSize_ 16*1024*1024
//#define totalLoop_ 1024*2
#define totalLoop_ 16*3
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#define CHECK_CUDA_RESULT(N) { \
hipError_t result = N; \
if (result != 0) { \
printf("CUDA call on line %d returned error %d\n", __LINE__, \
result); \
printf("Error: %s.\n", hipGetErrorString(result)); \
exit(1); \
} }
/*
* Initialzing and computing kernels
*/
// Kernel: fill A with `value`, one element per thread.
// NOTE(review): no bounds check. initialData() launches a rounded-up grid,
// so threads past n write out of range unless n is a multiple of 1024;
// this harness only uses dataBlockSize_ = 16M, which is. Verify other callers.
__global__ void vecInit(float *A, float value) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
A[i] = value;
}
// Kernel: element-wise C[i] = A[i] + B[i], one element per thread.
// NOTE(review): no bounds check — callers must launch exactly the element
// count (or a multiple that the buffers can absorb). See vecInit.
__global__ void vecAdd(float* A, float* B, float* C) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// Kernel: element-wise C[i] = A[i] * B[i], one element per thread (no bounds
// check). Not referenced by the profiling paths in this translation unit.
__global__ void vecMultiply(float* A, float* B, float* C) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] * B[i];
}
// Kernel: element-wise D[i] = (A[i] + B[i]) * C[i], one element per thread
// (no bounds check). Not referenced by the profiling paths in this file.
__global__ void vecMultiplyAndAdd(float* A, float* B, float* C, float* D) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
D[i] = (A[i] + B[i]) * C[i];
}
/// Wall-clock time in seconds (microsecond resolution, via gettimeofday).
double currentTimeCPUSecond() {
	struct timeval now;
	gettimeofday(&now, NULL);
	double seconds = (double) now.tv_sec;
	double fraction = (double) now.tv_usec * 1.e-6;
	return seconds + fraction;
}
// Thin wrapper around hipDeviceGetAttribute; the error code is discarded.
// Not referenced elsewhere in this translation unit.
void getCudaDeviceAttribute(int* value, hipDeviceAttribute_t attr, int device) {
hipDeviceGetAttribute(value, attr, device);
}
// Fill A[0..n) with `data`.
// mode == 0: A must be device-accessible; a vecInit launch (grid rounded up
//            — see the vecInit bounds note) fills it on the GPU, followed
//            by a synchronize so the values are visible on return.
// mode != 0: A must be host-accessible (e.g. managed memory); a plain CPU
//            loop writes the values.
void initialData(float *A, unsigned int n, float data, int mode = 0) {
//Using GPU to initialize the data, so that no need to copy memory from GPU to CPU
//printf("Initialize the data.\n");
if (mode == 0) {
dim3 threadsPerBlock(1024);
dim3 numBlocks((n+threadsPerBlock.x-1) / threadsPerBlock.x); // ceil-div
//printf("Threads per block: %d, Blocks: %d.\n", threadsPerBlock.x, numBlocks.x);
hipLaunchKernelGGL(( vecInit), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, A, data);
hipDeviceSynchronize(); // block until the fill has completed
} else {
unsigned int i;
for (i = 0; i < n; i++) {
A[i] = data;
}
}
}
/**
 * Baseline profile: vector addition with explicit device memory (HIP).
 * Allocates three device buffers of nElement floats, initializes A=B=2 and
 * C=0, runs one vecAdd launch and verifies C==4 on the host. The single
 * measured init/add times are multiplied by totalLoop so the printout is
 * comparable with profileAddtionOnUM(), which really loops totalLoop times.
 */
void profileAddtionOnDevice(unsigned int nElement, unsigned int totalLoop) {
	unsigned int nBytes = nElement * sizeof(float);
	if (nBytes < 1024) {
		printf("==== Allocate nbytes is %d B.\n", nBytes);
	} else if (nBytes >= 1024 && nBytes < 1024*1024) {
		printf("==== Allocate nbytes is %d KB.\n", nBytes/1024);
	} else {
		printf("==== Allocate nbytes is %d MB.\n", nBytes/(1024*1024));
	}
	// Plain device buffers — the unified-memory-free baseline.
	float *memoryOnDevice_A, *memoryOnDevice_B, *memoryOnDevice_C;
	CHECK_CUDA_RESULT(hipMalloc(&memoryOnDevice_A, nBytes));
	CHECK_CUDA_RESULT(hipMalloc(&memoryOnDevice_B, nBytes));
	CHECK_CUDA_RESULT(hipMalloc(&memoryOnDevice_C, nBytes));
	printf("===== inital data begins...\n");
	int mode = initializingMode_;
	double iStart = currentTimeCPUSecond();
	{
		initialData(memoryOnDevice_A, nElement, 2.0f, mode);
		initialData(memoryOnDevice_B, nElement, 2.0f, mode);
		initialData(memoryOnDevice_C, nElement, 0.0f, mode);
	}
	double iStop = currentTimeCPUSecond();
	if (mode == 0) {
		printf("==== GPU mode: time for initializing the data: %f.\n", (iStop - iStart)*totalLoop);
	} else {
		printf("==== CPU mode: time for initializing the data: %f.\n", (iStop - iStart)*totalLoop);
	}
	printf("===== add data begins...\n");
	iStart = currentTimeCPUSecond();
	{
		dim3 threadsPerBlock(1024);
		dim3 numBlocks((nElement+threadsPerBlock.x-1) / threadsPerBlock.x);
		hipLaunchKernelGGL(( vecAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, memoryOnDevice_A, memoryOnDevice_B, memoryOnDevice_C);
		hipDeviceSynchronize();
	}
	iStop = currentTimeCPUSecond();
	printf("==== Time for adding the data: %f.\n", (iStop - iStart)*totalLoop);
	// Host-side verification: 2 + 2 == 4 for every element.
	float ans = 4.0f;
	float *result = (float *)malloc(nBytes);
	CHECK_CUDA_RESULT(hipMemcpy(result, memoryOnDevice_C, nBytes, hipMemcpyDeviceToHost));
	unsigned int jj;
	for (jj = 0; jj < nElement; jj++) {
		if (result[jj] != ans) {
			printf("Error happens, should enable DEBUG mode to investigate.\n");
			break;
		}
	}
	if (jj == nElement) {
		printf("==== Testing is passed.\n");
	}
	free(result);  // BUGFIX: the host verification buffer was leaked.
	hipFree(memoryOnDevice_A);
	hipFree(memoryOnDevice_B);
	hipFree(memoryOnDevice_C);
}
// Unified-memory profile: the same A+B=C workload as profileAddtionOnDevice(),
// but repeated totalLoop times over hipMallocManaged buffers so migration
// cost is included in the timings, then verified on the host.
// NOTE(review): g_A/g_B/g_C are fixed 64K-entry pointer arrays — totalLoop
// must stay <= 64*1024 (the compile-time totalLoop_ = 48 satisfies this).
void profileAddtionOnUM (unsigned int nElement, unsigned int totalLoop) {
unsigned int nBytes = nElement * sizeof(float);
if (nBytes < 1024) {
printf("==== Allocate nbytes is %d B.\n", nBytes);
} else if (nBytes >= 1024 && nBytes < 1024*1024) {
printf("==== Allocate nbytes is %d KB.\n", nBytes/1024);
} else {
printf("==== Allocate nbytes is %d MB.\n", nBytes/(1024*1024));
}
// allocate memory
float *g_A[64*1024], *g_B[64*1024], *g_C[64*1024];
for (int loop=0; loop<totalLoop; loop++) {
// unsigned int flags = hipMemAttachHost;
unsigned int flags = hipMemAttachGlobal; // globally accessible managed memory
CHECK_CUDA_RESULT(hipMallocManaged(&g_A[loop], nBytes, flags));
CHECK_CUDA_RESULT(hipMallocManaged(&g_B[loop], nBytes, flags));
CHECK_CUDA_RESULT(hipMallocManaged(&g_C[loop], nBytes, flags));
}
printf("===== inital data begins...\n");
int mode = initializingMode_;
double iStart = currentTimeCPUSecond();
for (int loop=0; loop<totalLoop; loop++) {
initialData(g_A[loop], nElement, 2.0f, mode);
initialData(g_B[loop], nElement, 2.0f, mode);
initialData(g_C[loop], nElement, 0.0f, mode);
}
double iStop = currentTimeCPUSecond();
if (mode == 0) {
printf("==== GPU mode: time for initializing the data: %f.\n", iStop - iStart);
} else {
printf("==== CPU mode: time for initializing the data: %f.\n", iStop - iStart);
}
printf("===== add data begins...\n");
iStart = currentTimeCPUSecond();
for (int loop=0; loop<totalLoop; loop++) {
dim3 threadsPerBlock(1024);
dim3 numBlocks((nElement+threadsPerBlock.x-1) / threadsPerBlock.x);
hipLaunchKernelGGL(( vecAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, g_A[loop], g_B[loop], g_C[loop]);
//hipMemcpy(g_C[loop], g_A[loop], nElem, hipMemcpyDeviceToDevice);
hipDeviceSynchronize();
}
iStop = currentTimeCPUSecond();
printf("==== Time for adding the data: %f.\n", iStop - iStart);
// Host-side verification: managed memory is directly readable from the CPU.
float ans = 4.0f;
//printf("===== ans is %f\n", ans);
int ii, jj;
for (ii = 0; ii < totalLoop; ii++) {
for (jj = 0; jj < nElement; jj++) {
if ((g_C[ii])[jj] != ans)
{
printf("Error happens, should enable DEBUG mode to investigate.\n");
break;
}
}
}
if(ii==totalLoop && jj==nElement) {
printf("==== Testing is passed.\n");
}
#ifdef DEBUG
printf("===== Check the results...\n");
for (int i = 0; i < totalLoop; i++) {
printf("\n======================================================\n");
for (int j = 0; j < 8; j++) {
//if ((g_C[i])[j] != ans)
{
printf("%3.0f ", (g_A[i])[j]);
}
}
}
printf("\n");
// NOTE(review): this redeclares `ans` in the same function scope — a DEBUG
// build would fail to compile; verify before enabling DEBUG.
float ans = 4.0f;
printf("===== ans is %f\n", ans);
for (int i = 0; i < totalLoop; i++) {
printf("\n======================================================\n");
for (int j = 0; j < 8; j++) {
//if ((g_C[i])[j] != ans)
{
printf("%3.0f ", (g_C[i])[j]);
}
}
}
printf("\n");
#endif
for (int i = 0; i < totalLoop; i++) {
hipFree(g_A[i]);
hipFree(g_B[i]);
hipFree(g_C[i]);
}
}
// Runs both profiling variants with the compile-time block size and loop
// count. NOTE(review): `dev` is accepted but not used inside the function.
void profileAdditionOperation(int dev) {
int totalLoop = totalLoop_;
printf("\n==== ==== Profile Addtion On Device.\n");
profileAddtionOnDevice(dataBlockSize_, totalLoop);
printf("\n==== ==== Profile Addtion On Unified Memory.\n");
profileAddtionOnUM(dataBlockSize_, totalLoop);
}
// Entry point: selects device 0, verifies it supports unified virtual
// addressing (required by the managed-memory profile), then runs the
// device-vs-unified-memory comparison and resets the device.
int main(int argc, char* argv[]) {
// set up device
int dev = 0;
hipSetDevice(dev);
// get device properties
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
// check uva supporting
if (deviceProp.unifiedAddressing) {
printf("Device %d supports uva memory!\n", dev);
} else {
printf("Device %d does not support uva memory!\n", dev);
exit(EXIT_SUCCESS);
}
// Check if supports managed memory
//CHECK_CUDA_RESULT(hipDeviceGetAttribute(&val, hipDeviceAttributeManagedMemory, dev));
// Check concurrent managed access, for cuda 8.0
//CHECK_CUDA_RESULT(hipDeviceGetAttribute(&val, hipDeviceAttributeConcurrentManagedAccess, dev));
profileAdditionOperation(dev);
hipDeviceReset();
}
| 0278eb5310a0bb684b0dd91b7696bfa9be905ff9.cu | /*****************************************************
* This file tests cuda memory management APIs.
*****************************************************/
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define initializingMode_ 0
#define dataBlockSize_ 16*1024*1024
//#define totalLoop_ 1024*2
#define totalLoop_ 16*3
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#define CHECK_CUDA_RESULT(N) { \
cudaError_t result = N; \
if (result != 0) { \
printf("CUDA call on line %d returned error %d\n", __LINE__, \
result); \
printf("Error: %s.\n", cudaGetErrorString(result)); \
exit(1); \
} }
/*
* Initialzing and computing kernels
*/
__global__ void vecInit(float *A, float value) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
A[i] = value;
}
__global__ void vecAdd(float* A, float* B, float* C) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
__global__ void vecMultiply(float* A, float* B, float* C) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] * B[i];
}
__global__ void vecMultiplyAndAdd(float* A, float* B, float* C, float* D) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
D[i] = (A[i] + B[i]) * C[i];
}
/// Wall-clock time in seconds (microsecond resolution, via gettimeofday).
double currentTimeCPUSecond() {
	struct timeval now;
	gettimeofday(&now, NULL);
	double seconds = (double) now.tv_sec;
	double fraction = (double) now.tv_usec * 1.e-6;
	return seconds + fraction;
}
void getCudaDeviceAttribute(int* value, cudaDeviceAttr attr, int device) {
cudaDeviceGetAttribute(value, attr, device);
}
void initialData(float *A, unsigned int n, float data, int mode = 0) {
//Using GPU to initialize the data, so that no need to copy memory from GPU to CPU
//printf("Initialize the data.\n");
if (mode == 0) {
dim3 threadsPerBlock(1024);
dim3 numBlocks((n+threadsPerBlock.x-1) / threadsPerBlock.x);
//printf("Threads per block: %d, Blocks: %d.\n", threadsPerBlock.x, numBlocks.x);
vecInit<<<numBlocks, threadsPerBlock>>>(A, data);
cudaDeviceSynchronize();
} else {
unsigned int i;
for (i = 0; i < n; i++) {
A[i] = data;
}
}
}
/**
 * Baseline profile: vector addition with explicit device memory (CUDA).
 * Allocates three device buffers of nElement floats, initializes A=B=2 and
 * C=0, runs one vecAdd launch and verifies C==4 on the host. The single
 * measured init/add times are multiplied by totalLoop so the printout is
 * comparable with profileAddtionOnUM(), which really loops totalLoop times.
 */
void profileAddtionOnDevice(unsigned int nElement, unsigned int totalLoop) {
	unsigned int nBytes = nElement * sizeof(float);
	if (nBytes < 1024) {
		printf("==== Allocate nbytes is %d B.\n", nBytes);
	} else if (nBytes >= 1024 && nBytes < 1024*1024) {
		printf("==== Allocate nbytes is %d KB.\n", nBytes/1024);
	} else {
		printf("==== Allocate nbytes is %d MB.\n", nBytes/(1024*1024));
	}
	// Plain device buffers — the unified-memory-free baseline.
	float *memoryOnDevice_A, *memoryOnDevice_B, *memoryOnDevice_C;
	CHECK_CUDA_RESULT(cudaMalloc(&memoryOnDevice_A, nBytes));
	CHECK_CUDA_RESULT(cudaMalloc(&memoryOnDevice_B, nBytes));
	CHECK_CUDA_RESULT(cudaMalloc(&memoryOnDevice_C, nBytes));
	printf("===== inital data begins...\n");
	int mode = initializingMode_;
	double iStart = currentTimeCPUSecond();
	{
		initialData(memoryOnDevice_A, nElement, 2.0f, mode);
		initialData(memoryOnDevice_B, nElement, 2.0f, mode);
		initialData(memoryOnDevice_C, nElement, 0.0f, mode);
	}
	double iStop = currentTimeCPUSecond();
	if (mode == 0) {
		printf("==== GPU mode: time for initializing the data: %f.\n", (iStop - iStart)*totalLoop);
	} else {
		printf("==== CPU mode: time for initializing the data: %f.\n", (iStop - iStart)*totalLoop);
	}
	printf("===== add data begins...\n");
	iStart = currentTimeCPUSecond();
	{
		dim3 threadsPerBlock(1024);
		dim3 numBlocks((nElement+threadsPerBlock.x-1) / threadsPerBlock.x);
		vecAdd<<<numBlocks, threadsPerBlock>>>(memoryOnDevice_A, memoryOnDevice_B, memoryOnDevice_C);
		cudaDeviceSynchronize();
	}
	iStop = currentTimeCPUSecond();
	printf("==== Time for adding the data: %f.\n", (iStop - iStart)*totalLoop);
	// Host-side verification: 2 + 2 == 4 for every element.
	float ans = 4.0f;
	float *result = (float *)malloc(nBytes);
	CHECK_CUDA_RESULT(cudaMemcpy(result, memoryOnDevice_C, nBytes, cudaMemcpyDeviceToHost));
	unsigned int jj;
	for (jj = 0; jj < nElement; jj++) {
		if (result[jj] != ans) {
			printf("Error happens, should enable DEBUG mode to investigate.\n");
			break;
		}
	}
	if (jj == nElement) {
		printf("==== Testing is passed.\n");
	}
	free(result);  // BUGFIX: the host verification buffer was leaked.
	cudaFree(memoryOnDevice_A);
	cudaFree(memoryOnDevice_B);
	cudaFree(memoryOnDevice_C);
}
void profileAddtionOnUM (unsigned int nElement, unsigned int totalLoop) {
unsigned int nBytes = nElement * sizeof(float);
if (nBytes < 1024) {
printf("==== Allocate nbytes is %d B.\n", nBytes);
} else if (nBytes >= 1024 && nBytes < 1024*1024) {
printf("==== Allocate nbytes is %d KB.\n", nBytes/1024);
} else {
printf("==== Allocate nbytes is %d MB.\n", nBytes/(1024*1024));
}
// allocate memory
float *g_A[64*1024], *g_B[64*1024], *g_C[64*1024];
for (int loop=0; loop<totalLoop; loop++) {
// unsigned int flags = cudaMemAttachHost;
unsigned int flags = cudaMemAttachGlobal;
CHECK_CUDA_RESULT(cudaMallocManaged(&g_A[loop], nBytes, flags));
CHECK_CUDA_RESULT(cudaMallocManaged(&g_B[loop], nBytes, flags));
CHECK_CUDA_RESULT(cudaMallocManaged(&g_C[loop], nBytes, flags));
}
printf("===== inital data begins...\n");
int mode = initializingMode_;
double iStart = currentTimeCPUSecond();
for (int loop=0; loop<totalLoop; loop++) {
initialData(g_A[loop], nElement, 2.0f, mode);
initialData(g_B[loop], nElement, 2.0f, mode);
initialData(g_C[loop], nElement, 0.0f, mode);
}
double iStop = currentTimeCPUSecond();
if (mode == 0) {
printf("==== GPU mode: time for initializing the data: %f.\n", iStop - iStart);
} else {
printf("==== CPU mode: time for initializing the data: %f.\n", iStop - iStart);
}
printf("===== add data begins...\n");
iStart = currentTimeCPUSecond();
for (int loop=0; loop<totalLoop; loop++) {
dim3 threadsPerBlock(1024);
dim3 numBlocks((nElement+threadsPerBlock.x-1) / threadsPerBlock.x);
vecAdd<<<numBlocks, threadsPerBlock>>>(g_A[loop], g_B[loop], g_C[loop]);
//cudaMemcpy(g_C[loop], g_A[loop], nElem, cudaMemcpyDeviceToDevice);
cudaDeviceSynchronize();
}
iStop = currentTimeCPUSecond();
printf("==== Time for adding the data: %f.\n", iStop - iStart);
//Check the accuracy
float ans = 4.0f;
//printf("===== ans is %f\n", ans);
int ii, jj;
for (ii = 0; ii < totalLoop; ii++) {
for (jj = 0; jj < nElement; jj++) {
if ((g_C[ii])[jj] != ans)
{
printf("Error happens, should enable DEBUG mode to investigate.\n");
break;
}
}
}
if(ii==totalLoop && jj==nElement) {
printf("==== Testing is passed.\n");
}
#ifdef DEBUG
printf("===== Check the results...\n");
for (int i = 0; i < totalLoop; i++) {
printf("\n======================================================\n");
for (int j = 0; j < 8; j++) {
//if ((g_C[i])[j] != ans)
{
printf("%3.0f ", (g_A[i])[j]);
}
}
}
printf("\n");
float ans = 4.0f;
printf("===== ans is %f\n", ans);
for (int i = 0; i < totalLoop; i++) {
printf("\n======================================================\n");
for (int j = 0; j < 8; j++) {
//if ((g_C[i])[j] != ans)
{
printf("%3.0f ", (g_C[i])[j]);
}
}
}
printf("\n");
#endif
for (int i = 0; i < totalLoop; i++) {
cudaFree(g_A[i]);
cudaFree(g_B[i]);
cudaFree(g_C[i]);
}
}
void profileAdditionOperation(int dev) {
int totalLoop = totalLoop_;
printf("\n==== ==== Profile Addtion On Device.\n");
profileAddtionOnDevice(dataBlockSize_, totalLoop);
printf("\n==== ==== Profile Addtion On Unified Memory.\n");
profileAddtionOnUM(dataBlockSize_, totalLoop);
}
int main(int argc, char* argv[]) {
// set up device
int dev = 0;
cudaSetDevice(dev);
// get device properties
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
// check uva supporting
if (deviceProp.unifiedAddressing) {
printf("Device %d supports uva memory!\n", dev);
} else {
printf("Device %d does not support uva memory!\n", dev);
exit(EXIT_SUCCESS);
}
// Check if supports managed memory
//CHECK_CUDA_RESULT(cudaDeviceGetAttribute(&val, cudaDevAttrManagedMemory, dev));
// Check concurrent managed access, for cuda 8.0
//CHECK_CUDA_RESULT(cudaDeviceGetAttribute(&val, cudaDevAttrConcurrentManagedAccess, dev));
profileAdditionOperation(dev);
cudaDeviceReset();
}
|
7e156b755001bfd897fcfeac398ec25d44419bef.hip | // !!! This is a file automatically generated by hipify!!!
/* -------------------------------------------------------
UNTILED CODE GENERATED BY FORMA COMPILER
---------------------------------------------------------*/
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
// Generated helper kernel: writes `value` into every element, one thread
// per element. No bounds check — initialize_array() rounds the grid up, so
// out-of-range stores occur unless the buffer is padded or the size is a
// multiple of the block size. NOTE(review): verify allocation padding.
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
// Host helper: fills d_input[0..size) with `value` by launching
// __kernel_init__ with the compiler's maximum 1-D block size and a
// ceiling-divided grid (launch is asynchronous; no synchronize here).
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
// Forma-generated stencil kernel (untiled): one interior grid point per
// thread on an N x M x L volume (x fastest, linear index x + N*(y + M*z)).
// Computes __var_4__[x,y,z] = input[x,y,z] + 0.125f * sum over the three
// axes of the centered second difference (input[+1] - 2*input[0] + input[-1]).
// Threads map to (x,y,z) = blockIdx*blockDim + threadIdx + 1; the guards
// below skip threads past the interior, and the one-cell boundary layer of
// the output is never written by this kernel.
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, double * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_0__ <= (N-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_1__ <= (M-2)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_2__ <= (L-2)){
// z-axis second difference, scaled by 0.125
double __temp_0__;
__temp_0__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_1__;
__temp_1__ = (input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))] - __temp_0__);
double __temp_2__;
__temp_2__ = (__temp_1__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-1)))]);
double __temp_3__;
__temp_3__ = (0.125000f * __temp_2__);
// y-axis second difference, scaled by 0.125
double __temp_4__;
__temp_4__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_5__;
__temp_5__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))] - __temp_4__);
double __temp_6__;
__temp_6__ = (__temp_5__ + input[__iter_0__+(N-0)*(__iter_1__+(-1)+(M-0)*(__iter_2__))]);
double __temp_7__;
__temp_7__ = (0.125000f * __temp_6__);
double __temp_8__;
__temp_8__ = (__temp_3__ + __temp_7__);
// x-axis second difference, scaled by 0.125
double __temp_9__;
__temp_9__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_10__;
__temp_10__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] - __temp_9__);
double __temp_11__;
__temp_11__ = (__temp_10__ + input[__iter_0__+(-1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_12__;
__temp_12__ = (0.125000f * __temp_11__);
double __temp_13__;
__temp_13__ = (__temp_8__ + __temp_12__);
// center value plus the accumulated Laplacian term
double __temp_14__;
__temp_14__ = (__temp_13__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
__var_4__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] = __temp_14__;
}
}
}
}
__global__ void __kernel___forma_kernel__1__(double * __restrict__ __var_4__, int L, int M, int N, double * __restrict__ __var_3__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_3__;
__iter_3__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_3__ <= (N-2)){
int __iter_4__;
__iter_4__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_4__ <= (M-2)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_5__ <= (L-2)){
double __temp_15__;
__temp_15__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_16__;
__temp_16__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(1)))] - __temp_15__);
double __temp_17__;
__temp_17__ = (__temp_16__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))]);
double __temp_18__;
__temp_18__ = (0.125000f * __temp_17__);
double __temp_19__;
__temp_19__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_20__;
__temp_20__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(1)+(M-0)*(__iter_5__))] - __temp_19__);
double __temp_21__;
__temp_21__ = (__temp_20__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))]);
double __temp_22__;
__temp_22__ = (0.125000f * __temp_21__);
double __temp_23__;
__temp_23__ = (__temp_18__ + __temp_22__);
double __temp_24__;
__temp_24__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_25__;
__temp_25__ = (__var_4__[__iter_3__+(1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] - __temp_24__);
double __temp_26__;
__temp_26__ = (__temp_25__ + __var_4__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_27__;
__temp_27__ = (0.125000f * __temp_26__);
double __temp_28__;
__temp_28__ = (__temp_23__ + __temp_27__);
double __temp_29__;
__temp_29__ = (__temp_28__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
__var_3__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] = __temp_29__;
}
}
}
}
__global__ void __kernel___forma_kernel__2__(double * __restrict__ __var_3__, int L, int M, int N, double * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_6__ <= (N-2)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_7__ <= (M-2)){
int __iter_8__;
__iter_8__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_8__ <= (L-2)){
double __temp_30__;
__temp_30__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_31__;
__temp_31__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(1)))] - __temp_30__);
double __temp_32__;
__temp_32__ = (__temp_31__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(-1)))]);
double __temp_33__;
__temp_33__ = (0.125000f * __temp_32__);
double __temp_34__;
__temp_34__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_35__;
__temp_35__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(1)+(M-0)*(__iter_8__))] - __temp_34__);
double __temp_36__;
__temp_36__ = (__temp_35__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(-1)+(M-0)*(__iter_8__))]);
double __temp_37__;
__temp_37__ = (0.125000f * __temp_36__);
double __temp_38__;
__temp_38__ = (__temp_33__ + __temp_37__);
double __temp_39__;
__temp_39__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_40__;
__temp_40__ = (__var_3__[__iter_6__+(1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] - __temp_39__);
double __temp_41__;
__temp_41__ = (__temp_40__ + __var_3__[__iter_6__+(-1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_42__;
__temp_42__ = (0.125000f * __temp_41__);
double __temp_43__;
__temp_43__ = (__temp_38__ + __temp_42__);
double __temp_44__;
__temp_44__ = (__temp_43__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
__var_2__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] = __temp_44__;
}
}
}
}
__global__ void __kernel___forma_kernel__3__(double * __restrict__ __var_2__, int L, int M, int N, double * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_9__;
__iter_9__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_9__ <= (N-2)){
int __iter_10__;
__iter_10__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_10__ <= (M-2)){
int __iter_11__;
__iter_11__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_11__ <= (L-2)){
double __temp_45__;
__temp_45__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_46__;
__temp_46__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(1)))] - __temp_45__);
double __temp_47__;
__temp_47__ = (__temp_46__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(-1)))]);
double __temp_48__;
__temp_48__ = (0.125000f * __temp_47__);
double __temp_49__;
__temp_49__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_50__;
__temp_50__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(1)+(M-0)*(__iter_11__))] - __temp_49__);
double __temp_51__;
__temp_51__ = (__temp_50__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(-1)+(M-0)*(__iter_11__))]);
double __temp_52__;
__temp_52__ = (0.125000f * __temp_51__);
double __temp_53__;
__temp_53__ = (__temp_48__ + __temp_52__);
double __temp_54__;
__temp_54__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_55__;
__temp_55__ = (__var_2__[__iter_9__+(1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] - __temp_54__);
double __temp_56__;
__temp_56__ = (__temp_55__ + __var_2__[__iter_9__+(-1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_57__;
__temp_57__ = (0.125000f * __temp_56__);
double __temp_58__;
__temp_58__ = (__temp_53__ + __temp_57__);
double __temp_59__;
__temp_59__ = (__temp_58__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
__var_1__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] = __temp_59__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void host_code(double * h_input, double * __var_0__, int L, int M, int N) {
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
hipMalloc(&__var_2__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
hipMalloc(&__var_3__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
hipMalloc(&__var_4__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 16;
int __block_1___kernel___forma_kernel__0__ = 4;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, L, M, N, __var_4__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_4__, L, M, N, __var_3__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_3__, L, M, N, __var_2__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, L, M, N, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
double elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
hipFree(__var_3__);
hipFree(__var_4__);
}
/*Host Free End*/
| 7e156b755001bfd897fcfeac398ec25d44419bef.cu | /* -------------------------------------------------------
UNTILED CODE GENERATED BY FORMA COMPILER
---------------------------------------------------------*/
#include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(double * __restrict__ input, int L, int M, int N, double * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_0__ <= (N-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_1__ <= (M-2)){
int __iter_2__;
__iter_2__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_2__ <= (L-2)){
double __temp_0__;
__temp_0__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_1__;
__temp_1__ = (input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(1)))] - __temp_0__);
double __temp_2__;
__temp_2__ = (__temp_1__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__+(-1)))]);
double __temp_3__;
__temp_3__ = (0.125000f * __temp_2__);
double __temp_4__;
__temp_4__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_5__;
__temp_5__ = (input[__iter_0__+(N-0)*(__iter_1__+(1)+(M-0)*(__iter_2__))] - __temp_4__);
double __temp_6__;
__temp_6__ = (__temp_5__ + input[__iter_0__+(N-0)*(__iter_1__+(-1)+(M-0)*(__iter_2__))]);
double __temp_7__;
__temp_7__ = (0.125000f * __temp_6__);
double __temp_8__;
__temp_8__ = (__temp_3__ + __temp_7__);
double __temp_9__;
__temp_9__ = (2.000000f * input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_10__;
__temp_10__ = (input[__iter_0__+(1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] - __temp_9__);
double __temp_11__;
__temp_11__ = (__temp_10__ + input[__iter_0__+(-1)+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
double __temp_12__;
__temp_12__ = (0.125000f * __temp_11__);
double __temp_13__;
__temp_13__ = (__temp_8__ + __temp_12__);
double __temp_14__;
__temp_14__ = (__temp_13__ + input[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))]);
__var_4__[__iter_0__+(N-0)*(__iter_1__+(M-0)*(__iter_2__))] = __temp_14__;
}
}
}
}
__global__ void __kernel___forma_kernel__1__(double * __restrict__ __var_4__, int L, int M, int N, double * __restrict__ __var_3__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_3__;
__iter_3__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_3__ <= (N-2)){
int __iter_4__;
__iter_4__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_4__ <= (M-2)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_5__ <= (L-2)){
double __temp_15__;
__temp_15__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_16__;
__temp_16__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(1)))] - __temp_15__);
double __temp_17__;
__temp_17__ = (__temp_16__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__+(-1)))]);
double __temp_18__;
__temp_18__ = (0.125000f * __temp_17__);
double __temp_19__;
__temp_19__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_20__;
__temp_20__ = (__var_4__[__iter_3__+(N-0)*(__iter_4__+(1)+(M-0)*(__iter_5__))] - __temp_19__);
double __temp_21__;
__temp_21__ = (__temp_20__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(-1)+(M-0)*(__iter_5__))]);
double __temp_22__;
__temp_22__ = (0.125000f * __temp_21__);
double __temp_23__;
__temp_23__ = (__temp_18__ + __temp_22__);
double __temp_24__;
__temp_24__ = (2.000000f * __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_25__;
__temp_25__ = (__var_4__[__iter_3__+(1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] - __temp_24__);
double __temp_26__;
__temp_26__ = (__temp_25__ + __var_4__[__iter_3__+(-1)+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
double __temp_27__;
__temp_27__ = (0.125000f * __temp_26__);
double __temp_28__;
__temp_28__ = (__temp_23__ + __temp_27__);
double __temp_29__;
__temp_29__ = (__temp_28__ + __var_4__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))]);
__var_3__[__iter_3__+(N-0)*(__iter_4__+(M-0)*(__iter_5__))] = __temp_29__;
}
}
}
}
__global__ void __kernel___forma_kernel__2__(double * __restrict__ __var_3__, int L, int M, int N, double * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_6__ <= (N-2)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_7__ <= (M-2)){
int __iter_8__;
__iter_8__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_8__ <= (L-2)){
double __temp_30__;
__temp_30__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_31__;
__temp_31__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(1)))] - __temp_30__);
double __temp_32__;
__temp_32__ = (__temp_31__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__+(-1)))]);
double __temp_33__;
__temp_33__ = (0.125000f * __temp_32__);
double __temp_34__;
__temp_34__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_35__;
__temp_35__ = (__var_3__[__iter_6__+(N-0)*(__iter_7__+(1)+(M-0)*(__iter_8__))] - __temp_34__);
double __temp_36__;
__temp_36__ = (__temp_35__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(-1)+(M-0)*(__iter_8__))]);
double __temp_37__;
__temp_37__ = (0.125000f * __temp_36__);
double __temp_38__;
__temp_38__ = (__temp_33__ + __temp_37__);
double __temp_39__;
__temp_39__ = (2.000000f * __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_40__;
__temp_40__ = (__var_3__[__iter_6__+(1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] - __temp_39__);
double __temp_41__;
__temp_41__ = (__temp_40__ + __var_3__[__iter_6__+(-1)+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
double __temp_42__;
__temp_42__ = (0.125000f * __temp_41__);
double __temp_43__;
__temp_43__ = (__temp_38__ + __temp_42__);
double __temp_44__;
__temp_44__ = (__temp_43__ + __var_3__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))]);
__var_2__[__iter_6__+(N-0)*(__iter_7__+(M-0)*(__iter_8__))] = __temp_44__;
}
}
}
}
__global__ void __kernel___forma_kernel__3__(double * __restrict__ __var_2__, int L, int M, int N, double * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Z = (int)(blockDim.z);
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_9__;
__iter_9__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_9__ <= (N-2)){
int __iter_10__;
__iter_10__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_10__ <= (M-2)){
int __iter_11__;
__iter_11__ = (int)(blockIdx.z)*(int)(FORMA_BLOCKDIM_Z) + (int)(threadIdx.z) + 1;
if(__iter_11__ <= (L-2)){
double __temp_45__;
__temp_45__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_46__;
__temp_46__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(1)))] - __temp_45__);
double __temp_47__;
__temp_47__ = (__temp_46__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__+(-1)))]);
double __temp_48__;
__temp_48__ = (0.125000f * __temp_47__);
double __temp_49__;
__temp_49__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_50__;
__temp_50__ = (__var_2__[__iter_9__+(N-0)*(__iter_10__+(1)+(M-0)*(__iter_11__))] - __temp_49__);
double __temp_51__;
__temp_51__ = (__temp_50__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(-1)+(M-0)*(__iter_11__))]);
double __temp_52__;
__temp_52__ = (0.125000f * __temp_51__);
double __temp_53__;
__temp_53__ = (__temp_48__ + __temp_52__);
double __temp_54__;
__temp_54__ = (2.000000f * __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_55__;
__temp_55__ = (__var_2__[__iter_9__+(1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] - __temp_54__);
double __temp_56__;
__temp_56__ = (__temp_55__ + __var_2__[__iter_9__+(-1)+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
double __temp_57__;
__temp_57__ = (0.125000f * __temp_56__);
double __temp_58__;
__temp_58__ = (__temp_53__ + __temp_57__);
double __temp_59__;
__temp_59__ = (__temp_58__ + __var_2__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))]);
__var_1__[__iter_9__+(N-0)*(__iter_10__+(M-0)*(__iter_11__))] = __temp_59__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void host_code(double * h_input, double * __var_0__, int L, int M, int N) {
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
cudaMalloc(&__var_2__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
cudaMalloc(&__var_3__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
cudaMalloc(&__var_4__,sizeof(double)*((L-0)*(M-0)*(N-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1;
int __size_2___kernel___forma_kernel__0__ = ((L-2) - 1 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 16;
int __block_1___kernel___forma_kernel__0__ = 4;
int __block_2___kernel___forma_kernel__0__ = 4;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __grid_2___kernel___forma_kernel__0__ = FORMA_CEIL(__size_2___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, L, M, N, __var_4__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_4__, L, M, N, __var_3__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_3__, L, M, N, __var_2__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, L, M, N, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((L-0)*(M-0)*(N-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
double elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
cudaFree(__var_3__);
cudaFree(__var_4__);
}
/*Host Free End*/
|
73340c0395882e6fea5cce02286c80f5ae9026e0.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <execution/LaunchContext.h>
#include <helpers/cublasHelper.h>
#include <helpers/logger.h>
#include <thread>
thread_local sd::ContextBuffers contextBuffers = sd::ContextBuffers();
namespace sd {
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();
std::mutex LaunchContext::_mutex;
SD_MAP_IMPL<int, std::mutex*> LaunchContext::_deviceMutexes;
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext(hipStream_t* cudaStream, hipStream_t& specialCudaStream, void* reductionPointer,
void* scalarPointer, int* allocationPointer) {
//_cudaStream = cudaStream;
//_cudaSpecialStream = &specialCudaStream; // ideal is = new hipStream_t; *_cudaSpecialStream = specialCudaStream;
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = allocationPointer;
_workspace = nullptr;
_isAllocated = false;
}
std::mutex* LaunchContext::deviceMutex() {
auto deviceId = AffinityManager::currentDeviceId();
return _deviceMutexes[deviceId];
}
LaunchContext::~LaunchContext() {
if (_isAllocated) {
}
}
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext() {
// default constructor, just to make clang/ranlib happy
_workspace = nullptr;
_deviceID = 0;
_isAllocated = true;
}
LaunchContext::LaunchContext(sd::Pointer cudaStream, sd::Pointer reductionPointer, sd::Pointer scalarPointer,
sd::Pointer allocationPointer) {
_isAllocated = false;
//_cudaStream = reinterpret_cast<hipStream_t*>(cudaStream);
// _cudaSpecialStream = reinterpret_cast<hipStream_t*>(cudaStream);
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = reinterpret_cast<int *>(allocationPointer);
}
LaunchContext* LaunchContext::defaultContext() {
/**
* This method returns LaunchContext, that has multiple entities within:
* 1) temporary buffers. they must be per-thread
* 2) CUDA stream. it must be either per-thread or per-device
* 3) cuBLAS handle. it must be per-device
*/
auto deviceId = AffinityManager::currentDeviceId();
{
// we need this block synchronous, to avoid double initialization etc
std::lock_guard<std::mutex> lock(_mutex);
if (LaunchContext::_contexts.empty()) {
// create one context per device
auto numDevices = AffinityManager::numberOfDevices();
_contexts.resize(numDevices);
for (int e = 0; e < numDevices; e++) {
_deviceMutexes[e] = new std::mutex();
AffinityManager::setCurrentNativeDevice(e);
LaunchContext::_contexts[e] = std::make_shared<LaunchContext>();
}
// don't forget to restore device back again
AffinityManager::setCurrentNativeDevice(deviceId);
}
}
// return context for current device
return LaunchContext::_contexts[deviceId].get();
}
void* LaunchContext::getReductionPointer() const { return contextBuffers.reductionBuffer(); };
void* LaunchContext::getScalarPointer() const { return contextBuffers.scalarBuffer(); };
int* LaunchContext::getAllocationPointer() const { return reinterpret_cast<int*>(contextBuffers.allocationBuffer()); };
void* LaunchContext::getCublasHandle() const { return CublasHelper::getInstance().handle(); };
void* LaunchContext::getCusolverHandle() const { return CublasHelper::getInstance().solver(); };
hipStream_t* LaunchContext::getCudaStream() const {
return reinterpret_cast<hipStream_t*>(contextBuffers.execStream());
};
hipStream_t* LaunchContext::getCudaSpecialStream() const {
return reinterpret_cast<hipStream_t*>(contextBuffers.specialStream());
;
};
void LaunchContext::setReductionPointer(void* reductionPointer) {
contextBuffers.setReductionBuffer(reductionPointer);
};
void LaunchContext::setScalarPointer(void* scalarPointer) { contextBuffers.setScalarBuffer(scalarPointer); };
void LaunchContext::setAllocationPointer(int* allocationPointer) {
contextBuffers.setAllocationBuffer(allocationPointer);
};
void LaunchContext::setCudaStream(hipStream_t* cudaStream){
//_cudaStream = cudaStream;
};
void LaunchContext::setCudaSpecialStream(hipStream_t* cudaStream){
//_cudaSpecialStream = cudaStream;
};
void LaunchContext::setCublasHandle(void* handle) { _cublasHandle = handle; };
void LaunchContext::swapContextBuffers(ContextBuffers& buffers) { contextBuffers = buffers; };
void LaunchContext::releaseBuffers() {
// sd_printf("LaunchContext::releaseBuffers() was invoked\n", "");
contextBuffers.release();
}
bool LaunchContext::isInitialized() { return contextBuffers.isInitialized(); }
void* LaunchContext::getCuDnnHandle() const { return CublasHelper::getInstance().cudnn(); }
sd::ErrorReference* LaunchContext::errorReference() { return contextBuffers.errorReference(); }
void* LaunchContext::engine() { return _engine; }
} // namespace sd
| 73340c0395882e6fea5cce02286c80f5ae9026e0.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <execution/LaunchContext.h>
#include <helpers/cublasHelper.h>
#include <helpers/logger.h>
#include <thread>
thread_local sd::ContextBuffers contextBuffers = sd::ContextBuffers();
namespace sd {
std::vector<std::shared_ptr<LaunchContext>> LaunchContext::_contexts = std::vector<std::shared_ptr<LaunchContext>>();
std::mutex LaunchContext::_mutex;
SD_MAP_IMPL<int, std::mutex*> LaunchContext::_deviceMutexes;
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext(cudaStream_t* cudaStream, cudaStream_t& specialCudaStream, void* reductionPointer,
void* scalarPointer, int* allocationPointer) {
//_cudaStream = cudaStream;
//_cudaSpecialStream = &specialCudaStream; // ideal is = new cudaStream_t; *_cudaSpecialStream = specialCudaStream;
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = allocationPointer;
_workspace = nullptr;
_isAllocated = false;
}
std::mutex* LaunchContext::deviceMutex() {
auto deviceId = AffinityManager::currentDeviceId();
return _deviceMutexes[deviceId];
}
LaunchContext::~LaunchContext() {
if (_isAllocated) {
}
}
////////////////////////////////////////////////////////////////////////
LaunchContext::LaunchContext() {
// default constructor, just to make clang/ranlib happy
_workspace = nullptr;
_deviceID = 0;
_isAllocated = true;
}
LaunchContext::LaunchContext(sd::Pointer cudaStream, sd::Pointer reductionPointer, sd::Pointer scalarPointer,
sd::Pointer allocationPointer) {
_isAllocated = false;
//_cudaStream = reinterpret_cast<cudaStream_t*>(cudaStream);
// _cudaSpecialStream = reinterpret_cast<cudaStream_t*>(cudaStream);
//_reductionPointer = reductionPointer;
//_scalarPointer = scalarPointer;
//_allocationPointer = reinterpret_cast<int *>(allocationPointer);
}
LaunchContext* LaunchContext::defaultContext() {
/**
* This method returns LaunchContext, that has multiple entities within:
* 1) temporary buffers. they must be per-thread
* 2) CUDA stream. it must be either per-thread or per-device
* 3) cuBLAS handle. it must be per-device
*/
auto deviceId = AffinityManager::currentDeviceId();
{
// we need this block synchronous, to avoid double initialization etc
std::lock_guard<std::mutex> lock(_mutex);
if (LaunchContext::_contexts.empty()) {
// create one context per device
auto numDevices = AffinityManager::numberOfDevices();
_contexts.resize(numDevices);
for (int e = 0; e < numDevices; e++) {
_deviceMutexes[e] = new std::mutex();
AffinityManager::setCurrentNativeDevice(e);
LaunchContext::_contexts[e] = std::make_shared<LaunchContext>();
}
// don't forget to restore device back again
AffinityManager::setCurrentNativeDevice(deviceId);
}
}
// return context for current device
return LaunchContext::_contexts[deviceId].get();
}
void* LaunchContext::getReductionPointer() const { return contextBuffers.reductionBuffer(); };
void* LaunchContext::getScalarPointer() const { return contextBuffers.scalarBuffer(); };
int* LaunchContext::getAllocationPointer() const { return reinterpret_cast<int*>(contextBuffers.allocationBuffer()); };
void* LaunchContext::getCublasHandle() const { return CublasHelper::getInstance().handle(); };
void* LaunchContext::getCusolverHandle() const { return CublasHelper::getInstance().solver(); };
cudaStream_t* LaunchContext::getCudaStream() const {
return reinterpret_cast<cudaStream_t*>(contextBuffers.execStream());
};
cudaStream_t* LaunchContext::getCudaSpecialStream() const {
return reinterpret_cast<cudaStream_t*>(contextBuffers.specialStream());
;
};
// Inject an externally-owned reduction scratch buffer (no ownership transfer).
void LaunchContext::setReductionPointer(void* reductionPointer) {
contextBuffers.setReductionBuffer(reductionPointer);
};
// Inject an externally-owned scalar scratch buffer.
void LaunchContext::setScalarPointer(void* scalarPointer) { contextBuffers.setScalarBuffer(scalarPointer); };
// Inject an externally-owned allocation-counter buffer.
void LaunchContext::setAllocationPointer(int* allocationPointer) {
contextBuffers.setAllocationBuffer(allocationPointer);
};
// No-op: the execution stream is managed by contextBuffers (see
// getCudaStream); the commented-out assignment appears to be a leftover from
// when the context owned the stream directly.
// NOTE(review): confirm no caller relies on this setter taking effect.
void LaunchContext::setCudaStream(cudaStream_t* cudaStream){
//_cudaStream = cudaStream;
};
// No-op, same situation as setCudaStream above.
void LaunchContext::setCudaSpecialStream(cudaStream_t* cudaStream){
//_cudaSpecialStream = cudaStream;
};
// Override the cuBLAS handle for this context (raw pointer, no ownership).
void LaunchContext::setCublasHandle(void* handle) { _cublasHandle = handle; };
// Replace this context's buffer set wholesale (copy assignment of buffers).
void LaunchContext::swapContextBuffers(ContextBuffers& buffers) { contextBuffers = buffers; };
// Release this context's scratch buffers; library handles are untouched.
void LaunchContext::releaseBuffers() {
// sd_printf("LaunchContext::releaseBuffers() was invoked\n", "");
contextBuffers.release();
}
// True once the underlying context buffers have been initialized.
bool LaunchContext::isInitialized() { return contextBuffers.isInitialized(); }
// cuDNN handle shared via the CublasHelper singleton; callers must not destroy it.
void* LaunchContext::getCuDnnHandle() const { return CublasHelper::getInstance().cudnn(); }
// Error state associated with this context's buffers.
sd::ErrorReference* LaunchContext::errorReference() { return contextBuffers.errorReference(); }
// Opaque backend engine pointer stored on this context.
void* LaunchContext::engine() { return _engine; }
} // namespace sd
|
2cd9860553e33ed91893c84550e8b332243c1b7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//////////////////////////////////////////////////////////////////////////////
///
/// @file main.cu
///
/// @brief File containing main function for the VFI problem.
///
/// @author Eric M. Aldrich \n
/// ealdrich@ucsc.edu
///
/// @version 1.0
///
/// @date 23 Oct 2012
///
/// @copyright Copyright Eric M. Aldrich 2012 \n
/// Distributed under the Boost Software License, Version 1.0
/// (See accompanying file LICENSE_1_0.txt or copy at \n
/// http://www.boost.org/LICENSE_1_0.txt)
///
//////////////////////////////////////////////////////////////////////////////
#include "global.h"
#include "auxFuncs.h"
#include "rocblas.h"
#include <iostream>
#include <ctime>
#include <typeinfo>
#include <fstream>
using namespace std;
#include "vfStep.cu"
//////////////////////////////////////////////////////////////////////////////
///
/// @fn main()
///
/// @brief Main function for the VFI problem.
///
/// @details This function solves a standard neoclassical growth model with
/// value function iteration on a GPU.
///
/// @details See Aldrich, Eric M., Jesus Fernandez-Villaverde,
/// A. Ronald Gallant and Juan F. Rubio-Ramirez (2011), "Tapping the
/// supercomputer under your desk: Solving dynamic equilibrium models with
/// graphics processors", Journal of Economic Dynamics & Control, 35, 386-393.
///
/// @returns 0 upon successful completion, 1 otherwise.
///
//////////////////////////////////////////////////////////////////////////////
int main()
{
// Admin
int imax;
REAL diff = 1.0;
REAL negOne = -1.0;
// Load parameters
parameters params;
params.load("../parameters.txt");
int nk = params.nk;
int nz = params.nz;
// Time the GPU startup overhead
double tic = curr_second();
hipblasHandle_t handle;
hipblasCreate(&handle);
double toc = curr_second();
double startTime = toc - tic;
// Pointers to variables in device memory
REAL *K, *Z, *P, *V0, *V, *G, *Vtemp;
// Allocate variables in device memory
tic = curr_second(); // Start the timer for solution
size_t sizeK = nk*sizeof(REAL);
size_t sizeZ = nz*sizeof(REAL);
size_t sizeP = nz*nz*sizeof(REAL);
size_t sizeV = nk*nz*sizeof(REAL);
size_t sizeG = nk*nz*sizeof(REAL);
hipMalloc((void**)&K, sizeK);
hipMalloc((void**)&Z, sizeZ);
hipMalloc((void**)&P, sizeP);
hipMalloc((void**)&V0, sizeV);
hipMalloc((void**)&Vtemp, sizeV);
hipMalloc((void**)&V, sizeV);
hipMalloc((void**)&G, sizeG);
// Blocking
const int block_size = 4; ///< Block size for CUDA kernel.
dim3 dimBlockV(block_size, nz);
dim3 dimGridV(nk/block_size,1);
// Compute TFP grid, capital grid and initial VF
REAL hK[nk], hZ[nz], hP[nz*nz], hV0[nk*nz];
ar1(params, hZ, hP);
kGrid(params, hZ, hK);
vfInit(params, hZ, hV0);
// Copy capital grid, TFP grid and transition matrix to GPU memory
hipMemcpy(K, hK, sizeK, hipMemcpyHostToDevice);
hipMemcpy(Z, hZ, sizeZ, hipMemcpyHostToDevice);
hipMemcpy(P, hP, sizeP, hipMemcpyHostToDevice);
hipMemcpy(V0, hV0, sizeV, hipMemcpyHostToDevice);
// Iterate on the value function
int count = 0;
while(fabs(diff) > params.tol){
hipLaunchKernelGGL(( vfStep), dim3(dimGridV),dim3(dimBlockV), 0, 0, params,K,Z,P,V0,V,G);
if(typeid(realtype) == typeid(singletype)){
hipblasSaxpy(handle, nk*nz, (float*)&negOne, (float*)V, 1, (float*)V0, 1);
hipblasIsamax(handle, nk*nz, (float*)V0, 1, &imax);
} else if(typeid(realtype) == typeid(doubletype)){
hipblasDaxpy(handle, nk*nz, (double*)&negOne, (double*)V, 1, (double*)V0, 1);
hipblasIdamax(handle, nk*nz, (double*)V0, 1, &imax);
}
hipMemcpy(&diff, V0+imax, sizeof(REAL), hipMemcpyDeviceToHost);
Vtemp = V0;
V0 = V;
V = Vtemp;
++count;
}
V = V0;
// Compute solution time
toc = curr_second();
double solTime = toc - tic;
// Copy value and policy functions to host memory
REAL* hV = new REAL[nk*nz];
REAL* hG = new REAL[nk*nz];
hipMemcpy(hV, V, sizeV, hipMemcpyDeviceToHost);
hipMemcpy(hG, G, sizeG, hipMemcpyDeviceToHost);
// Free variables in device memory
hipFree(K);
hipFree(Z);
hipFree(P);
hipFree(V0);
hipFree(V);
hipFree(Vtemp);
hipFree(G);
hipblasDestroy(handle);
// Write to file (row major)
ofstream fileStartTime, fileSolTime, fileTotalTime, fileValue, filePolicy;
fileValue.precision(7);
filePolicy.precision(7);
fileStartTime.open("startTimeCUDA-C.dat");
fileSolTime.open("solTimeCUDA-C.dat");
fileTotalTime.open("totalTimeCUDA-C.dat");
fileValue.open("valFunCUDA-C.dat");
filePolicy.open("polFunCUDA-C.dat");
fileStartTime << startTime << endl;
fileSolTime << solTime << endl;
fileSolTime << startTime+solTime << endl;
fileValue << nk << endl;
fileValue << nz << endl;
filePolicy << nk << endl;
filePolicy << nz << endl;
for(int jx = 0 ; jx < nz ; ++jx){
for(int ix = 0 ; ix < nk ; ++ix){
fileValue << hV[ix*nz+jx] << endl;
filePolicy << hG[ix*nz+jx] << endl;
}
}
fileStartTime.close();
fileSolTime.close();
fileTotalTime.close();
fileValue.close();
filePolicy.close();
return 0;
}
| 2cd9860553e33ed91893c84550e8b332243c1b7a.cu | //////////////////////////////////////////////////////////////////////////////
///
/// @file main.cu
///
/// @brief File containing main function for the VFI problem.
///
/// @author Eric M. Aldrich \n
/// ealdrich@ucsc.edu
///
/// @version 1.0
///
/// @date 23 Oct 2012
///
/// @copyright Copyright Eric M. Aldrich 2012 \n
/// Distributed under the Boost Software License, Version 1.0
/// (See accompanying file LICENSE_1_0.txt or copy at \n
/// http://www.boost.org/LICENSE_1_0.txt)
///
//////////////////////////////////////////////////////////////////////////////
#include "global.h"
#include "auxFuncs.h"
#include "cublas_v2.h"
#include <iostream>
#include <ctime>
#include <typeinfo>
#include <fstream>
using namespace std;
#include "vfStep.cu"
//////////////////////////////////////////////////////////////////////////////
///
/// @fn main()
///
/// @brief Main function for the VFI problem.
///
/// @details This function solves a standard neoclassical growth model with
/// value function iteration on a GPU.
///
/// @details See Aldrich, Eric M., Jesus Fernandez-Villaverde,
/// A. Ronald Gallant and Juan F. Rubio-Ramirez (2011), "Tapping the
/// supercomputer under your desk: Solving dynamic equilibrium models with
/// graphics processors", Journal of Economic Dynamics & Control, 35, 386-393.
///
/// @returns 0 upon successful completion, 1 otherwise.
///
//////////////////////////////////////////////////////////////////////////////
int main()
{
  // Admin
  int imax;            // 1-based index returned by cublasI*amax
  REAL diff = 1.0;     // largest-magnitude entry of V0 - V; starts above tol
  REAL negOne = -1.0;  // axpy coefficient used to form V0 - V

  // Load parameters
  parameters params;
  params.load("../parameters.txt");
  int nk = params.nk;  // number of capital grid points
  int nz = params.nz;  // number of TFP grid points

  // Time the GPU startup overhead
  double tic = curr_second();
  cublasHandle_t handle;
  cublasCreate(&handle);
  double toc = curr_second();
  double startTime = toc - tic;

  // Pointers to variables in device memory. V0 and V always reference two
  // distinct buffers that are ping-ponged every iteration; Vtemp is only a
  // swap temporary, so it gets no allocation of its own. (The original code
  // allocated a third buffer for Vtemp that leaked, and the post-loop
  // aliasing V = V0 made cudaFree(V0)/cudaFree(V) a double free.)
  REAL *K, *Z, *P, *V0, *V, *G, *Vtemp;

  // Allocate variables in device memory
  tic = curr_second(); // Start the timer for solution
  size_t sizeK = nk*sizeof(REAL);
  size_t sizeZ = nz*sizeof(REAL);
  size_t sizeP = nz*nz*sizeof(REAL);
  size_t sizeV = nk*nz*sizeof(REAL);
  size_t sizeG = nk*nz*sizeof(REAL);
  cudaMalloc((void**)&K, sizeK);
  cudaMalloc((void**)&Z, sizeZ);
  cudaMalloc((void**)&P, sizeP);
  cudaMalloc((void**)&V0, sizeV);
  cudaMalloc((void**)&V, sizeV);
  cudaMalloc((void**)&G, sizeG);

  // Blocking: one thread per (capital, TFP) grid point.
  // NOTE(review): launch config assumes nk is a multiple of block_size and
  // that the kernel bounds-checks — confirm against vfStep.
  const int block_size = 4; ///< Block size for CUDA kernel.
  dim3 dimBlockV(block_size, nz);
  dim3 dimGridV(nk/block_size,1);

  // Compute TFP grid, capital grid and initial VF
  REAL hK[nk], hZ[nz], hP[nz*nz], hV0[nk*nz];
  ar1(params, hZ, hP);
  kGrid(params, hZ, hK);
  vfInit(params, hZ, hV0);

  // Copy capital grid, TFP grid and transition matrix to GPU memory
  cudaMemcpy(K, hK, sizeK, cudaMemcpyHostToDevice);
  cudaMemcpy(Z, hZ, sizeZ, cudaMemcpyHostToDevice);
  cudaMemcpy(P, hP, sizeP, cudaMemcpyHostToDevice);
  cudaMemcpy(V0, hV0, sizeV, cudaMemcpyHostToDevice);

  // Iterate on the value function until the sup-norm change drops below tol
  int count = 0;
  while(fabs(diff) > params.tol){
    // V <- Bellman operator applied to V0 (also fills the policy G)
    vfStep<<<dimGridV,dimBlockV>>>(params,K,Z,P,V0,V,G);
    // V0 <- V0 - V, then locate the largest-magnitude entry of the difference
    if(typeid(realtype) == typeid(singletype)){
      cublasSaxpy(handle, nk*nz, (float*)&negOne, (float*)V, 1, (float*)V0, 1);
      cublasIsamax(handle, nk*nz, (float*)V0, 1, &imax);
    } else if(typeid(realtype) == typeid(doubletype)){
      cublasDaxpy(handle, nk*nz, (double*)&negOne, (double*)V, 1, (double*)V0, 1);
      cublasIdamax(handle, nk*nz, (double*)V0, 1, &imax);
    }
    // cublasI*amax returns a 1-based index, so offset by -1. (The original
    // read V0+imax, one element past the intended one — and past the end of
    // the buffer whenever the maximum was the last element.)
    cudaMemcpy(&diff, V0+imax-1, sizeof(REAL), cudaMemcpyDeviceToHost);
    // Ping-pong: the new VF (V) becomes the next iterate's input; the buffer
    // holding the old difference is reused as the next iterate's output.
    Vtemp = V0;
    V0 = V;
    V = Vtemp;
    ++count;
  }
  // After the final swap the converged value function lives in V0.

  // Compute solution time
  toc = curr_second();
  double solTime = toc - tic;

  // Copy value and policy functions to host memory
  REAL* hV = new REAL[nk*nz];
  REAL* hG = new REAL[nk*nz];
  cudaMemcpy(hV, V0, sizeV, cudaMemcpyDeviceToHost);
  cudaMemcpy(hG, G, sizeG, cudaMemcpyDeviceToHost);

  // Free variables in device memory (V0 and V are the two distinct VF buffers)
  cudaFree(K);
  cudaFree(Z);
  cudaFree(P);
  cudaFree(V0);
  cudaFree(V);
  cudaFree(G);
  cublasDestroy(handle);

  // Write to file (row major)
  ofstream fileStartTime, fileSolTime, fileTotalTime, fileValue, filePolicy;
  fileValue.precision(7);
  filePolicy.precision(7);
  fileStartTime.open("startTimeCUDA-C.dat");
  fileSolTime.open("solTimeCUDA-C.dat");
  fileTotalTime.open("totalTimeCUDA-C.dat");
  fileValue.open("valFunCUDA-C.dat");
  filePolicy.open("polFunCUDA-C.dat");
  fileStartTime << startTime << endl;
  fileSolTime << solTime << endl;
  // Total time goes to its own file. (The original wrote this line to
  // fileSolTime, leaving totalTimeCUDA-C.dat empty.)
  fileTotalTime << startTime+solTime << endl;
  fileValue << nk << endl;
  fileValue << nz << endl;
  filePolicy << nk << endl;
  filePolicy << nz << endl;
  for(int jx = 0 ; jx < nz ; ++jx){
    for(int ix = 0 ; ix < nk ; ++ix){
      fileValue << hV[ix*nz+jx] << endl;
      filePolicy << hG[ix*nz+jx] << endl;
    }
  }
  fileStartTime.close();
  fileSolTime.close();
  fileTotalTime.close();
  fileValue.close();
  filePolicy.close();
  // Release host-side result buffers (previously leaked)
  delete[] hV;
  delete[] hG;
  return 0;
}
|
0e569c37ab1d832517bafa50c45f6057380299b2.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
// When true, check after construction that all distributed workers built
// identical trees (debugging aid; default false).
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogramStorage
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
// Large buffer of zeroed memory, caches histograms
dh::device_vector<typename GradientSumT::ValueT> data_;
// If we run out of storage allocate one histogram at a time
// in overflow. Not cached, overwritten when a new histogram
// is requested
dh::device_vector<typename GradientSumT::ValueT> overflow_;
std::map<int, size_t> overflow_nidx_map_;
int n_bins_;
int device_id_;
// Scalar components per histogram bin (gradient + hessian).
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");
public:
// Start with about 16mb
DeviceHistogramStorage() { data_.reserve(1 << 22); }
// Record the device and per-node histogram width; no allocation happens here.
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
// Zero the cached storage and forget every node-to-offset mapping, so the
// buffer can be reused for the next tree without reallocating.
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
overflow_nidx_map_.clear();
}
// A node's histogram may live in either the main cache or the overflow area.
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend() ||
overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
}
int Bins() const { return n_bins_; }
// Number of scalar items one node's histogram occupies.
size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }
// Reserve zeroed, contiguous histogram slots for a batch of new nodes.
// Below kStopGrowingSize the main cache grows geometrically and entries are
// kept; past that limit the batch goes to the overflow area, whose previous
// contents are discarded (overflow histograms are not cached).
void AllocateHistograms(const std::vector<int>& new_nidxs) {
for (int nidx : new_nidxs) {
CHECK(!HistogramExists(nidx));
}
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
if (used_size >= kStopGrowingSize) {
// Use overflow
// Delete previous entries
overflow_nidx_map_.clear();
overflow_.resize(HistogramSize() * new_nidxs.size());
// Zero memory
auto d_data = overflow_.data().get();
dh::LaunchN(overflow_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0; });
// Append new histograms
for (int nidx : new_nidxs) {
overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
}
} else {
CHECK_GE(data_.size(), used_size);
// Expand if necessary
// NOTE(review): '::max' looks like a hipify artifact of 'std::max' —
// confirm it resolves to the intended overload in this build.
if (data_.size() < new_used_size) {
data_.resize(::max(data_.size() * 2, new_used_size));
}
// Append new histograms
for (int nidx : new_nidxs) {
nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
if (nidx_map_.find(nidx) != nidx_map_.cend()) {
// Fetch from normal cache
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
} else {
// Fetch from overflow
auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
private:
GPUHistEvaluator<GradientSumT> evaluator_;
Context const* ctx_;
public:
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogramStorage<GradientSumT> hist{};
dh::device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::device_vector<int> monotone_constraints;
dh::device_vector<float> update_predictions;
/*! \brief Sum gradient for each node. */
std::vector<GradientPairPrecise> node_sum_gradients;
TrainParam param;
HistRounding<GradientSumT> histogram_rounding;
dh::PinnedMemory pinned;
dh::PinnedMemory pinned2;
common::Monitor monitor;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
BatchParam _batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(256);
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
ctx_->gpu_id);
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPairPrecise{});
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(hipMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), hipMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
EvaluateSplitSharedInputs shared_inputs{
gpu_param, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map,
matrix.min_fvalue,
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
}
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
if (candidates.empty()) return;
dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param}, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
nidx[i * 2] = left_nidx;
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx);
h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1, candidate.split.left_sum,
left_feature_set, hist.GetNodeHistogram(left_nidx)};
h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1, candidate.split.right_sum,
right_feature_set, hist.GetNodeHistogram(right_nidx)};
}
bst_feature_t number_active_features = h_node_inputs[0].feature_set.size();
for (auto input : h_node_inputs) {
CHECK_EQ(input.feature_set.size(), number_active_features)
<< "Current implementation assumes that the number of active features "
"(after sampling) in any node is the same";
}
dh::safe_cuda(hipMemcpyAsync(d_node_inputs.data().get(), h_node_inputs.data(),
h_node_inputs.size() * sizeof(EvaluateSplitInputs),
hipMemcpyDefault));
this->evaluator_.EvaluateSplits(nidx, number_active_features, dh::ToSpan(d_node_inputs),
shared_inputs, dh::ToSpan(entries));
dh::safe_cuda(hipMemcpyAsync(pinned_candidates_out.data(),
entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
hipMemcpyDeviceToHost));
dh::DefaultStream().Sync();
}
void BuildHist(int nidx) {
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair,
d_ridx, d_node_hist, histogram_rounding);
}
// Attempt to do subtraction trick
// return true if succeeded
bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
return false;
}
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
return true;
}
// Extra data for each node that is passed
// to the update position function
struct NodeSplitData {
RegTree::Node split_node;
FeatureType split_type;
common::CatBitField node_cats;
};
void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
if (candidates.empty()) return;
std::vector<int> nidx(candidates.size());
std::vector<int> left_nidx(candidates.size());
std::vector<int> right_nidx(candidates.size());
std::vector<NodeSplitData> split_data(candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto& e = candidates[i];
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
nidx.at(i) = e.nid;
left_nidx.at(i) = split_node.LeftChild();
right_nidx.at(i) = split_node.RightChild();
split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
}
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
row_partitioner->UpdatePositionBatch(
nidx, left_nidx, right_nidx, split_data,
[=] __device__(bst_uint ridx, const NodeSplitData& data) {
// given a row index, returns the node id it belongs to
bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
// Missing value
bool go_left = true;
if (isnan(cut_value)) {
go_left = data.split_node.DefaultLeft();
} else {
if (data.split_type == FeatureType::kCategorical) {
go_left = common::Decision<false>(data.node_cats.Bits(), cut_value,
data.split_node.DefaultLeft());
} else {
go_left = cut_value <= data.split_node.SplitCond();
}
}
return go_left;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
// Prediction cache will not be used with external memory
if (!p_fmat->SingleColBlock()) {
if (task.UpdateTreeLeaf()) {
LOG(FATAL) << "Current objective function can not be used with external memory.";
}
p_out_position->Resize(0);
update_predictions.clear();
return;
}
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
p_out_position);
}
void FinalisePositionInPage(EllpackPageImpl const *page,
const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types,
common::Span<uint32_t const> categories,
common::Span<RegTree::Segment> categories_segments,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
update_predictions.resize(row_partitioner->GetRows().size());
auto d_update_predictions = dh::ToSpan(update_predictions);
p_out_position->SetDevice(ctx_->gpu_id);
p_out_position->Resize(row_partitioner->GetRows().size());
auto new_position_op = [=] __device__(size_t row_id, int position) {
// What happens if user prune the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats = categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision<false>(node_cats, element, node.DefaultLeft());
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
d_update_predictions[row_id] = node.LeafValue();
return position;
}; // NOLINT
auto d_out_position = p_out_position->DeviceSpan();
row_partitioner->FinalisePosition(d_out_position, new_position_op);
dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
bst_node_t position = d_out_position[idx];
d_update_predictions[idx] = d_nodes[position].LeafValue();
bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
d_out_position[idx] = is_row_sampled ? ~position : position;
});
}
bool UpdatePredictionCache(linalg::VectorView<float> out_preds_d, RegTree const* p_tree) {
if (update_predictions.empty()) {
return false;
}
CHECK(p_tree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_update_predictions = dh::ToSpan(update_predictions);
CHECK_EQ(out_preds_d.Size(), d_update_predictions.size());
dh::LaunchN(out_preds_d.Size(), [=] XGBOOST_DEVICE(size_t idx) mutable {
out_preds_d(idx) += d_update_predictions[idx];
});
return true;
}
// num histograms is the number of contiguous histograms in memory to reduce over
void AllReduceHist(int nidx, dh::AllReducer* reducer, int num_histograms) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() *
(sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)) *
num_histograms);
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates, dh::AllReducer* reducer,
const RegTree& tree) {
if (candidates.empty()) return;
// Some nodes we will manually compute histograms
// others we will do by subtraction
std::vector<int> hist_nidx;
std::vector<int> subtraction_nidx;
for (auto& e : candidates) {
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = e.split.right_sum.GetHess() < e.split.left_sum.GetHess();
if (fewer_right) {
hist_nidx.emplace_back(tree[e.nid].RightChild());
subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
} else {
hist_nidx.emplace_back(tree[e.nid].LeftChild());
subtraction_nidx.emplace_back(tree[e.nid].RightChild());
}
}
std::vector<int> all_new = hist_nidx;
all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
// Allocate the histograms
// Guaranteed contiguous memory
hist.AllocateHistograms(all_new);
for (auto nidx : hist_nidx) {
this->BuildHist(nidx);
}
// Reduce all in one go
// This gives much better latency in a distributed setting
// when processing a large batch
this->AllReduceHist(hist_nidx.at(0), reducer, hist_nidx.size());
for (size_t i = 0; i < subtraction_nidx.size(); i++) {
auto build_hist_nidx = hist_nidx.at(i);
auto subtraction_trick_nidx = subtraction_nidx.at(i);
auto parent_nidx = candidates.at(i).nid;
if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer, 1);
}
}
}
// Expand one leaf into two children according to the evaluated split, then
// update per-node bookkeeping (node gradient sums, interaction constraints).
// `candidate` carries the winning split; `p_tree` is mutated in place.
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
  RegTree& tree = *p_tree;
  // Sanity check - have we created a leaf with no training instances?
  if (!rabit::IsDistributed() && row_partitioner) {
    CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
        << "No training instances in this leaf!";
  }
  auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
  auto base_weight = candidate.base_weight;
  auto left_weight = candidate.left_weight * param.learning_rate;
  auto right_weight = candidate.right_weight * param.learning_rate;
  auto is_cat = candidate.split.is_cat;

  if (is_cat) {
    // Categorical split: copy the category bitset out of the evaluator and
    // store it on the tree node.
    CHECK_LT(candidate.split.fvalue, std::numeric_limits<bst_cat_t>::max())
        << "Categorical feature value too large.";
    std::vector<uint32_t> split_cats;
    CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
    auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
    auto max_cat = candidate.split.MaxCat();
    split_cats.resize(common::CatBitField::ComputeStorageSize(max_cat + 1), 0);
    CHECK_LE(split_cats.size(), h_cats.size());
    std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());

    tree.ExpandCategorical(
        candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
        base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
        candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
  } else {
    tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
                    candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
                    candidate.split.loss_chg, parent_sum.GetHess(),
                    candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
  }
  evaluator_.ApplyTreeSplit(candidate, p_tree);

  const auto& parent = tree[candidate.nid];
  // FIX: use std::max explicitly.  The HIP conversion had rewritten this to
  // the unqualified `::max`, which is a CUDA-header convenience overload and
  // not a portable host-side function (see the CUDA twin of this file, which
  // uses std::max here).
  std::size_t max_nidx = std::max(parent.LeftChild(), parent.RightChild());
  // Grow the per-node gradient-sum storage as needed (doubling strategy).
  if (node_sum_gradients.size() <= max_nidx) {
    node_sum_gradients.resize(max_nidx * 2 + 1);
  }
  node_sum_gradients[parent.LeftChild()] = candidate.split.left_sum;
  node_sum_gradients[parent.RightChild()] = candidate.split.right_sum;
  interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
                                parent.RightChild());
}
// Initialise the root node: compute the global gradient sum, build and
// all-reduce the root histogram, set the root's stats/leaf value, and
// evaluate the first split candidate.
GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
  constexpr bst_node_t kRootNIdx = 0;
  dh::XGBCachingDeviceAllocator<char> alloc;
  // Promote per-row gradient pairs to double precision before reduction.
  auto gpair_it = dh::MakeTransformIterator<GradientPairPrecise>(
      dh::tbegin(gpair), [] __device__(auto const& gpair) { return GradientPairPrecise{gpair}; });
  GradientPairPrecise root_sum =
      dh::Reduce(thrust::hip::par(alloc), gpair_it, gpair_it + gpair.size(),
                 GradientPairPrecise{}, thrust::plus<GradientPairPrecise>{});
  // Sum across workers; treats GradientPairPrecise as two contiguous doubles
  // (grad, hess) — presumably guaranteed by its layout; verify if it changes.
  rabit::Allreduce<rabit::op::Sum, double>(reinterpret_cast<double*>(&root_sum), 2);

  hist.AllocateHistograms({kRootNIdx});
  this->BuildHist(kRootNIdx);
  this->AllReduceHist(kRootNIdx, reducer, 1);

  // Remember root stats
  node_sum_gradients[kRootNIdx] = root_sum;
  p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
  auto weight = CalcWeight(param, root_sum);
  p_tree->Stat(kRootNIdx).base_weight = weight;
  // Leaf value is pre-scaled by the learning rate.
  (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);

  // Generate first split
  auto root_entry = this->EvaluateRootSplit(root_sum);
  return root_entry;
}
// Grow one tree: initialise the root, then repeatedly pop a batch of
// expandable leaves from the driver, apply their splits, build/evaluate child
// histograms, and push the new candidates back.  Finishes by recording each
// row's final leaf position for the prediction cache.
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo task,
                RegTree* p_tree, dh::AllReducer* reducer,
                HostDeviceVector<bst_node_t>* p_out_position) {
  auto& tree = *p_tree;
  // Process maximum 32 nodes at a time
  Driver<GPUExpandEntry> driver(param, 32);

  monitor.Start("Reset");
  this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
  monitor.Stop("Reset");

  monitor.Start("InitRoot");
  driver.Push({ this->InitRoot(p_tree, reducer) });
  monitor.Stop("InitRoot");

  // The set of leaves that can be expanded asynchronously
  auto expand_set = driver.Pop();
  while (!expand_set.empty()) {
    for (auto& candidate : expand_set) {
      this->ApplySplit(candidate, p_tree);
    }
    // Get the candidates we are allowed to expand further
    // e.g. We do not bother further processing nodes whose children are beyond max depth
    std::vector<GPUExpandEntry> filtered_expand_set;
    std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
                 [&](const auto& e) { return driver.IsChildValid(e); });

    // Pinned host buffer that receives the evaluated child splits.
    auto new_candidates =
        pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());

    monitor.Start("UpdatePosition");
    // Update position is only run when child is valid, instead of right after apply
    // split (as in approx tree method). Hence we have the finalise position call
    // in GPU Hist.
    this->UpdatePosition(filtered_expand_set, p_tree);
    monitor.Stop("UpdatePosition");

    monitor.Start("BuildHist");
    this->BuildHistLeftRight(filtered_expand_set, reducer, tree);
    monitor.Stop("BuildHist");

    monitor.Start("EvaluateSplits");
    this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
    monitor.Stop("EvaluateSplits");
    // Ensure the async copy into `new_candidates` has completed before the
    // host reads it.
    dh::DefaultStream().Sync();

    driver.Push(new_candidates.begin(), new_candidates.end());
    expand_set = driver.Pop();
  }

  monitor.Start("FinalisePosition");
  this->FinalisePosition(p_tree, p_fmat, task, p_out_position);
  monitor.Stop("FinalisePosition");
}
};
// Tree updater implementing the single-GPU (per-worker) histogram method.
// Owns a GPUHistMakerDevice that does the actual work; this class handles
// configuration (de)serialisation, lazy initialisation and the per-tree loop.
class GPUHistMaker : public TreeUpdater {
  using GradientSumT = GradientPairPrecise;

 public:
  explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task)
      : TreeUpdater(ctx), task_{task} {};
  void Configure(const Args& args) override {
    // Used in test to count how many configurations are performed
    LOG(DEBUG) << "[GPU Hist]: Configure";
    param_.UpdateAllowUnknown(args);
    hist_maker_param_.UpdateAllowUnknown(args);
    dh::CheckComputeCapability();
    // Force InitDataOnce() to run again on the next Update().
    initialised_ = false;

    monitor_.Init("updater_gpu_hist");
  }

  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
    initialised_ = false;
    // BUG FIX: restore `&param_` here — the address-of expression had been
    // corrupted into the mojibake `¶m_` (the HTML entity `&para` collapsed
    // to `¶`), which does not compile.
    FromJson(config.at("train_param"), &param_);
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
    out["train_param"] = ToJson(param_);
  }

  ~GPUHistMaker() {  // NOLINT
    dh::GlobalMemoryLogger().Log();
  }

  // Build one tree per element of `trees` from the shared gradient vector.
  void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
              common::Span<HostDeviceVector<bst_node_t>> out_position,
              const std::vector<RegTree*>& trees) override {
    monitor_.Start("Update");

    // rescale learning rate according to size of trees
    float lr = param_.learning_rate;
    param_.learning_rate = lr / trees.size();

    // build tree
    try {
      size_t t_idx{0};
      for (xgboost::RegTree* tree : trees) {
        this->UpdateTree(gpair, dmat, tree, &out_position[t_idx]);

        if (hist_maker_param_.debug_synchronize) {
          this->CheckTreesSynchronized(tree);
        }
        ++t_idx;
      }
      dh::safe_cuda(hipGetLastError());
    } catch (const std::exception& e) {
      LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
    }

    // Restore the caller-visible learning rate.
    param_.learning_rate = lr;
    monitor_.Stop("Update");
  }

  // One-time setup: all-reducer, column sampling seed sync across workers,
  // the ELLPACK page and the device maker itself.
  void InitDataOnce(DMatrix* dmat) {
    CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
    info_ = &dmat->Info();
    reducer_.Init({ctx_->gpu_id});  // NOLINT

    // Synchronise the column sampling seed
    uint32_t column_sampling_seed = common::GlobalRandom()();
    rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);

    BatchParam batch_param{
        ctx_->gpu_id,
        param_.max_bin,
    };
    auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
    dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
    info_->feature_types.SetDevice(ctx_->gpu_id);
    maker.reset(new GPUHistMakerDevice<GradientSumT>(
        ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_,
        column_sampling_seed, info_->num_col_, batch_param));

    p_last_fmat_ = dmat;
    initialised_ = true;
  }

  void InitData(DMatrix* dmat, RegTree const* p_tree) {
    if (!initialised_) {
      monitor_.Start("InitDataOnce");
      this->InitDataOnce(dmat);
      monitor_.Stop("InitDataOnce");
    }
    p_last_tree_ = p_tree;
  }

  // Only call this method for testing
  void CheckTreesSynchronized(RegTree* local_tree) const {
    std::string s_model;
    common::MemoryBufferStream fs(&s_model);
    int rank = rabit::GetRank();
    if (rank == 0) {
      local_tree->Save(&fs);
    }
    fs.Seek(0);
    rabit::Broadcast(&s_model, 0);
    RegTree reference_tree{};  // rank 0 tree
    reference_tree.Load(&fs);
    CHECK(*local_tree == reference_tree);
  }

  void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
                  HostDeviceVector<bst_node_t>* p_out_position) {
    monitor_.Start("InitData");
    this->InitData(p_fmat, p_tree);
    monitor_.Stop("InitData");

    gpair->SetDevice(ctx_->gpu_id);
    maker->UpdateTree(gpair, p_fmat, task_, p_tree, &reducer_, p_out_position);
  }

  // Returns false when the cache cannot be used (different DMatrix or the
  // maker has not been initialised yet).
  bool UpdatePredictionCache(const DMatrix* data,
                             linalg::VectorView<bst_float> p_out_preds) override {
    if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
      return false;
    }
    monitor_.Start("UpdatePredictionCache");
    bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
    monitor_.Stop("UpdatePredictionCache");
    return result;
  }

  TrainParam param_;                                        // NOLINT
  MetaInfo* info_{};                                        // NOLINT
  std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker;  // NOLINT

  char const* Name() const override { return "grow_gpu_hist"; }

 private:
  bool initialised_{false};

  GPUHistMakerTrainParam hist_maker_param_;

  dh::AllReducer reducer_;

  DMatrix* p_last_fmat_{nullptr};
  RegTree const* p_last_tree_{nullptr};
  ObjInfo task_;

  common::Monitor monitor_;
};
// Register the updater under the name used by `tree_method=gpu_hist`.
// Skipped in unit-test builds to avoid duplicate registration.
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
    .describe("Grow tree with GPU.")
    .set_body([](GenericParameter const* tparam, ObjInfo task) {
      return new GPUHistMaker(tparam, task);
    });
#endif  // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| 0e569c37ab1d832517bafa50c45f6057380299b2.cu | /*!
* Copyright 2017-2022 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <utility>
#include <vector>
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/bitfield.h"
#include "../common/timer.h"
#include "../common/categorical.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "driver.h"
#include "updater_gpu_common.cuh"
#include "split_evaluator.h"
#include "constraints.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "xgboost/task.h"
#include "xgboost/tree_model.h"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
    : public XGBoostParameter<GPUHistMakerTrainParam> {
  // When true, every tree is compared against rank 0's copy after
  // construction (debug aid for distributed training).
  bool debug_synchronize;
  // declare parameters
  DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
    DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
        "Check if all distributed tree are identical after tree construction.");
  }
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
 * \struct DeviceHistogramStorage
 *
 * \summary Data storage for node histograms on device. Automatically expands.
 *
 * Histograms for node indices are packed contiguously into `data_` until it
 * would exceed kStopGrowingSize entries; after that, each new batch of
 * histograms lives in a non-cached `overflow_` buffer that is recycled on the
 * next allocation.
 *
 * \tparam GradientSumT histogram entry type.
 * \tparam kStopGrowingSize Do not grow beyond this size
 *
 * \author Rory
 * \date 28/07/2018
 */
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
 private:
  /*! \brief Map nidx to starting index of its histogram. */
  std::map<int, size_t> nidx_map_;
  // Large buffer of zeroed memory, caches histograms
  dh::device_vector<typename GradientSumT::ValueT> data_;
  // If we run out of storage allocate one histogram at a time
  // in overflow. Not cached, overwritten when a new histogram
  // is requested
  dh::device_vector<typename GradientSumT::ValueT> overflow_;
  std::map<int, size_t> overflow_nidx_map_;
  int n_bins_;
  int device_id_;
  static constexpr size_t kNumItemsInGradientSum =
      sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
  static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");

 public:
  // Start with about 16mb
  DeviceHistogramStorage() { data_.reserve(1 << 22); }
  void Init(int device_id, int n_bins) {
    this->n_bins_ = n_bins;
    this->device_id_ = device_id;
  }

  // Zero the cached buffer and forget all node -> offset mappings.
  void Reset() {
    auto d_data = data_.data().get();
    dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
    nidx_map_.clear();
    overflow_nidx_map_.clear();
  }

  bool HistogramExists(int nidx) const {
    return nidx_map_.find(nidx) != nidx_map_.cend() ||
           overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
  }
  int Bins() const { return n_bins_; }
  // Number of ValueT items one node histogram occupies.
  size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
  dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }

  // Allocate zeroed, contiguous storage for the given (previously
  // unallocated) node indices, spilling to the overflow buffer once the
  // cached buffer reaches kStopGrowingSize.
  void AllocateHistograms(const std::vector<int>& new_nidxs) {
    for (int nidx : new_nidxs) {
      CHECK(!HistogramExists(nidx));
    }
    // Number of items currently used in data
    const size_t used_size = nidx_map_.size() * HistogramSize();
    const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
    if (used_size >= kStopGrowingSize) {
      // Use overflow
      // Delete previous entries
      overflow_nidx_map_.clear();
      overflow_.resize(HistogramSize() * new_nidxs.size());
      // Zero memory
      auto d_data = overflow_.data().get();
      // FIX: use the float literal 0.0f for consistency with Reset(); the
      // previous 0.0 is a double constant in device code.
      dh::LaunchN(overflow_.size(),
                  [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
      // Append new histograms
      for (int nidx : new_nidxs) {
        overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
      }
    } else {
      CHECK_GE(data_.size(), used_size);
      // Expand if necessary
      if (data_.size() < new_used_size) {
        data_.resize(std::max(data_.size() * 2, new_used_size));
      }
      // Append new histograms
      for (int nidx : new_nidxs) {
        nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
      }
    }

    CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
  }

  /**
   * \summary Return pointer to histogram memory for a given node.
   * \param nidx    Tree node index.
   * \return        hist pointer.
   */
  common::Span<GradientSumT> GetNodeHistogram(int nidx) {
    CHECK(this->HistogramExists(nidx));

    if (nidx_map_.find(nidx) != nidx_map_.cend()) {
      // Fetch from normal cache
      auto ptr = data_.data().get() + nidx_map_.at(nidx);
      return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
    } else {
      // Fetch from overflow
      auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
      return common::Span<GradientSumT>(reinterpret_cast<GradientSumT*>(ptr), n_bins_);
    }
  }
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
 private:
  GPUHistEvaluator<GradientSumT> evaluator_;  // split evaluation (device side)
  Context const* ctx_;                        // holds the device ordinal

 public:
  EllpackPageImpl const* page;  // quantised feature matrix (may be a sample)
  common::Span<FeatureType const> feature_types;
  BatchParam batch_param;

  std::unique_ptr<RowPartitioner> row_partitioner;
  DeviceHistogramStorage<GradientSumT> hist{};

  dh::device_vector<GradientPair> d_gpair;  // storage for gpair;
  common::Span<GradientPair> gpair;         // view used for building (post-sampling)

  dh::device_vector<int> monotone_constraints;
  // Per-row leaf values cached for UpdatePredictionCache.
  dh::device_vector<float> update_predictions;

  /*! \brief Sum gradient for each node. */
  std::vector<GradientPairPrecise> node_sum_gradients;

  TrainParam param;

  HistRounding<GradientSumT> histogram_rounding;

  dh::PinnedMemory pinned;
  dh::PinnedMemory pinned2;

  common::Monitor monitor;
  common::ColumnSampler column_sampler;
  FeatureInteractionConstraintDevice interaction_constraints;

  std::unique_ptr<GradientBasedSampler> sampler;

  std::unique_ptr<FeatureGroups> feature_groups;
  // Construct the per-GPU worker.  Sets up the gradient-based sampler,
  // histogram storage sized to the page's total bin count, and the shared
  // memory aware feature grouping.
  GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
                     common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
                     TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
                     BatchParam _batch_param)
      : evaluator_{_param, n_features, ctx->gpu_id},
        ctx_(ctx),
        page(_page),
        feature_types{_feature_types},
        param(std::move(_param)),
        column_sampler(column_sampler_seed),
        interaction_constraints(param, n_features),
        batch_param(std::move(_batch_param)) {
    sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
                                           param.sampling_method));
    if (!param.monotone_constraints.empty()) {
      // Copy assigning an empty vector causes an exception in MSVC debug builds
      monotone_constraints = param.monotone_constraints;
    }
    // Pre-size the node gradient-sum table; ApplySplit grows it on demand.
    node_sum_gradients.resize(256);

    // Init histogram
    hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
    monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
    feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
                                           dh::MaxSharedMemoryOptin(ctx_->gpu_id),
                                           sizeof(GradientSumT)));
  }
  // Bind the correct device before members release their device memory.
  ~GPUHistMakerDevice() {  // NOLINT
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
  }
  // Reset values for each update iteration
  // Note that the column sampler must be passed by value because it is not
  // thread safe
  void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
    auto const& info = dmat->Info();
    this->column_sampler.Init(num_columns, info.feature_weights.HostVector(),
                              param.colsample_bynode, param.colsample_bylevel,
                              param.colsample_bytree);
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));

    this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
                           ctx_->gpu_id);

    this->interaction_constraints.Reset();
    std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), GradientPairPrecise{});

    // Copy this iteration's gradients to the device-local buffer.
    if (d_gpair.size() != dh_gpair->Size()) {
      d_gpair.resize(dh_gpair->Size());
    }
    dh::safe_cuda(cudaMemcpyAsync(
        d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
        dh_gpair->Size() * sizeof(GradientPair), cudaMemcpyDeviceToDevice));
    // Sampling may replace both the page and the gradient view.
    auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
    page = sample.page;
    gpair = sample.gpair;

    histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);

    row_partitioner.reset();  // Release the device memory first before reallocating
    row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));

    hist.Reset();
  }
  // Evaluate the best split for the root node given its (already reduced)
  // gradient sum; assumes the root histogram has been built.
  GPUExpandEntry EvaluateRootSplit(GradientPairPrecise root_sum) {
    int nidx = RegTree::kRoot;
    GPUTrainingParam gpu_param(param);
    // Column sampling at depth 0, filtered by interaction constraints.
    auto sampled_features = column_sampler.GetFeatureSet(0);
    sampled_features->SetDevice(ctx_->gpu_id);
    common::Span<bst_feature_t> feature_set =
        interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
    auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
    EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
    EvaluateSplitSharedInputs shared_inputs{
        gpu_param, feature_types, matrix.feature_segments, matrix.gidx_fvalue_map,
        matrix.min_fvalue,
    };
    auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
    return split;
  }
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
if (candidates.empty()) return;
dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param}, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
nidx[i * 2] = left_nidx;
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(), left_nidx);
h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1, candidate.split.left_sum,
left_feature_set, hist.GetNodeHistogram(left_nidx)};
h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1, candidate.split.right_sum,
right_feature_set, hist.GetNodeHistogram(right_nidx)};
}
bst_feature_t number_active_features = h_node_inputs[0].feature_set.size();
for (auto input : h_node_inputs) {
CHECK_EQ(input.feature_set.size(), number_active_features)
<< "Current implementation assumes that the number of active features "
"(after sampling) in any node is the same";
}
dh::safe_cuda(cudaMemcpyAsync(d_node_inputs.data().get(), h_node_inputs.data(),
h_node_inputs.size() * sizeof(EvaluateSplitInputs),
cudaMemcpyDefault));
this->evaluator_.EvaluateSplits(nidx, number_active_features, dh::ToSpan(d_node_inputs),
shared_inputs, dh::ToSpan(entries));
dh::safe_cuda(cudaMemcpyAsync(pinned_candidates_out.data(),
entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
cudaMemcpyDeviceToHost));
dh::DefaultStream().Sync();
}
  // Build the gradient histogram for node `nidx` from the rows currently
  // assigned to it by the row partitioner.
  void BuildHist(int nidx) {
    auto d_node_hist = hist.GetNodeHistogram(nidx);
    auto d_ridx = row_partitioner->GetRows(nidx);
    BuildGradientHistogram(page->GetDeviceAccessor(ctx_->gpu_id),
                           feature_groups->DeviceAccessor(ctx_->gpu_id), gpair,
                           d_ridx, d_node_hist, histogram_rounding);
  }
  // Attempt to do subtraction trick
  // return true if succeeded
  // (sibling histogram = parent histogram - built-sibling histogram,
  //  element-wise over all bins)
  bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
    if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
      return false;
    }
    auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
    auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
    auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);

    dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
      d_node_hist_subtraction[idx] =
          d_node_hist_parent[idx] - d_node_hist_histogram[idx];
    });
    return true;
  }
  // Extra data for each node that is passed
  // to the update position function
  struct NodeSplitData {
    RegTree::Node split_node;       // the node being split (condition, children)
    FeatureType split_type;         // numerical vs categorical split
    common::CatBitField node_cats;  // category bitset, used for categorical splits
  };
  // Route the rows of each freshly split node to its left or right child,
  // for the whole batch of candidates in one partitioner call.
  void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
    if (candidates.empty()) return;
    std::vector<int> nidx(candidates.size());
    std::vector<int> left_nidx(candidates.size());
    std::vector<int> right_nidx(candidates.size());
    std::vector<NodeSplitData> split_data(candidates.size());
    for (size_t i = 0; i < candidates.size(); i++) {
      auto& e = candidates[i];
      RegTree::Node split_node = (*p_tree)[e.nid];
      auto split_type = p_tree->NodeSplitType(e.nid);
      nidx.at(i) = e.nid;
      left_nidx.at(i) = split_node.LeftChild();
      right_nidx.at(i) = split_node.RightChild();
      split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
    }

    auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
    row_partitioner->UpdatePositionBatch(
        nidx, left_nidx, right_nidx, split_data,
        [=] __device__(bst_uint ridx, const NodeSplitData& data) {
          // given a row index, returns the node id it belongs to
          bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
          // Missing value
          bool go_left = true;
          if (isnan(cut_value)) {
            // Missing values follow the node's learned default direction.
            go_left = data.split_node.DefaultLeft();
          } else {
            if (data.split_type == FeatureType::kCategorical) {
              go_left = common::Decision<false>(data.node_cats.Bits(), cut_value,
                                                data.split_node.DefaultLeft());
            } else {
              go_left = cut_value <= data.split_node.SplitCond();
            }
          }
          return go_left;
        });
  }
  // After tree update is finished, update the position of all training
  // instances to their final leaf. This information is used later to update the
  // prediction cache
  void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
                        HostDeviceVector<bst_node_t>* p_out_position) {
    // Prediction cache will not be used with external memory
    if (!p_fmat->SingleColBlock()) {
      if (task.UpdateTreeLeaf()) {
        LOG(FATAL) << "Current objective function can not be used with external memory.";
      }
      // Signal "no cached positions/predictions" to downstream consumers.
      p_out_position->Resize(0);
      update_predictions.clear();
      return;
    }

    // Copy the finished tree's nodes (and, for categorical splits, the
    // category bitsets) to the device for the traversal below.
    dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
    dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
                                  d_nodes.size() * sizeof(RegTree::Node),
                                  cudaMemcpyHostToDevice));
    auto const& h_split_types = p_tree->GetSplitTypes();
    auto const& categories = p_tree->GetSplitCategories();
    auto const& categories_segments = p_tree->GetSplitCategoriesPtr();

    dh::caching_device_vector<FeatureType> d_split_types;
    dh::caching_device_vector<uint32_t> d_categories;
    dh::caching_device_vector<RegTree::Segment> d_categories_segments;

    if (!categories.empty()) {
      dh::CopyToD(h_split_types, &d_split_types);
      dh::CopyToD(categories, &d_categories);
      dh::CopyToD(categories_segments, &d_categories_segments);
    }

    FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
                           dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
                           p_out_position);
  }
  // Walk every row of `page` down the device copy of the tree to its final
  // leaf, recording the leaf position (bit-complemented for sampled-out rows)
  // and caching the leaf value for UpdatePredictionCache.
  void FinalisePositionInPage(EllpackPageImpl const *page,
                              const common::Span<RegTree::Node> d_nodes,
                              common::Span<FeatureType const> d_feature_types,
                              common::Span<uint32_t const> categories,
                              common::Span<RegTree::Segment> categories_segments,
                              HostDeviceVector<bst_node_t>* p_out_position) {
    auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
    auto d_gpair = this->gpair;
    update_predictions.resize(row_partitioner->GetRows().size());
    auto d_update_predictions = dh::ToSpan(update_predictions);
    p_out_position->SetDevice(ctx_->gpu_id);
    p_out_position->Resize(row_partitioner->GetRows().size());

    auto new_position_op = [=] __device__(size_t row_id, int position) {
      // What happens if user prune the tree?
      if (!d_matrix.IsInRange(row_id)) {
        return RowPartitioner::kIgnoredTreePosition;
      }
      // Descend from the current position until a leaf is reached.
      auto node = d_nodes[position];

      while (!node.IsLeaf()) {
        bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
        // Missing value
        if (isnan(element)) {
          position = node.DefaultChild();
        } else {
          bool go_left = true;
          if (common::IsCat(d_feature_types, position)) {
            auto node_cats = categories.subspan(categories_segments[position].beg,
                                                categories_segments[position].size);
            go_left = common::Decision<false>(node_cats, element, node.DefaultLeft());
          } else {
            go_left = element <= node.SplitCond();
          }
          if (go_left) {
            position = node.LeftChild();
          } else {
            position = node.RightChild();
          }
        }

        node = d_nodes[position];
      }

      // NOTE(review): this write is repeated in the kernel below from the
      // final positions — presumably redundant here; confirm before removing.
      d_update_predictions[row_id] = node.LeafValue();
      return position;
    };  // NOLINT

    auto d_out_position = p_out_position->DeviceSpan();
    row_partitioner->FinalisePosition(d_out_position, new_position_op);

    dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
      bst_node_t position = d_out_position[idx];
      d_update_predictions[idx] = d_nodes[position].LeafValue();
      // A zero hessian marks a row excluded by sampling; such rows are
      // flagged with a bit-complemented position.
      bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
      d_out_position[idx] = is_row_sampled ? ~position : position;
    });
  }
  // Add the cached per-row leaf values (filled by FinalisePositionInPage) to
  // the prediction vector; returns false when no cache is available.
  bool UpdatePredictionCache(linalg::VectorView<float> out_preds_d, RegTree const* p_tree) {
    if (update_predictions.empty()) {
      return false;
    }
    CHECK(p_tree);
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
    auto d_update_predictions = dh::ToSpan(update_predictions);
    CHECK_EQ(out_preds_d.Size(), d_update_predictions.size());
    dh::LaunchN(out_preds_d.Size(), [=] XGBOOST_DEVICE(size_t idx) mutable {
      out_preds_d(idx) += d_update_predictions[idx];
    });
    return true;
  }
  // num histograms is the number of contiguous histograms in memory to reduce over
  // (relies on AllocateHistograms packing them contiguously starting at nidx).
  void AllReduceHist(int nidx, dh::AllReducer* reducer, int num_histograms) {
    monitor.Start("AllReduce");
    auto d_node_hist = hist.GetNodeHistogram(nidx).data();
    // Reduce as flat ValueT scalars: bins * items-per-gradient * histograms.
    reducer->AllReduceSum(reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
                          reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
                          page->Cuts().TotalBins() *
                              (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)) *
                              num_histograms);

    monitor.Stop("AllReduce");
  }
  /**
   * \brief Build GPU local histograms for the left and right child of some parent node
   *
   * For each candidate only the smaller child (by hessian sum) is built
   * directly; the sibling is derived with the subtraction trick where
   * possible.
   */
  void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates, dh::AllReducer* reducer,
                          const RegTree& tree) {
    if (candidates.empty()) return;
    // Some nodes we will manually compute histograms
    // others we will do by subtraction
    std::vector<int> hist_nidx;
    std::vector<int> subtraction_nidx;
    for (auto& e : candidates) {
      // Decide whether to build the left histogram or right histogram
      // Use sum of Hessian as a heuristic to select node with fewest training instances
      bool fewer_right = e.split.right_sum.GetHess() < e.split.left_sum.GetHess();
      if (fewer_right) {
        hist_nidx.emplace_back(tree[e.nid].RightChild());
        subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
      } else {
        hist_nidx.emplace_back(tree[e.nid].LeftChild());
        subtraction_nidx.emplace_back(tree[e.nid].RightChild());
      }
    }
    std::vector<int> all_new = hist_nidx;
    all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
    // Allocate the histograms
    // Guaranteed contiguous memory
    hist.AllocateHistograms(all_new);

    for (auto nidx : hist_nidx) {
      this->BuildHist(nidx);
    }

    // Reduce all in one go
    // This gives much better latency in a distributed setting
    // when processing a large batch
    this->AllReduceHist(hist_nidx.at(0), reducer, hist_nidx.size());

    for (size_t i = 0; i < subtraction_nidx.size(); i++) {
      auto build_hist_nidx = hist_nidx.at(i);
      auto subtraction_trick_nidx = subtraction_nidx.at(i);
      auto parent_nidx = candidates.at(i).nid;

      if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
        // Calculate other histogram manually
        this->BuildHist(subtraction_trick_nidx);
        this->AllReduceHist(subtraction_trick_nidx, reducer, 1);
      }
    }
  }
  // Expand one leaf into two children according to the evaluated split, then
  // update per-node bookkeeping (node gradient sums, interaction constraints).
  void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
    RegTree& tree = *p_tree;
    // Sanity check - have we created a leaf with no training instances?
    if (!rabit::IsDistributed() && row_partitioner) {
      CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
          << "No training instances in this leaf!";
    }
    auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
    auto base_weight = candidate.base_weight;
    auto left_weight = candidate.left_weight * param.learning_rate;
    auto right_weight = candidate.right_weight * param.learning_rate;
    auto is_cat = candidate.split.is_cat;

    if (is_cat) {
      // Categorical split: copy the category bitset out of the evaluator and
      // store it on the tree node.
      CHECK_LT(candidate.split.fvalue, std::numeric_limits<bst_cat_t>::max())
          << "Categorical feature value too large.";
      std::vector<uint32_t> split_cats;
      CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
      auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
      auto max_cat = candidate.split.MaxCat();
      split_cats.resize(common::CatBitField::ComputeStorageSize(max_cat + 1), 0);
      CHECK_LE(split_cats.size(), h_cats.size());
      std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());

      tree.ExpandCategorical(
          candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
          base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_sum.GetHess(),
          candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
    } else {
      tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
                      candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
                      candidate.split.loss_chg, parent_sum.GetHess(),
                      candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
    }
    evaluator_.ApplyTreeSplit(candidate, p_tree);

    const auto& parent = tree[candidate.nid];
    std::size_t max_nidx = std::max(parent.LeftChild(), parent.RightChild());
    // Grow as needed
    if (node_sum_gradients.size() <= max_nidx) {
      node_sum_gradients.resize(max_nidx * 2 + 1);
    }
    node_sum_gradients[parent.LeftChild()] = candidate.split.left_sum;
    node_sum_gradients[parent.RightChild()] = candidate.split.right_sum;
    interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
                                  parent.RightChild());
  }
  // Initialise the root node: compute the global gradient sum, build and
  // all-reduce the root histogram, set the root's stats/leaf value, and
  // evaluate the first split candidate.
  GPUExpandEntry InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
    constexpr bst_node_t kRootNIdx = 0;
    dh::XGBCachingDeviceAllocator<char> alloc;
    // Promote per-row gradient pairs to double precision before reduction.
    auto gpair_it = dh::MakeTransformIterator<GradientPairPrecise>(
        dh::tbegin(gpair), [] __device__(auto const& gpair) { return GradientPairPrecise{gpair}; });
    GradientPairPrecise root_sum =
        dh::Reduce(thrust::cuda::par(alloc), gpair_it, gpair_it + gpair.size(),
                   GradientPairPrecise{}, thrust::plus<GradientPairPrecise>{});
    // Sum across workers; treats GradientPairPrecise as two contiguous
    // doubles (grad, hess) — presumably guaranteed by its layout; verify if
    // it changes.
    rabit::Allreduce<rabit::op::Sum, double>(reinterpret_cast<double*>(&root_sum), 2);

    hist.AllocateHistograms({kRootNIdx});
    this->BuildHist(kRootNIdx);
    this->AllReduceHist(kRootNIdx, reducer, 1);

    // Remember root stats
    node_sum_gradients[kRootNIdx] = root_sum;
    p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
    auto weight = CalcWeight(param, root_sum);
    p_tree->Stat(kRootNIdx).base_weight = weight;
    // Leaf value is pre-scaled by the learning rate.
    (*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);

    // Generate first split
    auto root_entry = this->EvaluateRootSplit(root_sum);
    return root_entry;
  }
  // Grow a single tree.  Drives the expansion loop: apply the currently best
  // splits, repartition rows, build child histograms, evaluate new candidate
  // splits; repeat until the driver yields no more expandable nodes, then
  // finalise the per-row node positions.
  void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat, ObjInfo task,
                  RegTree* p_tree, dh::AllReducer* reducer,
                  HostDeviceVector<bst_node_t>* p_out_position) {
    auto& tree = *p_tree;
    // Process maximum 32 nodes at a time
    Driver<GPUExpandEntry> driver(param, 32);
    monitor.Start("Reset");
    this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
    monitor.Stop("Reset");
    monitor.Start("InitRoot");
    driver.Push({ this->InitRoot(p_tree, reducer) });
    monitor.Stop("InitRoot");
    // The set of leaves that can be expanded asynchronously
    auto expand_set = driver.Pop();
    while (!expand_set.empty()) {
      for (auto& candidate : expand_set) {
        this->ApplySplit(candidate, p_tree);
      }
      // Get the candidates we are allowed to expand further
      // e.g. We do not bother further processing nodes whose children are beyond max depth
      std::vector<GPUExpandEntry> filtered_expand_set;
      std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
                   [&](const auto& e) { return driver.IsChildValid(e); });
      // Two child candidates per expanded node, staged through the `pinned` pool.
      auto new_candidates =
          pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());
      monitor.Start("UpdatePosition");
      // Update position is only run when child is valid, instead of right after apply
      // split (as in approx tree method). Hence we have the finalise position call
      // in GPU Hist.
      this->UpdatePosition(filtered_expand_set, p_tree);
      monitor.Stop("UpdatePosition");
      monitor.Start("BuildHist");
      this->BuildHistLeftRight(filtered_expand_set, reducer, tree);
      monitor.Stop("BuildHist");
      monitor.Start("EvaluateSplits");
      this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
      monitor.Stop("EvaluateSplits");
      // Wait for the asynchronous evaluation writing into `new_candidates`.
      dh::DefaultStream().Sync();
      driver.Push(new_candidates.begin(), new_candidates.end());
      expand_set = driver.Pop();
    }
    monitor.Start("FinalisePosition");
    this->FinalisePosition(p_tree, p_fmat, task, p_out_position);
    monitor.Stop("FinalisePosition");
  }
};
// TreeUpdater that grows trees on a single GPU with histogram-based split
// finding (registered as "grow_gpu_hist").  The heavy lifting is delegated
// to a GPUHistMakerDevice instance created lazily on first use.
class GPUHistMaker : public TreeUpdater {
  using GradientSumT = GradientPairPrecise;
 public:
  explicit GPUHistMaker(GenericParameter const* ctx, ObjInfo task)
      : TreeUpdater(ctx), task_{task} {};
  // Parse hyper-parameters and force re-initialisation of per-DMatrix state
  // on the next Update() call.
  void Configure(const Args& args) override {
    // Used in test to count how many configurations are performed
    LOG(DEBUG) << "[GPU Hist]: Configure";
    param_.UpdateAllowUnknown(args);
    hist_maker_param_.UpdateAllowUnknown(args);
    dh::CheckComputeCapability();
    initialised_ = false;
    monitor_.Init("updater_gpu_hist");
  }
  // Restore training / GPU-specific parameters from serialised JSON config.
  void LoadConfig(Json const& in) override {
    auto const& config = get<Object const>(in);
    FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
    initialised_ = false;
    FromJson(config.at("train_param"), &param_);
  }
  void SaveConfig(Json* p_out) const override {
    auto& out = *p_out;
    out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
    out["train_param"] = ToJson(param_);
  }
  ~GPUHistMaker() {  // NOLINT
    dh::GlobalMemoryLogger().Log();
  }
  // Build one tree per element of `trees` from the given gradients.  The
  // learning rate is temporarily divided by the number of trees so the
  // combined update matches a single-tree update, and restored afterwards.
  void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
              common::Span<HostDeviceVector<bst_node_t>> out_position,
              const std::vector<RegTree*>& trees) override {
    monitor_.Start("Update");
    // rescale learning rate according to size of trees
    float lr = param_.learning_rate;
    param_.learning_rate = lr / trees.size();
    // build tree
    try {
      size_t t_idx{0};
      for (xgboost::RegTree* tree : trees) {
        this->UpdateTree(gpair, dmat, tree, &out_position[t_idx]);
        if (hist_maker_param_.debug_synchronize) {
          this->CheckTreesSynchronized(tree);
        }
        ++t_idx;
      }
      dh::safe_cuda(cudaGetLastError());
    } catch (const std::exception& e) {
      LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
    }
    param_.learning_rate = lr;
    monitor_.Stop("Update");
  }
  // One-time per-DMatrix setup: pick the device, synchronise the column
  // sampling seed across workers, fetch the ELLPACK page and create the
  // device-side maker.
  void InitDataOnce(DMatrix* dmat) {
    CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
    info_ = &dmat->Info();
    reducer_.Init({ctx_->gpu_id});  // NOLINT
    // Synchronise the column sampling seed
    uint32_t column_sampling_seed = common::GlobalRandom()();
    rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
    BatchParam batch_param{
      ctx_->gpu_id,
      param_.max_bin,
    };
    auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
    dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
    info_->feature_types.SetDevice(ctx_->gpu_id);
    maker.reset(new GPUHistMakerDevice<GradientSumT>(
        ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, param_,
        column_sampling_seed, info_->num_col_, batch_param));
    p_last_fmat_ = dmat;
    initialised_ = true;
  }
  void InitData(DMatrix* dmat, RegTree const* p_tree) {
    if (!initialised_) {
      monitor_.Start("InitDataOnce");
      this->InitDataOnce(dmat);
      monitor_.Stop("InitDataOnce");
    }
    p_last_tree_ = p_tree;
  }
  // Only call this method for testing
  // Broadcasts rank 0's serialised tree and asserts every worker built the
  // same tree (used when debug_synchronize is set).
  void CheckTreesSynchronized(RegTree* local_tree) const {
    std::string s_model;
    common::MemoryBufferStream fs(&s_model);
    int rank = rabit::GetRank();
    if (rank == 0) {
      local_tree->Save(&fs);
    }
    fs.Seek(0);
    rabit::Broadcast(&s_model, 0);
    RegTree reference_tree{};  // rank 0 tree
    reference_tree.Load(&fs);
    CHECK(*local_tree == reference_tree);
  }
  void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree,
                  HostDeviceVector<bst_node_t>* p_out_position) {
    monitor_.Start("InitData");
    this->InitData(p_fmat, p_tree);
    monitor_.Stop("InitData");
    gpair->SetDevice(ctx_->gpu_id);
    maker->UpdateTree(gpair, p_fmat, task_, p_tree, &reducer_, p_out_position);
  }
  // Refresh cached predictions; only valid for the DMatrix the last tree was
  // trained on, otherwise reports failure so the caller re-predicts.
  bool UpdatePredictionCache(const DMatrix* data,
                             linalg::VectorView<bst_float> p_out_preds) override {
    if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
      return false;
    }
    monitor_.Start("UpdatePredictionCache");
    bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
    monitor_.Stop("UpdatePredictionCache");
    return result;
  }
  TrainParam param_;   // NOLINT
  MetaInfo* info_{};   // NOLINT
  std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker;  // NOLINT
  char const* Name() const override { return "grow_gpu_hist"; }
 private:
  bool initialised_{false};
  GPUHistMakerTrainParam hist_maker_param_;
  dh::AllReducer reducer_;
  DMatrix* p_last_fmat_{nullptr};
  RegTree const* p_last_tree_{nullptr};
  ObjInfo task_;
  common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
// Register the updater under the name used by `updater=grow_gpu_hist`.
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
    .describe("Grow tree with GPU.")
    .set_body([](GenericParameter const* tparam, ObjInfo task) {
      return new GPUHistMaker(tparam, task);
    });
#endif  // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
73c11a1583b5bc2944eaa2fdea4487b8363df750.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Int8ToFloatExecution.cu
// MNN
//
// Created by MNN on 2023/01/03.
// Copyright 2018, Alibaba Group Holding Limited
//
#ifdef ENABLE_CUDA_QUANT
#include "Int8ToFloatExecution.hpp"
#include "../MNNCUDADefine.hpp"
#include "../MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
// Dequantize int8 -> T (float or half): out = (in - zeroPoint) * scale with a
// per-channel scale.  Each loop iteration loads one char4 (4 packed channel
// values) plus the matching float4 of scales and writes 4 outputs, so `total`
// counts groups of 4 elements.  d_cp splits the flat work index into an
// (n*h*w) part and a channel-group part.
// NOTE(review): the char4/float4 casts assume the packed int8 data is 4-byte
// aligned and the scale buffer 16-byte aligned -- confirm the upstream packing.
template<typename T>
__global__ void INT8_2_FLOAT(const int total,
    const int channelsPackInt8,
    const int channelsPackFloat,
    const int channels,
    const int8_t* in,
    T* out,
    const float* scaleData,
    const int8_t zeroPoint,
    DivModFast d_cp
    ) {
    CUDA_KERNEL_LOOP(index, total) {
        int nhw_idx, c_idx;
        d_cp.divmod(index, nhw_idx, c_idx);
        // Source element: row `nhw_idx` of the int8-packed layout, channel group c_idx.
        int idx_inp = nhw_idx * channelsPackInt8 + 4*c_idx;
        char4 inp_0 = ((char4 *)(in + idx_inp))[0];
        float4 scale_0 = ((float4 *)(scaleData + (c_idx << 2)))[0];
        const int idx_out = index << 2;
        out[idx_out+0] = (T)((inp_0.x - zeroPoint) * scale_0.x);
        out[idx_out+1] = (T)((inp_0.y - zeroPoint) * scale_0.y);
        out[idx_out+2] = (T)((inp_0.z - zeroPoint) * scale_0.z);
        out[idx_out+3] = (T)((inp_0.w - zeroPoint) * scale_0.w);
    }
}
// Same as INT8_2_FLOAT but with one scalar scale shared by every channel:
// out = (in - zeroPoint) * scaleData.  One iteration dequantizes 4 packed
// values via a single char4 load.
template<typename T>
__global__ void INT8_2_FLOAT_SINGLE(const int total,
    const int channelsPackInt8,
    const int channelsPackFloat,
    const int channels,
    const int8_t* in,
    T* out,
    const float scaleData,
    const int8_t zeroPoint,
    DivModFast d_cp
    ) {
    CUDA_KERNEL_LOOP(index, total) {
        int nhw_idx, c_idx;
        d_cp.divmod(index, nhw_idx, c_idx);
        int idx_inp = nhw_idx * channelsPackInt8 + 4*c_idx;
        char4 inp_0 = ((char4 *)(in + idx_inp))[0];
        const int idx_out = index << 2;
        out[idx_out+0] = (T)((inp_0.x - zeroPoint) * scaleData);
        out[idx_out+1] = (T)((inp_0.y - zeroPoint) * scaleData);
        out[idx_out+2] = (T)((inp_0.z - zeroPoint) * scaleData);
        out[idx_out+3] = (T)((inp_0.w - zeroPoint) * scaleData);
    }
}
// Build the dequantize execution from the op's QuantizedFloatParam.  A single
// tensor scale is kept by value (mSingle); per-channel scales are uploaded to
// a static-pool GPU buffer that is zero-filled up to a multiple of
// PACK_NUMBER floats before the real scales are copied in.
Int8ToFloatExecution::Int8ToFloatExecution(Backend *backend, const std::vector<Tensor *> &inputs, const MNN::Op *param) : Execution(backend) {
    auto runtime = static_cast<CUDABackend*>(backend)->getCUDARuntime();
    auto scale = param->main_as_QuantizedFloatParam();
    const int scaleLen = scale->tensorScale()->size();
    mClipBits = scale->nbits();
    if (1 == scaleLen) {
        mSingle = true;
        mSingleScale = scale->tensorScale()->data()[0];
    } else {
        auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
        mScaleStorage = staticPool->alloc(UP_DIV(scaleLen, PACK_NUMBER) * PACK_NUMBER * sizeof(float));
        mScales = (void*)((uint8_t*)mScaleStorage.first + mScaleStorage.second);
        // Zero the padded tail first, then copy the real per-channel scales.
        runtime->memset(mScales, 0, UP_DIV(scaleLen, PACK_NUMBER) * PACK_NUMBER * sizeof(float));
        runtime->memcpy(mScales, scale->tensorScale()->data(), scaleLen * sizeof(float), MNNMemcpyHostToDevice);
    }
    mZeroPoint = scale->zeroPoint();
}
// Return the per-channel scale buffer to the backend's static pool.  In
// single-scale mode nothing was allocated, so there is nothing to release.
Int8ToFloatExecution::~Int8ToFloatExecution() {
    if (mSingle) {
        return;
    }
    auto pool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
    pool->free(mScaleStorage);
}
// Derive (mArea, mChannel) from the input layout and precompute the kernel
// work-item count.  NHWC: the last axis is channel, everything else is area.
// NCHW / NC4HW4: axis 1 is channel; batch and spatial axes form the area.
ErrorCode Int8ToFloatExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    MNN_ASSERT(inputs.size() == 1);
    MNN_ASSERT(outputs.size() == 1);
    auto input = inputs[0];
    auto dims = input->dimensions();
    MNN_ASSERT(dims >= 2);
    auto format = TensorUtils::getDescribe(input)->dimensionFormat;
    if (format == MNN_DATA_FORMAT_NHWC) {
        mChannel = input->length(dims-1);
        mArea = 1;
        for(int i = 0; i < dims-1; i++) {
            mArea *= input->length(i);
        }
    } else if(format == MNN_DATA_FORMAT_NCHW || format == MNN_DATA_FORMAT_NC4HW4) {
        mChannel = input->length(1);
        mArea = input->length(0);
        for(int i = 2; i < dims; i++) {
            mArea *= input->length(i);
        }
    } else {
        MNN_ERROR("Int8ToFloatExecution not support format:%d\n", format);
        MNN_ASSERT(false);
    }
    // Each kernel work item handles 4 elements (see INT8_2_FLOAT), hence the
    // packed count.  NOTE(review): assumes UP_DIV(c, PACK_NUMBER) * 2 equals
    // the number of 4-channel groups (i.e. PACK_NUMBER == 8) -- confirm.
    mCount = mArea * UP_DIV(mChannel, PACK_NUMBER) * 2;
    // printf("Int8_2_Float size:%d-%d-%d\n\n", mArea, mChannel, mCount);
    return NO_ERROR;
}
// Launch the dequantize kernel.  Chooses half vs float output depending on
// the backend's fp16 mode, and the scalar- vs per-channel-scale kernel
// depending on how the op was constructed.
ErrorCode Int8ToFloatExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
    int block_num = runtime->blocks_num(mCount);
    int threads_num = runtime->threads_num();
    auto input_addr = (void*)inputs[0]->deviceId();
    auto output_addr = (void*)outputs[0]->deviceId();
    // Channel counts in the int8-packed input layout and the packed work-item
    // layout used for indexing (see onResize for the mCount formula).
    auto channelPackInt8 = UP_DIV(mChannel, INT8_PACK_NUMBER) * INT8_PACK_NUMBER;
    auto channelPackFloat = UP_DIV(mChannel, PACK_NUMBER) * 2;
    DivModFast cpD(channelPackFloat);
    if (static_cast<CUDABackend*>(backend())->useFp16()) {
        if(mSingle) {
            hipLaunchKernelGGL(( INT8_2_FLOAT_SINGLE), dim3(block_num), dim3(threads_num), 0, 0, mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (half *)output_addr,\
                mSingleScale, mZeroPoint, cpD);
            checkKernelErrors;
        } else {
            hipLaunchKernelGGL(( INT8_2_FLOAT), dim3(block_num), dim3(threads_num), 0, 0, mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (half *)output_addr,\
                (const float *)mScales, mZeroPoint, cpD);
            checkKernelErrors;
        }
    } else {
        if(mSingle) {
            hipLaunchKernelGGL(( INT8_2_FLOAT_SINGLE), dim3(block_num), dim3(threads_num), 0, 0, mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (float *)output_addr,\
                mSingleScale, mZeroPoint, cpD);
            checkKernelErrors;
        } else {
            hipLaunchKernelGGL(( INT8_2_FLOAT), dim3(block_num), dim3(threads_num), 0, 0, mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (float *)output_addr,\
                (const float *)mScales, mZeroPoint, cpD);
            checkKernelErrors;
        }
    }
    return NO_ERROR;
}
// Factory for OpType_Int8ToFloat.  Falls back to a plain cast execution when
// the op carries no quantization parameters (nothing to dequantize with).
class Int8ToFloatCreator : public CUDABackend::Creator {
public:
    virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                const MNN::Op* op, Backend* backend) const override {
        if(op->main_as_QuantizedFloatParam() == nullptr) {
            return new CastWrapExecution(backend, DataType_DT_FLOAT);
        }
        return new Int8ToFloatExecution(backend, inputs, op);
    }
};
static CUDACreatorRegister<Int8ToFloatCreator> __init(OpType_Int8ToFloat);
}
}
#endif | 73c11a1583b5bc2944eaa2fdea4487b8363df750.cu | //
// Int8ToFloatExecution.cu
// MNN
//
// Created by MNN on 2023/01/03.
// Copyright © 2018, Alibaba Group Holding Limited
//
#ifdef ENABLE_CUDA_QUANT
#include "Int8ToFloatExecution.hpp"
#include "../MNNCUDADefine.hpp"
#include "../MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
#define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
// Dequantize int8 -> T (float or half): out = (in - zeroPoint) * scale with a
// per-channel scale.  Each iteration loads one char4 (4 packed channels) plus
// the matching float4 of scales and writes 4 outputs, so `total` counts
// groups of 4 elements.  d_cp splits the flat index into (n*h*w, channel group).
// NOTE(review): the char4/float4 casts assume 4-/16-byte alignment of the
// packed layouts -- confirm the upstream packing.
template<typename T>
__global__ void INT8_2_FLOAT(const int total,
    const int channelsPackInt8,
    const int channelsPackFloat,
    const int channels,
    const int8_t* in,
    T* out,
    const float* scaleData,
    const int8_t zeroPoint,
    DivModFast d_cp
    ) {
    CUDA_KERNEL_LOOP(index, total) {
        int nhw_idx, c_idx;
        d_cp.divmod(index, nhw_idx, c_idx);
        int idx_inp = nhw_idx * channelsPackInt8 + 4*c_idx;
        char4 inp_0 = ((char4 *)(in + idx_inp))[0];
        float4 scale_0 = ((float4 *)(scaleData + (c_idx << 2)))[0];
        const int idx_out = index << 2;
        out[idx_out+0] = (T)((inp_0.x - zeroPoint) * scale_0.x);
        out[idx_out+1] = (T)((inp_0.y - zeroPoint) * scale_0.y);
        out[idx_out+2] = (T)((inp_0.z - zeroPoint) * scale_0.z);
        out[idx_out+3] = (T)((inp_0.w - zeroPoint) * scale_0.w);
    }
}
// Same as INT8_2_FLOAT but with one scalar scale shared by all channels:
// out = (in - zeroPoint) * scaleData, 4 packed values per iteration.
template<typename T>
__global__ void INT8_2_FLOAT_SINGLE(const int total,
    const int channelsPackInt8,
    const int channelsPackFloat,
    const int channels,
    const int8_t* in,
    T* out,
    const float scaleData,
    const int8_t zeroPoint,
    DivModFast d_cp
    ) {
    CUDA_KERNEL_LOOP(index, total) {
        int nhw_idx, c_idx;
        d_cp.divmod(index, nhw_idx, c_idx);
        int idx_inp = nhw_idx * channelsPackInt8 + 4*c_idx;
        char4 inp_0 = ((char4 *)(in + idx_inp))[0];
        const int idx_out = index << 2;
        out[idx_out+0] = (T)((inp_0.x - zeroPoint) * scaleData);
        out[idx_out+1] = (T)((inp_0.y - zeroPoint) * scaleData);
        out[idx_out+2] = (T)((inp_0.z - zeroPoint) * scaleData);
        out[idx_out+3] = (T)((inp_0.w - zeroPoint) * scaleData);
    }
}
// Build the dequantize execution from the op's QuantizedFloatParam.  A single
// tensor scale is kept by value (mSingle); per-channel scales are uploaded to
// a static-pool GPU buffer zero-filled up to a multiple of PACK_NUMBER floats
// before the real scales are copied in.
Int8ToFloatExecution::Int8ToFloatExecution(Backend *backend, const std::vector<Tensor *> &inputs, const MNN::Op *param) : Execution(backend) {
    auto runtime = static_cast<CUDABackend*>(backend)->getCUDARuntime();
    auto scale = param->main_as_QuantizedFloatParam();
    const int scaleLen = scale->tensorScale()->size();
    mClipBits = scale->nbits();
    if (1 == scaleLen) {
        mSingle = true;
        mSingleScale = scale->tensorScale()->data()[0];
    } else {
        auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
        mScaleStorage = staticPool->alloc(UP_DIV(scaleLen, PACK_NUMBER) * PACK_NUMBER * sizeof(float));
        mScales = (void*)((uint8_t*)mScaleStorage.first + mScaleStorage.second);
        // Zero the padded tail first, then copy the real per-channel scales.
        runtime->memset(mScales, 0, UP_DIV(scaleLen, PACK_NUMBER) * PACK_NUMBER * sizeof(float));
        runtime->memcpy(mScales, scale->tensorScale()->data(), scaleLen * sizeof(float), MNNMemcpyHostToDevice);
    }
    mZeroPoint = scale->zeroPoint();
}
// Release the static-pool buffer used for per-channel scales; nothing was
// allocated in single-scale mode.
Int8ToFloatExecution::~Int8ToFloatExecution() {
    if(!mSingle) {
        auto staticPool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
        staticPool->free(mScaleStorage);
    }
}
// Derive (mArea, mChannel) from the input layout and precompute the kernel
// work-item count.  NHWC: the last axis is channel, everything else is area.
// NCHW / NC4HW4: axis 1 is channel; batch and spatial axes form the area.
ErrorCode Int8ToFloatExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    MNN_ASSERT(inputs.size() == 1);
    MNN_ASSERT(outputs.size() == 1);
    auto input = inputs[0];
    auto dims = input->dimensions();
    MNN_ASSERT(dims >= 2);
    auto format = TensorUtils::getDescribe(input)->dimensionFormat;
    if (format == MNN_DATA_FORMAT_NHWC) {
        mChannel = input->length(dims-1);
        mArea = 1;
        for(int i = 0; i < dims-1; i++) {
            mArea *= input->length(i);
        }
    } else if(format == MNN_DATA_FORMAT_NCHW || format == MNN_DATA_FORMAT_NC4HW4) {
        mChannel = input->length(1);
        mArea = input->length(0);
        for(int i = 2; i < dims; i++) {
            mArea *= input->length(i);
        }
    } else {
        MNN_ERROR("Int8ToFloatExecution not support format:%d\n", format);
        MNN_ASSERT(false);
    }
    // Each kernel work item covers 4 elements (see INT8_2_FLOAT).
    // NOTE(review): assumes UP_DIV(c, PACK_NUMBER) * 2 equals the number of
    // 4-channel groups (i.e. PACK_NUMBER == 8) -- confirm.
    mCount = mArea * UP_DIV(mChannel, PACK_NUMBER) * 2;
    // printf("Int8_2_Float size:%d-%d-%d\n\n", mArea, mChannel, mCount);
    return NO_ERROR;
}
// Launch the dequantize kernel: half vs float output depending on the
// backend's fp16 mode, scalar- vs per-channel-scale variant depending on how
// the op was constructed.
ErrorCode Int8ToFloatExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
    int block_num = runtime->blocks_num(mCount);
    int threads_num = runtime->threads_num();
    auto input_addr = (void*)inputs[0]->deviceId();
    auto output_addr = (void*)outputs[0]->deviceId();
    // Channel counts for the int8-packed input layout and the packed
    // work-item layout (same formula as mCount in onResize).
    auto channelPackInt8 = UP_DIV(mChannel, INT8_PACK_NUMBER) * INT8_PACK_NUMBER;
    auto channelPackFloat = UP_DIV(mChannel, PACK_NUMBER) * 2;
    DivModFast cpD(channelPackFloat);
    if (static_cast<CUDABackend*>(backend())->useFp16()) {
        if(mSingle) {
            INT8_2_FLOAT_SINGLE<<<block_num, threads_num>>>(mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (half *)output_addr,\
                mSingleScale, mZeroPoint, cpD);
            checkKernelErrors;
        } else {
            INT8_2_FLOAT<<<block_num, threads_num>>>(mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (half *)output_addr,\
                (const float *)mScales, mZeroPoint, cpD);
            checkKernelErrors;
        }
    } else {
        if(mSingle) {
            INT8_2_FLOAT_SINGLE<<<block_num, threads_num>>>(mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (float *)output_addr,\
                mSingleScale, mZeroPoint, cpD);
            checkKernelErrors;
        } else {
            INT8_2_FLOAT<<<block_num, threads_num>>>(mCount, channelPackInt8, channelPackFloat, mChannel, (const int8_t *)input_addr, (float *)output_addr,\
                (const float *)mScales, mZeroPoint, cpD);
            checkKernelErrors;
        }
    }
    return NO_ERROR;
}
// Factory for OpType_Int8ToFloat.  Falls back to a plain cast execution when
// the op carries no quantization parameters (nothing to dequantize with).
class Int8ToFloatCreator : public CUDABackend::Creator {
public:
    virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                const MNN::Op* op, Backend* backend) const override {
        if(op->main_as_QuantizedFloatParam() == nullptr) {
            return new CastWrapExecution(backend, DataType_DT_FLOAT);
        }
        return new Int8ToFloatExecution(backend, inputs, op);
    }
};
static CUDACreatorRegister<Int8ToFloatCreator> __init(OpType_Int8ToFloat);
}
}
#endif |
e1adbe43ebd5debaf47767101bdd8f0685fa6b20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include "ppm_kernel.cu"
#define DEFAULT_THRESHOLD 8000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
#define BLOCKSIZE 16
/*
 * Read a binary PPM ("P6") image and return its red channel, one unsigned int
 * per pixel (caller frees).  *xsize, *ysize and *maxval receive the width,
 * height and maximum sample value.  Returns NULL on any error.
 *
 * NOTE(review): samples are always read as single bytes, so images with
 * maxval > 255 (2-byte samples) are not decoded correctly even though the
 * read buffer is sized for them.
 */
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
  if ( !filename || filename[0] == '\0') {
    fprintf(stderr, "read_ppm but no file name\n");
    return NULL; // fail
  }
  FILE *fp;
  fprintf(stderr, "read_ppm( %s )\n", filename);
  fp = fopen( filename, "rb");
  if (!fp)
  {
    fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
    return NULL; // fail
  }
  // Slurp the header (and the start of the pixel data) into a scratch buffer.
  char chars[1024];
  int num = fread(chars, sizeof(char), 1000, fp);
  if (num <= 0) {
    fprintf(stderr, "read_ppm() ERROR file '%s' is empty or unreadable\n", filename);
    fclose(fp);
    return NULL;
  }
  // NUL-terminate so the strstr()/sscanf() scans below cannot run past the
  // bytes actually read (the buffer was previously left unterminated).
  chars[num] = '\0';
  if (chars[0] != 'P' || chars[1] != '6')
  {
    fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
    fclose(fp);
    return NULL;
  }
  unsigned int width, height, maxvalue;
  char *ptr = chars + 3;  // P 6 newline
  if (*ptr == '#')        // comment line!
  {
    ptr = 1 + strstr(ptr, "\n");
  }
  num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
  fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
  if (num != 3) {
    fprintf(stderr, "read_ppm() ERROR malformed header in '%s'\n", filename);
    fclose(fp);
    return NULL;
  }
  *xsize = width;
  *ysize = height;
  *maxval = maxvalue;
  unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
  if (!pic) {
    fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
    fclose(fp);
    return NULL; // fail but return
  }
  // allocate buffer to read the rest of the file into
  int bufsize = 3 * width * height * sizeof(unsigned char);
  if ((*maxval) > 255) bufsize *= 2;
  unsigned char *buf = (unsigned char *)malloc( bufsize );
  if (!buf) {
    fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
    free(pic);   // was leaked on this path
    fclose(fp);
    return NULL; // fail but return
  }
  // Locate the start of the pixel data by skipping past the textual width,
  // height and maxval fields.  (The old "%d\0" formats carried a redundant
  // embedded NUL -- "%d" is equivalent.)
  char duh[80];
  char *line = chars;
  sprintf(duh, "%d", *xsize);
  line = strstr(line, duh);
  line += strlen(duh) + 1;
  sprintf(duh, "%d", *ysize);
  line = strstr(line, duh);
  line += strlen(duh) + 1;
  sprintf(duh, "%d", *maxval);
  line = strstr(line, duh);
  fprintf(stderr, "%s found at offset %ld\n", duh, (long)(line - chars));
  line += strlen(duh) + 1;
  long offset = line - chars;
  fseek(fp, offset, SEEK_SET); // move to the correct offset
  long numread = fread(buf, sizeof(char), bufsize, fp);
  // Bug fix: bufsize is an int but was printed with %ld (undefined behavior).
  fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, (long)bufsize);
  fclose(fp);
  int pixels = (*xsize) * (*ysize);
  for (int i = 0; i < pixels; i++)
    pic[i] = (int) buf[3 * i]; // red channel
  free(buf);                   // was leaked
  return pic; // success
}
/*
 * Write a grayscale image as a binary PPM ("P6"): each value in `pic` is
 * emitted three times (R, G, B), truncated to one byte.  Exits the program
 * if the file cannot be opened.
 */
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
  FILE *fp;
  fp = fopen(filename, "wb");
  if (!fp)
  {
    // Bug fix: the format string referenced '%s' but no argument was passed
    // (undefined behavior); supply the filename.
    fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
    exit(-1);
  }
  fprintf(fp, "P6\n");
  fprintf(fp, "%d %d\n%d\n", xsize, ysize, maxval);
  int numpix = xsize * ysize;
  for (int i = 0; i < numpix; i++) {
    // Same byte for R, G and B -> grayscale output.
    unsigned char uc = (unsigned char) pic[i];
    fprintf(fp, "%c%c%c", uc, uc, uc);
  }
  fclose(fp);
}
// Run the Sobel kernel (ppmKernel2, defined in ppm_kernel.cu) on the GPU over
// a width x height single-channel image and return a freshly malloc'd host
// copy of the result (caller frees).  Uses a BLOCKSIZE x BLOCKSIZE thread
// block and a grid rounded up to cover the whole image.
// NOTE(review): none of the hipMalloc/hipMemcpy/launch calls are checked --
// a failure here silently produces garbage output.
unsigned int* computeOnGPU(unsigned int * input, int width, int height, int thresh){
  unsigned int *d_input = NULL;
  unsigned int *d_output = NULL;
  unsigned int *h_output = (unsigned int*)malloc(sizeof(int) * width * height);
  /* for (int i = 0; i < width * height; ++i) { */
  /*   printf("%d\n",input[i]); */
  /* } */
  int size = sizeof(int) * width * height;
  hipMalloc((void**)&d_input, size);
  hipMalloc((void**)&d_output, size);
  hipMemcpy(d_input,input, size,hipMemcpyHostToDevice);
  /* printf("%d\n%d",width,height); */
  dim3 dimGrid((width-1+BLOCKSIZE)/BLOCKSIZE,(height-1 + BLOCKSIZE)/BLOCKSIZE);
  dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
  hipLaunchKernelGGL(( ppmKernel2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_output,width,height,thresh);
  hipDeviceSynchronize();
  hipMemcpy(h_output, d_output,size,hipMemcpyDeviceToHost);
  hipFree(d_input);
  hipFree(d_output);
  return h_output;
}
/*
 * Driver: read a PPM image, run the Sobel operator on the GPU and on the CPU
 * (gold reference), then write the CPU result to "result8000gold.ppm".
 * Usage:  prog [threshold]  |  prog <file.ppm> <threshold>
 */
int main( int argc, char **argv )
{
  int thresh = DEFAULT_THRESHOLD;
  char *filename;
  filename = strdup( DEFAULT_FILENAME);
  if (argc > 1) {
    if (argc == 3) { // filename AND threshold
      free(filename);              // don't leak the default name
      filename = strdup( argv[1]);
      thresh = atoi( argv[2] );
    }
    if (argc == 2) { // default file but specified threshold
      thresh = atoi( argv[1] );
    }
    fprintf(stderr, "file %s threshold %d\n", filename, thresh);
  }
  int xsize, ysize, maxval;
  unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
  if (!pic) {
    fprintf(stderr, "main() could not read '%s'\n", filename);
    exit(-1);
  }
  int numbytes = xsize * ysize * 3 * sizeof( int );
  int *result = (int *) malloc( numbytes );
  if (!result) {
    fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
    exit(-1); // fail
  }
  unsigned int *gpu = computeOnGPU(pic, xsize, ysize, thresh);
  // Bug fix: the zeroing loop used to advance `result` itself (*result++ = 0),
  // so every later result[offset] write -- and the final write_ppm() call --
  // operated on a shifted, never-initialised region of the allocation,
  // leaving garbage in the one-pixel border of the output.  Zero by index and
  // keep `result` pointing at the start of the buffer.
  for (int k = 0; k < xsize * ysize; k++) {
    result[k] = 0;
  }
  int i, j, magnitude, sum1, sum2;
  // Interior pixels only: the 3x3 Sobel stencil needs all 8 neighbours.
  for (i = 1; i < ysize - 1; i++) {
    for (j = 1; j < xsize - 1; j++) {
      int offset = i * xsize + j;
      // horizontal gradient
      sum1 =  pic[ xsize * (i-1) + j+1 ] -     pic[ xsize*(i-1) + j-1 ]
        + 2 * pic[ xsize * (i)   + j+1 ] - 2 * pic[ xsize*(i)   + j-1 ]
        +     pic[ xsize * (i+1) + j+1 ] -     pic[ xsize*(i+1) + j-1 ];
      // vertical gradient
      sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
           - pic[ xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
      magnitude = sum1 * sum1 + sum2 * sum2;
      result[offset] = (magnitude > thresh) ? 255 : 0;
    }
  }
  write_ppm( "result8000gold.ppm", xsize, ysize, 255, result);
  fprintf(stderr, "sobel done\n");
  free(result);
  free(gpu);
  free(pic);
  free(filename);
}
| e1adbe43ebd5debaf47767101bdd8f0685fa6b20.cu | #include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include "ppm_kernel.cu"
#define DEFAULT_THRESHOLD 8000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
#define BLOCKSIZE 16
/*
 * Read a binary PPM ("P6") image and return its red channel, one unsigned int
 * per pixel (caller frees).  *xsize, *ysize and *maxval receive the width,
 * height and maximum sample value.  Returns NULL on any error.
 *
 * NOTE(review): samples are always read as single bytes, so images with
 * maxval > 255 (2-byte samples) are not decoded correctly even though the
 * read buffer is sized for them.
 */
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
  if ( !filename || filename[0] == '\0') {
    fprintf(stderr, "read_ppm but no file name\n");
    return NULL; // fail
  }
  FILE *fp;
  fprintf(stderr, "read_ppm( %s )\n", filename);
  fp = fopen( filename, "rb");
  if (!fp)
  {
    fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
    return NULL; // fail
  }
  // Slurp the header (and the start of the pixel data) into a scratch buffer.
  char chars[1024];
  int num = fread(chars, sizeof(char), 1000, fp);
  if (num <= 0) {
    fprintf(stderr, "read_ppm() ERROR file '%s' is empty or unreadable\n", filename);
    fclose(fp);
    return NULL;
  }
  // NUL-terminate so the strstr()/sscanf() scans below cannot run past the
  // bytes actually read (the buffer was previously left unterminated).
  chars[num] = '\0';
  if (chars[0] != 'P' || chars[1] != '6')
  {
    fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
    fclose(fp);
    return NULL;
  }
  unsigned int width, height, maxvalue;
  char *ptr = chars + 3;  // P 6 newline
  if (*ptr == '#')        // comment line!
  {
    ptr = 1 + strstr(ptr, "\n");
  }
  num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
  fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
  if (num != 3) {
    fprintf(stderr, "read_ppm() ERROR malformed header in '%s'\n", filename);
    fclose(fp);
    return NULL;
  }
  *xsize = width;
  *ysize = height;
  *maxval = maxvalue;
  unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
  if (!pic) {
    fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
    fclose(fp);
    return NULL; // fail but return
  }
  // allocate buffer to read the rest of the file into
  int bufsize = 3 * width * height * sizeof(unsigned char);
  if ((*maxval) > 255) bufsize *= 2;
  unsigned char *buf = (unsigned char *)malloc( bufsize );
  if (!buf) {
    fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
    free(pic);   // was leaked on this path
    fclose(fp);
    return NULL; // fail but return
  }
  // Locate the start of the pixel data by skipping past the textual width,
  // height and maxval fields.  (The old "%d\0" formats carried a redundant
  // embedded NUL -- "%d" is equivalent.)
  char duh[80];
  char *line = chars;
  sprintf(duh, "%d", *xsize);
  line = strstr(line, duh);
  line += strlen(duh) + 1;
  sprintf(duh, "%d", *ysize);
  line = strstr(line, duh);
  line += strlen(duh) + 1;
  sprintf(duh, "%d", *maxval);
  line = strstr(line, duh);
  fprintf(stderr, "%s found at offset %ld\n", duh, (long)(line - chars));
  line += strlen(duh) + 1;
  long offset = line - chars;
  fseek(fp, offset, SEEK_SET); // move to the correct offset
  long numread = fread(buf, sizeof(char), bufsize, fp);
  // Bug fix: bufsize is an int but was printed with %ld (undefined behavior).
  fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, (long)bufsize);
  fclose(fp);
  int pixels = (*xsize) * (*ysize);
  for (int i = 0; i < pixels; i++)
    pic[i] = (int) buf[3 * i]; // red channel
  free(buf);                   // was leaked
  return pic; // success
}
/*
 * Write a grayscale image as a binary PPM ("P6"): each value in `pic` is
 * emitted three times (R, G, B), truncated to one byte.  Exits the program
 * if the file cannot be opened.
 */
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic)
{
  FILE *fp;
  fp = fopen(filename, "wb");
  if (!fp)
  {
    // Bug fix: the format string referenced '%s' but no argument was passed
    // (undefined behavior); supply the filename.
    fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
    exit(-1);
  }
  fprintf(fp, "P6\n");
  fprintf(fp, "%d %d\n%d\n", xsize, ysize, maxval);
  int numpix = xsize * ysize;
  for (int i = 0; i < numpix; i++) {
    // Same byte for R, G and B -> grayscale output.
    unsigned char uc = (unsigned char) pic[i];
    fprintf(fp, "%c%c%c", uc, uc, uc);
  }
  fclose(fp);
}
// Run the Sobel kernel (ppmKernel2, defined in ppm_kernel.cu) on the GPU over
// a width x height single-channel image and return a freshly malloc'd host
// copy of the result (caller frees).  Uses a BLOCKSIZE x BLOCKSIZE thread
// block and a grid rounded up to cover the whole image.
// NOTE(review): none of the cudaMalloc/cudaMemcpy/launch calls are checked --
// a failure here silently produces garbage output.
unsigned int* computeOnGPU(unsigned int * input, int width, int height, int thresh){
  unsigned int *d_input = NULL;
  unsigned int *d_output = NULL;
  unsigned int *h_output = (unsigned int*)malloc(sizeof(int) * width * height);
  /* for (int i = 0; i < width * height; ++i) { */
  /*   printf("%d\n",input[i]); */
  /* } */
  int size = sizeof(int) * width * height;
  cudaMalloc((void**)&d_input, size);
  cudaMalloc((void**)&d_output, size);
  cudaMemcpy(d_input,input, size,cudaMemcpyHostToDevice);
  /* printf("%d\n%d",width,height); */
  dim3 dimGrid((width-1+BLOCKSIZE)/BLOCKSIZE,(height-1 + BLOCKSIZE)/BLOCKSIZE);
  dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
  ppmKernel2<<<dimGrid,dimBlock>>>(d_input, d_output,width,height,thresh);
  cudaDeviceSynchronize();
  cudaMemcpy(h_output, d_output,size,cudaMemcpyDeviceToHost);
  cudaFree(d_input);
  cudaFree(d_output);
  return h_output;
}
/*
 * Driver: read a PPM image, run the Sobel operator on the GPU and on the CPU
 * (gold reference), then write the CPU result to "result8000gold.ppm".
 * Usage:  prog [threshold]  |  prog <file.ppm> <threshold>
 */
int main( int argc, char **argv )
{
  int thresh = DEFAULT_THRESHOLD;
  char *filename;
  filename = strdup( DEFAULT_FILENAME);
  if (argc > 1) {
    if (argc == 3) { // filename AND threshold
      free(filename);              // don't leak the default name
      filename = strdup( argv[1]);
      thresh = atoi( argv[2] );
    }
    if (argc == 2) { // default file but specified threshold
      thresh = atoi( argv[1] );
    }
    fprintf(stderr, "file %s threshold %d\n", filename, thresh);
  }
  int xsize, ysize, maxval;
  unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
  if (!pic) {
    fprintf(stderr, "main() could not read '%s'\n", filename);
    exit(-1);
  }
  int numbytes = xsize * ysize * 3 * sizeof( int );
  int *result = (int *) malloc( numbytes );
  if (!result) {
    fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
    exit(-1); // fail
  }
  unsigned int *gpu = computeOnGPU(pic, xsize, ysize, thresh);
  // Bug fix: the zeroing loop used to advance `result` itself (*result++ = 0),
  // so every later result[offset] write -- and the final write_ppm() call --
  // operated on a shifted, never-initialised region of the allocation,
  // leaving garbage in the one-pixel border of the output.  Zero by index and
  // keep `result` pointing at the start of the buffer.
  for (int k = 0; k < xsize * ysize; k++) {
    result[k] = 0;
  }
  int i, j, magnitude, sum1, sum2;
  // Interior pixels only: the 3x3 Sobel stencil needs all 8 neighbours.
  for (i = 1; i < ysize - 1; i++) {
    for (j = 1; j < xsize - 1; j++) {
      int offset = i * xsize + j;
      // horizontal gradient
      sum1 =  pic[ xsize * (i-1) + j+1 ] -     pic[ xsize*(i-1) + j-1 ]
        + 2 * pic[ xsize * (i)   + j+1 ] - 2 * pic[ xsize*(i)   + j-1 ]
        +     pic[ xsize * (i+1) + j+1 ] -     pic[ xsize*(i+1) + j-1 ];
      // vertical gradient
      sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
           - pic[ xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
      magnitude = sum1 * sum1 + sum2 * sum2;
      result[offset] = (magnitude > thresh) ? 255 : 0;
    }
  }
  write_ppm( "result8000gold.ppm", xsize, ysize, 255, result);
  fprintf(stderr, "sobel done\n");
  free(result);
  free(gpu);
  free(pic);
  free(filename);
}
|
7c2b7dea39ece03433dd4cdaf5dd5d37831d18c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* developed by zhujin
*/
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/coded_stream.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/util_img.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
// Bilinear resize of one 2-D plane: each thread computes one destination
// pixel as the weighted combination of up to four neighbouring source pixels.
// The `src_w + 1 < src_width` / `src_h + 1 < src_height` guards clamp the
// stencil at the right/bottom borders.
template <typename Dtype>
__global__ void kernel_BiLinearResize(const int nthreads, const Dtype* src_data, const int src_height, const int src_width,
		Dtype* dst_data, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
	CUDA_KERNEL_LOOP(i, nthreads) {
		// Destination row and its fractional source coordinate.
		int dst_h = i /dst_width;
		Dtype fh = dst_h * scale_h;
		const int src_h = floor(fh);
		fh -= src_h;
		const Dtype w_h0 = std::abs(1.0f - fh);
		const Dtype w_h1 = std::abs(fh);
		const int dst_offset_1 = dst_h * dst_width;
		const int src_offset_1 = src_h * src_width;
		// Destination column and its fractional source coordinate.
		int dst_w = i %dst_width;
		Dtype fw = dst_w * scale_w;
		const int src_w = floor(fw);
		fw -= src_w;
		const Dtype w_w0 = std::abs(1.0f - fw);
		const Dtype w_w1 = std::abs(fw);
		const int dst_idx = dst_offset_1 + dst_w;
		const int src_idx = src_offset_1 + src_w;
		// Accumulate the four bilinear taps, skipping out-of-range neighbours.
		Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
		if (src_w + 1 < src_width)
			res += (w_h0 * w_w1 * src_data[src_idx + 1]);
		if (src_h + 1 < src_height)
			res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
		if (src_w + 1 < src_width && src_h + 1 < src_height)
			res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
		dst_data[dst_idx] = res;
	}
}
// Resize one device-resident (src_height x src_width) plane `src` into the
// (dst_height x dst_width) plane `dst` with bilinear interpolation.
// Scales are src/dst ratios, matching kernel_BiLinearResize's mapping.
template <typename Dtype>
void BiLinearResizeMat_gpu(const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width)
{
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width;
hipLaunchKernelGGL(( kernel_BiLinearResize<Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
loop_n,src, src_height, src_width, dst, dst_height, dst_width, scale_h, scale_w);
// Re-enabled: without this, a bad launch surfaces only at some later,
// unrelated CUDA call, which makes the failure impossible to attribute.
CUDA_POST_KERNEL_CHECK;
}
template void BiLinearResizeMat_gpu(const float* src, const int src_height, const int src_width,
float* dst, const int dst_height, const int dst_width);
template void BiLinearResizeMat_gpu(const double* src, const int src_height, const int src_width,
double* dst, const int dst_height, const int dst_width);
// Resize the single (src_n, src_c) plane of `src` into the (dst_n, dst_c)
// plane of `dst` using bilinear interpolation on the GPU.
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src, const int src_n, const int src_c,
Blob<Dtype>* dst, const int dst_n, const int dst_c) {
const int src_height = src->height();
const int src_width = src->width();
const int dst_height = dst->height();
const int dst_width = dst->width();
// Linear offset of the requested plane inside each blob's NCHW buffer.
const int src_plane = (src_n * src->channels() + src_c) * src_height * src_width;
const int dst_plane = (dst_n * dst->channels() + dst_c) * dst_height * dst_width;
const Dtype* src_data = src->gpu_data() + src_plane;
Dtype* dst_data = dst->mutable_gpu_data() + dst_plane;
BiLinearResizeMat_gpu(src_data, src_height, src_width,
dst_data, dst_height, dst_width);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src, const int src_n, const int src_c,
Blob<float>* dst, const int dst_n, const int dst_c);
template void ResizeBlob_gpu(const Blob<double>* src, const int src_n, const int src_c,
Blob<double>* dst, const int dst_n, const int dst_c);
// For every destination pixel, record the four source sample indices
// (loc1..loc4) and bilinear weights (weight1..weight4) that a resize would
// use. Out-of-range taps keep location 0 with weight 0, so a consumer can
// blend all four entries unconditionally. Locations are stored as Dtype.
template <typename Dtype>
__global__ void kernel_GetBiLinearResizeMatRules(const int nthreads, const int src_height, const int src_width,
const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Same coordinate mapping as kernel_BiLinearResize.
int dst_h = index /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = index %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
// dst_data[dst_idx] = 0;
const int src_idx = src_offset_1 + src_w;
// Top-left tap always exists.
loc1[dst_idx] = src_idx;
weight1[dst_idx] = w_h0 * w_w0;
// Default the other three taps to "no contribution".
loc2[dst_idx] = 0;
weight2[dst_idx] = 0;
weight3[dst_idx] = 0;
loc3[dst_idx] = 0;
loc4[dst_idx] = 0;
weight4[dst_idx] = 0;
// Top-right tap, if not on the right border.
if (src_w + 1 < src_width)
{
loc2[dst_idx] = src_idx + 1;
weight2[dst_idx] = w_h0 * w_w1;
// dst_data[dst_idx] += (w_h0 * w_w1 * src_data[src_idx + 1]);
}
// Bottom-left tap, if not on the bottom border.
if (src_h + 1 < src_height)
{
// dst_data[dst_idx] += (w_h1 * w_w0 * src_data[src_idx + src_width]);
weight3[dst_idx] = w_h1 * w_w0;
loc3[dst_idx] = src_idx + src_width;
}
// Bottom-right tap, if both +1 neighbors exist.
if (src_w + 1 < src_width && src_h + 1 < src_height)
{
loc4[dst_idx] = src_idx + src_width + 1;
weight4[dst_idx] = w_h1 * w_w1;
// dst_data[dst_idx] += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
}
}
}
// Batched bilinear resize over a whole NCHW blob: one thread per output
// element across num * channels * dst_height * dst_width. Each thread
// decomposes its flat index into (n, c, pixel) and resamples its own plane.
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
// index = ((n * channels + c) * dst_height*dst_width) + i
int i = index %( dst_height * dst_width);
int c = (index/(dst_height * dst_width))%channels;
int n = (index/(dst_height * dst_width))/channels;
// Base of this (n, c) plane in the source and destination buffers.
int src_offset = (n * channels + c) * src_height * src_width;
int dst_offset = (n * channels + c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
// Per-pixel bilinear sampling, identical to kernel_BiLinearResize.
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}
/*
// new version by Sifei Liu
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int d_i = index %( dst_height * dst_width);
int d_c = (index/(dst_height * dst_width))%channels;
int d_n = (index/(dst_height * dst_width))/channels;
int s_c = (index/(src_height * src_width))%channels;
int s_n = (index/(src_height * src_width))/channels;
int src_offset = (s_n * channels + s_c) * src_height * src_width;
int dst_offset = (d_n * channels + d_c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
int dst_h = d_i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = d_i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}*/
// Bilinearly resize every (n, c) plane of `src` into `dst` with one kernel
// launch over num * channels * dst_h * dst_w output elements. The two blobs
// must agree on num and channels; spatial dimensions may differ.
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst) {
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
const int num = src->num();
const int channels = src->channels();
const int src_h = src->height();
const int src_w = src->width();
const int dst_h = dst->height();
const int dst_w = dst->width();
// src/dst size ratios drive the output->input coordinate mapping.
const Dtype scale_h = src_h / (Dtype)dst_h;
const Dtype scale_w = src_w / (Dtype)dst_w;
const int loop_n = dst_h * dst_w * channels * num;
hipLaunchKernelGGL(( kernel_ResizeBlob<Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, loop_n, num, channels,
src->gpu_data(), src_h, src_w,
dst->mutable_gpu_data(), dst_h, dst_w,
scale_h, scale_w);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src,
Blob<float>* dst);
template void ResizeBlob_gpu(const Blob<double>* src,
Blob<double>* dst);
// Precompute bilinear sampling rules for a (src -> dst) plane resize: for
// each destination pixel, the four source indices loc1..loc4 and weights
// weight1..weight4. Missing border taps are written as index 0 / weight 0.
template <typename Dtype>
void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
const int n_pixels = dst_height * dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
const Dtype scale_w = src_width / (Dtype)dst_width;
hipLaunchKernelGGL(( kernel_GetBiLinearResizeMatRules<Dtype>) , dim3(CAFFE_GET_BLOCKS(n_pixels)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
n_pixels, src_height, src_width,
dst_height, dst_width, scale_h, scale_w,
loc1, weight1, loc2, weight2,
loc3, weight3, loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
float* loc1, float* weight1, float* loc2, float* weight2,
float* loc3, float* weight3, float* loc4, float* weight4);
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
double* loc1, double* weight1, double* loc2, double* weight2,
double* loc3, double* weight3, double* loc4, double* weight4);
// Convenience overload: resize `src` into `dst` and additionally cache the
// bilinear sampling rules in loc1..loc4 (indices in each blob's data(),
// weights in its diff()) so a backward pass can reuse them.
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst,
Blob<Dtype>* loc1, Blob<Dtype>* loc2, Blob<Dtype>* loc3, Blob<Dtype>* loc4){
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
const int in_h = src->height();
const int in_w = src->width();
const int out_h = dst->height();
const int out_w = dst->width();
GetBiLinearResizeMatRules_gpu(in_h, in_w, out_h, out_w,
loc1->mutable_gpu_data(), loc1->mutable_gpu_diff(),
loc2->mutable_gpu_data(), loc2->mutable_gpu_diff(),
loc3->mutable_gpu_data(), loc3->mutable_gpu_diff(),
loc4->mutable_gpu_data(), loc4->mutable_gpu_diff());
ResizeBlob_gpu(src, dst);
}
template void ResizeBlob_gpu(const Blob<float>* src,Blob<float>* dst,
Blob<float>* loc1, Blob<float>* loc2, Blob<float>* loc3, Blob<float>* loc4);
template void ResizeBlob_gpu(const Blob<double>* src,Blob<double>* dst,
Blob<double>* loc1, Blob<double>* loc2, Blob<double>* loc3, Blob<double>* loc4);
/*
template <typename Dtype>
void GenerateSubBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w)
{
const int nums_ = src.num();
const int channels_ = src.channels();
const int height_ = src.height();
const int width_ = src.width();
const int height_col_ =(height_ + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (width_ + 2 * pad_w - kernel_w) / stride_w + 1;
*
* actually after im2col_v2, data is stored as
* col_buffer_.Reshape(1*height_out_*width_out_, channels_ , kernel_h_ , kernel_w_);
* *
dst.Reshape(height_col_*width_col_*nums_,channels_, kernel_h, kernel_w);
caffe::caffe_gpu_set(dst.count(),Dtype(0),dst.mutable_gpu_data());
for(int n = 0; n < nums_; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n*height_col_*width_col_);
caffe::im2col_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
}
template void GenerateSubBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
template void GenerateSubBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
*/
// Crop kernel: one thread per output element. Copies the window starting at
// (start_h, start_w) from each (n, c) source plane of size in_h x in_w into
// the corresponding out_h x out_w destination plane. No bounds clipping:
// callers must guarantee the window lies inside the source.
// NOTE: the `num` parameter is not referenced in the body.
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
const int num, const int channels, const int in_h, const int in_w,
const int out_h, const int out_w, const int start_h, const int start_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
// Chained integer division equals division by the product, so this is
// n = index / (channels*out_h*out_w) for the NCHW-flattened index.
int n = index/channels/out_h/out_w;
int c = (index/(out_h*out_w))% channels;
int h = (index%(out_h*out_w))/out_w;
int w = (index%(out_h*out_w))%out_w;
Dtype* dst_data_ptr =dst_data+ ((n* channels+c)*out_h )*out_w ;
const Dtype* src_data_ptr = src_data + ((n* channels+c)*in_h )*in_w ;
dst_data_ptr[h*out_w+w] = src_data_ptr[(h+start_h)*in_w + w+start_w];
}
}
// Crop the window [start_h, end_h) x [start_w, end_w) out of every (n, c)
// plane of `src` into `dst`, reshaping `dst` to num x channels x out_h x out_w.
// When the window covers the whole plane this degenerates to a plain copy.
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<Dtype>&dst)
{
const int in_h = src.height();
const int in_w = src.width();
const int num = src.num();
const int channels = src.channels();
const int out_h = end_h - start_h;
const int out_w = end_w - start_w;
CHECK(out_h > 0) <<" end_h should be larger than start_h";
CHECK(out_w > 0) <<" end_w should be larger than start_w";
// Message typo fixed ("nor" -> "not"); conditions unchanged.
CHECK(out_h <=in_h) <<" out_h should not be larger than input_height";
CHECK(out_w <=in_w) <<" out_w should not be larger than input_width";
dst.Reshape(num,channels,out_h,out_w);
if((out_h != in_h) || (out_w != in_w)){
const int loop_n = num*channels*out_h*out_w;
hipLaunchKernelGGL(( kernel_CropBlob <Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, loop_n,
src.gpu_data(), dst.mutable_gpu_data(),
num, channels, in_h, in_w, out_h, out_w, start_h, start_w);
// Added: launch-error check, consistent with ResizeBlob_gpu above.
CUDA_POST_KERNEL_CHECK;
}
else
{
// Same-size "crop": just copy the whole buffer device-to-device.
caffe::caffe_copy(src.count(),src.gpu_data(),dst.mutable_gpu_data());
}
}
template void CropBlobs_gpu( const Blob<float>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<float>&dst);
template void CropBlobs_gpu( const Blob<double>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<double>&dst);
// Clipped crop-copy kernel: copies an out_h x out_w window from source plane
// src_num_id (offset by start_h/start_w) into destination plane dst_num_id
// (offset by dst_start_h/dst_start_w). Elements whose source or destination
// coordinate falls outside its plane are silently skipped, so negative
// offsets and overhanging windows are safe.
// NOTE: the `num` and `dst_num` parameters are not referenced in the body.
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
const int num, const int channels, const int in_h, const int in_w,
const int dst_num, const int dst_h, const int dst_w,
const int src_num_id, const int dst_num_id,const int out_h, const int out_w,
const int start_h, const int start_w, const int dst_start_h, const int dst_start_w){
CUDA_KERNEL_LOOP(index, nthreads) {
// index covers channels * out_h * out_w elements of one sample.
int c = (index/(out_h*out_w))% channels;
int h = (index%(out_h*out_w))/out_w;
int w = (index%(out_h*out_w))%out_w;
Dtype* dst_data_ptr =dst_data+ ((dst_num_id* channels+c)*dst_h )*dst_w ;
const Dtype* src_data_ptr = src_data + ((src_num_id* channels+c)*in_h )*in_w ;
int true_src_h = h+start_h;
int true_dst_h = h+dst_start_h;
int true_src_w = w+start_w;
int true_dst_w = w + dst_start_w;
// Copy only when both the source and destination coordinates are in range.
if(true_src_h >= 0 && true_src_h < in_h && true_src_w >= 0 && true_src_w < in_w &&
true_dst_h >= 0 && true_dst_h < dst_h && true_dst_w>= 0 && true_dst_w< dst_w )
dst_data_ptr[true_dst_h *dst_w + true_dst_w] =
src_data_ptr[true_src_h * in_w + true_src_w];
}
}
// Copy the window [start_h, end_h) x [start_w, end_w) of sample src_num_id in
// `src` into sample dst_num_id of `dst`, placed at (dst_start_h, dst_start_w).
// Out-of-range pixels are clipped by the kernel, so the window may overhang
// either blob. `dst` is NOT reshaped; it must already have matching channels.
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<Dtype>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w ){
const int in_h = src.height();
const int in_w = src.width();
const int dst_h = dst.height();
const int dst_w = dst.width();
const int channels = src.channels();
const int out_h = end_h - start_h;
const int out_w = end_w - start_w;
CHECK(out_h > 0) <<" end_h should be larger than start_h";
CHECK(out_w > 0) <<" end_w should be larger than start_w";
// CHECK(out_h <=in_h) <<" out_h should not be larger than input_height";
// CHECK(out_w <=in_w) <<" out_w should not be larger than input_width";
CHECK_GT(src.num(), src_num_id);
CHECK_GT(dst.num(), dst_num_id);
CHECK_EQ(channels, dst.channels());
// CHECK_GE(dst.height(), end_h);
// CHECK_GE(dst.width(), end_w);
const int loop_n = channels*out_h*out_w;
hipLaunchKernelGGL(( kernel_CropBlob <Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, loop_n,
src.gpu_data(), dst.mutable_gpu_data(),
src.num(), channels, in_h, in_w,
dst.num(),dst_h,dst_w, src_num_id,dst_num_id,
out_h, out_w, start_h, start_w, dst_start_h, dst_start_w);
// Added: launch-error check, previously missing here although every other
// launcher in this file performs it.
CUDA_POST_KERNEL_CHECK;
}
template void CropBlobs_gpu( const Blob<float>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<float>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
template void CropBlobs_gpu( const Blob<double>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<double>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
/*
template <typename Dtype>
void ConcateSubImagesInBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w)
{
const int in_nums = src.num();
const int height_col_ =(out_img_h + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (out_img_w + 2 * pad_w - kernel_w) / stride_w + 1;
// std::cout<<"in_nums:"<<in_nums<<" kernel_h:"<<kernel_h<<" kernel_w:"<<kernel_w
// <<" pad_h:"<<pad_h<<" pad_w:"<<pad_w<<" stride_h:"<<stride_h<<
// " stride_w:"<<stride_w<<" out_img_h:"<<out_img_h <<" out_img_w:"<<out_img_w
// << " height_col:"<<height_col_<<" width_col:"<<width_col_<<std::endl;
dst.Reshape(in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w);
// std::cout<<"in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w: "<<
// in_nums/height_col_/width_col_<< " "<<src.channels()<<" "<<out_img_h<<" "<<
// out_img_w<<std::endl;
const int channels_ = dst.channels();
const int height_ = dst.height();
const int width_ = dst.width();
const int out_num = dst.num();
for(int n = 0; n < out_num; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n*height_col_*width_col_);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n);
caffe::col2im_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
return;
}
template void ConcateSubImagesInBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
template void ConcateSubImagesInBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
*/
// namespace caffe
}
| 7c2b7dea39ece03433dd4cdaf5dd5d37831d18c3.cu |
/**
* developed by zhujin
*/
#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/coded_stream.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/util_img.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/proto/caffe.pb.h"
namespace caffe {
// Bilinear-resize kernel: each thread produces one pixel of a single
// (src_height x src_width) -> (dst_height x dst_width) plane.
// scale_h/scale_w are src/dst size ratios; taps that fall outside the source
// plane are simply skipped (their weight is not redistributed).
template <typename Dtype>
__global__ void kernel_BiLinearResize(const int nthreads, const Dtype* src_data, const int src_height, const int src_width,
Dtype* dst_data, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(i, nthreads) {
// Destination row and the fractional source row it maps onto.
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
// Vertical weights: w_h0 for row src_h, w_h1 for row src_h + 1.
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
// Destination column and the fractional source column it maps onto.
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
// Horizontal weights: w_w0 for column src_w, w_w1 for column src_w + 1.
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
// Blend up to four neighboring source samples, guarding the right/bottom
// borders where the +1 neighbor does not exist.
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}
// Resize one device-resident (src_height x src_width) plane `src` into the
// (dst_height x dst_width) plane `dst` with bilinear interpolation.
// Scales are src/dst ratios, matching kernel_BiLinearResize's mapping.
template <typename Dtype>
void BiLinearResizeMat_gpu(const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width)
{
const Dtype scale_w = src_width / (Dtype)dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
int loop_n = dst_height * dst_width;
kernel_BiLinearResize<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(
loop_n,src, src_height, src_width, dst, dst_height, dst_width, scale_h, scale_w);
// Re-enabled: without this, a bad launch surfaces only at some later,
// unrelated CUDA call, which makes the failure impossible to attribute.
CUDA_POST_KERNEL_CHECK;
}
template void BiLinearResizeMat_gpu(const float* src, const int src_height, const int src_width,
float* dst, const int dst_height, const int dst_width);
template void BiLinearResizeMat_gpu(const double* src, const int src_height, const int src_width,
double* dst, const int dst_height, const int dst_width);
// Resize the single (src_n, src_c) plane of `src` into the (dst_n, dst_c)
// plane of `dst` using bilinear interpolation on the GPU.
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src, const int src_n, const int src_c,
Blob<Dtype>* dst, const int dst_n, const int dst_c) {
const int src_height = src->height();
const int src_width = src->width();
const int dst_height = dst->height();
const int dst_width = dst->width();
// Linear offset of the requested plane inside each blob's NCHW buffer.
const int src_plane = (src_n * src->channels() + src_c) * src_height * src_width;
const int dst_plane = (dst_n * dst->channels() + dst_c) * dst_height * dst_width;
const Dtype* src_data = src->gpu_data() + src_plane;
Dtype* dst_data = dst->mutable_gpu_data() + dst_plane;
BiLinearResizeMat_gpu(src_data, src_height, src_width,
dst_data, dst_height, dst_width);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src, const int src_n, const int src_c,
Blob<float>* dst, const int dst_n, const int dst_c);
template void ResizeBlob_gpu(const Blob<double>* src, const int src_n, const int src_c,
Blob<double>* dst, const int dst_n, const int dst_c);
// For every destination pixel, record the four source sample indices
// (loc1..loc4) and bilinear weights (weight1..weight4) that a resize would
// use. Out-of-range taps keep location 0 with weight 0, so a consumer can
// blend all four entries unconditionally. Locations are stored as Dtype.
template <typename Dtype>
__global__ void kernel_GetBiLinearResizeMatRules(const int nthreads, const int src_height, const int src_width,
const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Same coordinate mapping as kernel_BiLinearResize.
int dst_h = index /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = index %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
// dst_data[dst_idx] = 0;
const int src_idx = src_offset_1 + src_w;
// Top-left tap always exists.
loc1[dst_idx] = src_idx;
weight1[dst_idx] = w_h0 * w_w0;
// Default the other three taps to "no contribution".
loc2[dst_idx] = 0;
weight2[dst_idx] = 0;
weight3[dst_idx] = 0;
loc3[dst_idx] = 0;
loc4[dst_idx] = 0;
weight4[dst_idx] = 0;
// Top-right tap, if not on the right border.
if (src_w + 1 < src_width)
{
loc2[dst_idx] = src_idx + 1;
weight2[dst_idx] = w_h0 * w_w1;
// dst_data[dst_idx] += (w_h0 * w_w1 * src_data[src_idx + 1]);
}
// Bottom-left tap, if not on the bottom border.
if (src_h + 1 < src_height)
{
// dst_data[dst_idx] += (w_h1 * w_w0 * src_data[src_idx + src_width]);
weight3[dst_idx] = w_h1 * w_w0;
loc3[dst_idx] = src_idx + src_width;
}
// Bottom-right tap, if both +1 neighbors exist.
if (src_w + 1 < src_width && src_h + 1 < src_height)
{
loc4[dst_idx] = src_idx + src_width + 1;
weight4[dst_idx] = w_h1 * w_w1;
// dst_data[dst_idx] += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
}
}
}
// Batched bilinear resize over a whole NCHW blob: one thread per output
// element across num * channels * dst_height * dst_width. Each thread
// decomposes its flat index into (n, c, pixel) and resamples its own plane.
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
// index = ((n * channels + c) * dst_height*dst_width) + i
int i = index %( dst_height * dst_width);
int c = (index/(dst_height * dst_width))%channels;
int n = (index/(dst_height * dst_width))/channels;
// Base of this (n, c) plane in the source and destination buffers.
int src_offset = (n * channels + c) * src_height * src_width;
int dst_offset = (n * channels + c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
// Per-pixel bilinear sampling, identical to kernel_BiLinearResize.
int dst_h = i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}
/*
// new version by Sifei Liu
template <typename Dtype>
__global__ void kernel_ResizeBlob(const int nthreads,const int num,const int channels, const Dtype* src, const int src_height, const int src_width,
Dtype* dst, const int dst_height, const int dst_width, const Dtype scale_h, const Dtype scale_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int d_i = index %( dst_height * dst_width);
int d_c = (index/(dst_height * dst_width))%channels;
int d_n = (index/(dst_height * dst_width))/channels;
int s_c = (index/(src_height * src_width))%channels;
int s_n = (index/(src_height * src_width))/channels;
int src_offset = (s_n * channels + s_c) * src_height * src_width;
int dst_offset = (d_n * channels + d_c) * dst_height * dst_width;
const Dtype* src_data = src+src_offset;
Dtype* dst_data = dst+dst_offset;
int dst_h = d_i /dst_width;
Dtype fh = dst_h * scale_h;
const int src_h = floor(fh);
fh -= src_h;
const Dtype w_h0 = std::abs(1.0f - fh);
const Dtype w_h1 = std::abs(fh);
const int dst_offset_1 = dst_h * dst_width;
const int src_offset_1 = src_h * src_width;
int dst_w = d_i %dst_width;
Dtype fw = dst_w * scale_w;
const int src_w = floor(fw);
fw -= src_w;
const Dtype w_w0 = std::abs(1.0f - fw);
const Dtype w_w1 = std::abs(fw);
const int dst_idx = dst_offset_1 + dst_w;
const int src_idx = src_offset_1 + src_w;
Dtype res = (w_h0 * w_w0 * src_data[src_idx]);
if (src_w + 1 < src_width)
res += (w_h0 * w_w1 * src_data[src_idx + 1]);
if (src_h + 1 < src_height)
res += (w_h1 * w_w0 * src_data[src_idx + src_width]);
if (src_w + 1 < src_width && src_h + 1 < src_height)
res += (w_h1 * w_w1 * src_data[src_idx + src_width + 1]);
dst_data[dst_idx] = res;
}
}*/
// Bilinearly resize every (n, c) plane of `src` into `dst` with one kernel
// launch over num * channels * dst_h * dst_w output elements. The two blobs
// must agree on num and channels; spatial dimensions may differ.
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst) {
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
const int num = src->num();
const int channels = src->channels();
const int src_h = src->height();
const int src_w = src->width();
const int dst_h = dst->height();
const int dst_w = dst->width();
// src/dst size ratios drive the output->input coordinate mapping.
const Dtype scale_h = src_h / (Dtype)dst_h;
const Dtype scale_w = src_w / (Dtype)dst_w;
const int loop_n = dst_h * dst_w * channels * num;
kernel_ResizeBlob<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(loop_n, num, channels,
src->gpu_data(), src_h, src_w,
dst->mutable_gpu_data(), dst_h, dst_w,
scale_h, scale_w);
CUDA_POST_KERNEL_CHECK;
}
template void ResizeBlob_gpu(const Blob<float>* src,
Blob<float>* dst);
template void ResizeBlob_gpu(const Blob<double>* src,
Blob<double>* dst);
// Precompute bilinear sampling rules for a (src -> dst) plane resize: for
// each destination pixel, the four source indices loc1..loc4 and weights
// weight1..weight4. Missing border taps are written as index 0 / weight 0.
template <typename Dtype>
void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
Dtype* loc1, Dtype* weight1, Dtype* loc2, Dtype* weight2,
Dtype* loc3, Dtype* weight3, Dtype* loc4, Dtype* weight4)
{
const int n_pixels = dst_height * dst_width;
const Dtype scale_h = src_height / (Dtype)dst_height;
const Dtype scale_w = src_width / (Dtype)dst_width;
kernel_GetBiLinearResizeMatRules<Dtype> <<<CAFFE_GET_BLOCKS(n_pixels), CAFFE_CUDA_NUM_THREADS >>>(
n_pixels, src_height, src_width,
dst_height, dst_width, scale_h, scale_w,
loc1, weight1, loc2, weight2,
loc3, weight3, loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
float* loc1, float* weight1, float* loc2, float* weight2,
float* loc3, float* weight3, float* loc4, float* weight4);
template void GetBiLinearResizeMatRules_gpu( const int src_height, const int src_width,
const int dst_height, const int dst_width,
double* loc1, double* weight1, double* loc2, double* weight2,
double* loc3, double* weight3, double* loc4, double* weight4);
// Convenience overload: resize `src` into `dst` and additionally cache the
// bilinear sampling rules in loc1..loc4 (indices in each blob's data(),
// weights in its diff()) so a backward pass can reuse them.
template <typename Dtype>
void ResizeBlob_gpu(const Blob<Dtype>* src,Blob<Dtype>* dst,
Blob<Dtype>* loc1, Blob<Dtype>* loc2, Blob<Dtype>* loc3, Blob<Dtype>* loc4){
CHECK(src->num() == dst->num())<<"src->num() == dst->num()";
CHECK(src->channels() == dst->channels())<< "src->channels() == dst->channels()";
const int in_h = src->height();
const int in_w = src->width();
const int out_h = dst->height();
const int out_w = dst->width();
GetBiLinearResizeMatRules_gpu(in_h, in_w, out_h, out_w,
loc1->mutable_gpu_data(), loc1->mutable_gpu_diff(),
loc2->mutable_gpu_data(), loc2->mutable_gpu_diff(),
loc3->mutable_gpu_data(), loc3->mutable_gpu_diff(),
loc4->mutable_gpu_data(), loc4->mutable_gpu_diff());
ResizeBlob_gpu(src, dst);
}
template void ResizeBlob_gpu(const Blob<float>* src,Blob<float>* dst,
Blob<float>* loc1, Blob<float>* loc2, Blob<float>* loc3, Blob<float>* loc4);
template void ResizeBlob_gpu(const Blob<double>* src,Blob<double>* dst,
Blob<double>* loc1, Blob<double>* loc2, Blob<double>* loc3, Blob<double>* loc4);
/*
template <typename Dtype>
void GenerateSubBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w)
{
const int nums_ = src.num();
const int channels_ = src.channels();
const int height_ = src.height();
const int width_ = src.width();
const int height_col_ =(height_ + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (width_ + 2 * pad_w - kernel_w) / stride_w + 1;
*
* actually after im2col_v2, data is stored as
* col_buffer_.Reshape(1*height_out_*width_out_, channels_ , kernel_h_ , kernel_w_);
* *
dst.Reshape(height_col_*width_col_*nums_,channels_, kernel_h, kernel_w);
caffe::caffe_gpu_set(dst.count(),Dtype(0),dst.mutable_gpu_data());
for(int n = 0; n < nums_; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n*height_col_*width_col_);
caffe::im2col_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
}
template void GenerateSubBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
template void GenerateSubBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w);
*/
// Crop kernel: one thread per output element. Copies the window starting at
// (start_h, start_w) from each (n, c) source plane of size in_h x in_w into
// the corresponding out_h x out_w destination plane. No bounds clipping:
// callers must guarantee the window lies inside the source.
// NOTE: the `num` parameter is not referenced in the body.
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
const int num, const int channels, const int in_h, const int in_w,
const int out_h, const int out_w, const int start_h, const int start_w)
{
CUDA_KERNEL_LOOP(index, nthreads) {
// Chained integer division equals division by the product, so this is
// n = index / (channels*out_h*out_w) for the NCHW-flattened index.
int n = index/channels/out_h/out_w;
int c = (index/(out_h*out_w))% channels;
int h = (index%(out_h*out_w))/out_w;
int w = (index%(out_h*out_w))%out_w;
Dtype* dst_data_ptr =dst_data+ ((n* channels+c)*out_h )*out_w ;
const Dtype* src_data_ptr = src_data + ((n* channels+c)*in_h )*in_w ;
dst_data_ptr[h*out_w+w] = src_data_ptr[(h+start_h)*in_w + w+start_w];
}
}
// Crop the window [start_h, end_h) x [start_w, end_w) out of every (n, c)
// plane of `src` into `dst`, reshaping `dst` to num x channels x out_h x out_w.
// When the window covers the whole plane this degenerates to a plain copy.
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<Dtype>&dst)
{
const int in_h = src.height();
const int in_w = src.width();
const int num = src.num();
const int channels = src.channels();
const int out_h = end_h - start_h;
const int out_w = end_w - start_w;
CHECK(out_h > 0) <<" end_h should be larger than start_h";
CHECK(out_w > 0) <<" end_w should be larger than start_w";
// Message typo fixed ("nor" -> "not"); conditions unchanged.
CHECK(out_h <=in_h) <<" out_h should not be larger than input_height";
CHECK(out_w <=in_w) <<" out_w should not be larger than input_width";
dst.Reshape(num,channels,out_h,out_w);
if((out_h != in_h) || (out_w != in_w)){
const int loop_n = num*channels*out_h*out_w;
kernel_CropBlob <Dtype> <<< CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>> (loop_n,
src.gpu_data(), dst.mutable_gpu_data(),
num, channels, in_h, in_w, out_h, out_w, start_h, start_w);
// Added: launch-error check, consistent with ResizeBlob_gpu above.
CUDA_POST_KERNEL_CHECK;
}
else
{
// Same-size "crop": just copy the whole buffer device-to-device.
caffe::caffe_copy(src.count(),src.gpu_data(),dst.mutable_gpu_data());
}
}
template void CropBlobs_gpu( const Blob<float>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<float>&dst);
template void CropBlobs_gpu( const Blob<double>&src,
const int start_h, const int start_w,
const int end_h, const int end_w, Blob<double>&dst);
// Overload of kernel_CropBlob that copies an out_h x out_w window from one
// selected source image (src_num_id) into one selected destination image
// (dst_num_id), with independent source/destination offsets. Unlike the basic
// overload above, every coordinate is bounds-checked, so negative offsets and
// windows extending past either plane are silently clipped rather than faulting.
// nthreads should equal channels * out_h * out_w (one thread per element of a
// single image's crop).
template <typename Dtype>
__global__ void kernel_CropBlob(const int nthreads, const Dtype* src_data, Dtype* dst_data,
		const int num, const int channels, const int in_h, const int in_w,
		const int dst_num, const int dst_h, const int dst_w,
		const int src_num_id, const int dst_num_id,const int out_h, const int out_w,
		const int start_h, const int start_w, const int dst_start_h, const int dst_start_w){
	CUDA_KERNEL_LOOP(index, nthreads) {
		// Decompose the flat index into (c, h, w) within the crop window.
		int c = (index/(out_h*out_w))% channels;
		int h = (index%(out_h*out_w))/out_w;
		int w = (index%(out_h*out_w))%out_w;
		// Base pointers of channel c of the selected destination/source image.
		Dtype* dst_data_ptr =dst_data+ ((dst_num_id* channels+c)*dst_h )*dst_w ;
		const Dtype* src_data_ptr = src_data + ((src_num_id* channels+c)*in_h )*in_w ;
		// Absolute coordinates in the source and destination planes.
		int true_src_h = h+start_h;
		int true_dst_h = h+dst_start_h;
		int true_src_w = w+start_w;
		int true_dst_w = w + dst_start_w;
		// Copy only when both coordinates fall inside their planes.
		if(true_src_h >= 0 && true_src_h < in_h && true_src_w >= 0 && true_src_w < in_w &&
				true_dst_h >= 0 && true_dst_h < dst_h && true_dst_w>= 0 && true_dst_w< dst_w )
			dst_data_ptr[true_dst_h *dst_w + true_dst_w] =
					src_data_ptr[true_src_h * in_w + true_src_w];
	}
}
// Copies the window [start_h, end_h) x [start_w, end_w) of image src_num_id in
// src into image dst_num_id of dst, placed at (dst_start_h, dst_start_w).
// Unlike the whole-blob overload, dst is NOT reshaped here — it must already
// have the desired geometry with matching channel count. Out-of-range parts of
// the window are clipped inside the kernel, which is why the size CHECKs below
// are commented out.
template <typename Dtype>
void CropBlobs_gpu( const Blob<Dtype>&src, const int src_num_id, const int start_h,
		const int start_w, const int end_h, const int end_w, Blob<Dtype>&dst,
		const int dst_num_id,const int dst_start_h , const int dst_start_w ){
	const int in_h = src.height();
	const int in_w = src.width();
	const int dst_h = dst.height();
	const int dst_w = dst.width();
	const int channels = src.channels();
	const int out_h = end_h - start_h;
	const int out_w = end_w - start_w;
	CHECK(out_h > 0) <<" end_h should be larger than start_h";
	CHECK(out_w > 0) <<" end_w should be larger than start_w";
//	CHECK(out_h <=in_h) <<" out_h should nor be larger than input_height";
//	CHECK(out_w <=in_w) <<" out_w should nor be larger than input_width";
	// The selected image indices must exist in their respective blobs.
	CHECK_GT(src.num(), src_num_id);
	CHECK_GT(dst.num(), dst_num_id);
	CHECK_EQ(channels, dst.channels());
//	CHECK_GE(dst.height(), end_h);
//	CHECK_GE(dst.width(), end_w);
	// One thread per element of the single-image crop window.
	const int loop_n = channels*out_h*out_w;
	kernel_CropBlob <Dtype> <<< CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>> (loop_n,
			src.gpu_data(), dst.mutable_gpu_data(),
			src.num(), channels, in_h, in_w,
			dst.num(),dst_h,dst_w, src_num_id,dst_num_id,
			out_h, out_w, start_h, start_w, dst_start_h, dst_start_w);
}
template void CropBlobs_gpu( const Blob<float>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<float>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
template void CropBlobs_gpu( const Blob<double>&src, const int src_num_id, const int start_h,
const int start_w, const int end_h, const int end_w, Blob<double>&dst,
const int dst_num_id,const int dst_start_h , const int dst_start_w );
/*
template <typename Dtype>
void ConcateSubImagesInBlobs_gpu(const Blob<Dtype>& src,
Blob<Dtype>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w)
{
const int in_nums = src.num();
const int height_col_ =(out_img_h + 2 * pad_h - kernel_h) / stride_h + 1;
const int width_col_ = (out_img_w + 2 * pad_w - kernel_w) / stride_w + 1;
// std::cout<<"in_nums:"<<in_nums<<" kernel_h:"<<kernel_h<<" kernel_w:"<<kernel_w
// <<" pad_h:"<<pad_h<<" pad_w:"<<pad_w<<" stride_h:"<<stride_h<<
// " stride_w:"<<stride_w<<" out_img_h:"<<out_img_h <<" out_img_w:"<<out_img_w
// << " height_col:"<<height_col_<<" width_col:"<<width_col_<<std::endl;
dst.Reshape(in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w);
// std::cout<<"in_nums/height_col_/width_col_,src.channels(), out_img_h, out_img_w: "<<
// in_nums/height_col_/width_col_<< " "<<src.channels()<<" "<<out_img_h<<" "<<
// out_img_w<<std::endl;
const int channels_ = dst.channels();
const int height_ = dst.height();
const int width_ = dst.width();
const int out_num = dst.num();
for(int n = 0; n < out_num; n++){
const Dtype* src_data = src.gpu_data() + src.offset(n*height_col_*width_col_);
Dtype* dst_data = dst.mutable_gpu_data() + dst.offset(n);
caffe::col2im_v2_gpu(src_data, channels_, height_,
width_, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dst_data);
}
return;
}
template void ConcateSubImagesInBlobs_gpu(const Blob<float>& src,
Blob<float>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
template void ConcateSubImagesInBlobs_gpu(const Blob<double>& src,
Blob<double>& dst,const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int out_img_h, const int out_img_w);
*/
// namespace caffe
}
|
5a1ce673690d9a5ea874d6d204719d492b9fa00f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
// Empty kernel: launching it into the default (null) stream makes that stream
// wait for the per-group streams' work (see the launch sites below).
__global__ void sync_conv_groups() { }
// Forward pass of the cuDNN-backed convolution layer (HIP-ported build).
// For every bottom/top blob pair, runs cudnnConvolutionForward once per
// convolution group on that group's own handle/stream, then optionally adds
// the bias with cudnnAddTensor. All pointer offsets (bottom_offset_,
// weight_offset_, top_offset_, bias_offset_) select the current group's slice.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* weight = this->blobs_[0]->gpu_data();
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[i]->mutable_gpu_data();
    // Forward through cuDNN in parallel over groups.
    for (int g = 0; g < this->group_; g++) {
      // Filters.
      CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
            cudnn::dataType<Dtype>::one,
            bottom_descs_[i], bottom_data + bottom_offset_ * g,
            filter_desc_, weight + this->weight_offset_ * g,
            conv_descs_[i],
            fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
            cudnn::dataType<Dtype>::zero,
            top_descs_[i], top_data + top_offset_ * g));
      // Bias.
      if (this->bias_term_) {
        const Dtype* bias_data = this->blobs_[1]->gpu_data();
        // Ignore Bias
        CUDNN_CHECK(cudnnAddTensor(handle_[g],
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_data + bias_offset_ * g,
              cudnn::dataType<Dtype>::one,
              top_descs_[i], top_data + top_offset_ * g));
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
  }
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 5a1ce673690d9a5ea874d6d204719d492b9fa00f.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/layers/cudnn_conv_layer.hpp"
namespace caffe {
// Empty kernel: launching it into the default (null) stream makes that stream
// wait for the per-group streams' work (see the launch sites below).
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* weight = this->blobs_[0]->gpu_data();
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + this->weight_offset_ * g,
conv_descs_[i],
fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
cudnn::dataType<Dtype>::zero,
top_descs_[i], top_data + top_offset_ * g));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
// Ignore Bias
CUDNN_CHECK(cudnnAddTensor(handle_[g],
cudnn::dataType<Dtype>::one,
bias_desc_, bias_data + bias_offset_ * g,
cudnn::dataType<Dtype>::one,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
cudnn::dataType<Dtype>::one,
top_descs_[i], top_diff + top_offset_ * g,
cudnn::dataType<Dtype>::one,
bias_desc_, bias_diff + bias_offset_ * g));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(
handle_[1*this->group_ + g],
cudnn::dataType<Dtype>::one,
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_filter_algo_[i], workspace[1*this->group_ + g],
workspace_bwd_filter_sizes_[i],
cudnn::dataType<Dtype>::one,
filter_desc_, weight_diff + this->weight_offset_ * g));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
if (weight == NULL) {
weight = this->blobs_[0]->gpu_data();
}
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(
handle_[2*this->group_ + g],
cudnn::dataType<Dtype>::one,
filter_desc_, weight + this->weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bwd_data_algo_[i], workspace[2*this->group_ + g],
workspace_bwd_data_sizes_[i],
cudnn::dataType<Dtype>::zero,
bottom_descs_[i], bottom_diff + bottom_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
e2af4e33dc5d78a842736266d0aa9ba3e899e4fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Narrows each of the N doubles in A to single precision, storing the results
// in B. The loop strides by the total thread count of a (possibly 2-D) grid,
// so any launch configuration covers the whole array.
__global__ void __doubleToFloat(double *A, float *B, int N) {
  const int stride = blockDim.x * gridDim.x * gridDim.y;
  int idx = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
  while (idx < N) {
    B[idx] = static_cast<float>(A[idx]);
    idx += stride;
  }
}
__global__ void __doubleToFloat(double *A, float *B, int N) {
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) {
B[i] = (float)(A[i]);
}
} |
31b40fc820ae669ee41b25fd0a79cf97db31832d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA kernel ------------------------------------------------------------------------------------------
__global__ void SSSP_gpu(Node *graph_nodes_av, Edge *graph_edges_av, int *cost,
int *color, int *q1, int *q2, int *n_t,
int *head, int *tail, int *threads_end,
int *threads_run, int *overflow, int *gray_shade,
int LIMIT, int CPU) {
extern __shared__ int l_mem[];
int* tail_bin = l_mem;
int* l_qout = (int*)&tail_bin[1];
int* shift = (int*)&l_qout[W_QUEUE_SIZE];
int* base = (int*)&shift[1];
const int tid = threadIdx.x;
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
const int MAXWG = gridDim.x;
const int WG_SIZE = blockDim.x;
int *qin, *qout;
int iter = 1;
while(*n_t != 0) {
// Swap queues
if(iter % 2 == 0) {
qin = q1;
qout = q2;
} else {
qin = q2;
qout = q1;
}
if((*n_t >= LIMIT) | (CPU == 0)) {
int gray_shade_local = atomicAdd_system(&gray_shade[0], 0);
if(tid == 0) {
// Reset queue
*tail_bin = 0;
}
// Fetch frontier elements from the queue
if(tid == 0)
*base = atomicAdd_system(&head[0], WG_SIZE);
__syncthreads();
int my_base = *base;
while(my_base < *n_t) {
// If local queue might overflow
if(*tail_bin >= W_QUEUE_SIZE / 2) {
if(tid == 0) {
// Add local tail_bin to tail
*shift = atomicAdd_system(&tail[0], *tail_bin);
}
__syncthreads();
int local_shift = tid;
while(local_shift < *tail_bin) {
qout[*shift + local_shift] = l_qout[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
__syncthreads();
if(tid == 0) {
// Reset local queue
*tail_bin = 0;
}
__syncthreads();
}
if(my_base + tid < *n_t && *overflow == 0) {
// Visit a node from the current frontier
int pid = qin[my_base + tid];
//////////////// Visit node ///////////////////////////
atomicExch_system(&color[pid], BLACK); // Node visited
int cur_cost = atomicAdd_system(&cost[pid], 0); // Look up shortest-path distance to this node
Node cur_node;
cur_node.x = graph_nodes_av[pid].x;
cur_node.y = graph_nodes_av[pid].y;
Edge cur_edge;
// For each outgoing edge
for(int i = cur_node.x; i < cur_node.y + cur_node.x; i++) {
cur_edge.x = graph_edges_av[i].x;
cur_edge.y = graph_edges_av[i].y;
int id = cur_edge.x;
int cost_local = cur_edge.y;
cost_local += cur_cost;
int orig_cost = atomicMax_system(&cost[id], cost_local);
if(orig_cost < cost_local) {
int old_color = atomicMax_system(&color[id], gray_shade_local);
if(old_color != gray_shade_local) {
// Push to the queue
int tail_index = atomicAdd(tail_bin, 1);
if(tail_index >= W_QUEUE_SIZE) {
*overflow = 1;
break;
} else
l_qout[tail_index] = id;
}
}
}
}
if(tid == 0)
*base = atomicAdd_system(&head[0], WG_SIZE); // Fetch more frontier elements from the queue
__syncthreads();
my_base = *base;
}
/////////////////////////////////////////////////////////
// Compute size of the output and allocate space in the global queue
if(tid == 0) {
*shift = atomicAdd_system(&tail[0], *tail_bin);
}
__syncthreads();
///////////////////// CONCATENATE INTO HOST COHERENT MEMORY /////////////////////
int local_shift = tid;
while(local_shift < *tail_bin) {
qout[*shift + local_shift] = l_qout[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
//////////////////////////////////////////////////////////////////////////
}
// Synchronization
if(*overflow == 1) {
break;
}
if(CPU) { // if CPU is available
iter++;
if(tid == 0) {
atomicAdd_system(&threads_end[0], WG_SIZE);
while(atomicAdd_system(&threads_run[0], 0) < iter) {
}
}
} else { // if GPU only
iter++;
if(tid == 0)
atomicAdd_system(&threads_end[0], WG_SIZE);
if(gtid == 0) {
while(atomicAdd_system(&threads_end[0], 0) != MAXWG * WG_SIZE) {
}
*n_t = atomicAdd_system(&tail[0], 0);
atomicExch_system(&tail[0], 0);
atomicExch_system(&head[0], 0);
atomicExch_system(&threads_end[0], 0);
if(iter % 2 == 0)
atomicExch_system(&gray_shade[0], GRAY0);
else
atomicExch_system(&gray_shade[0], GRAY1);
atomicAdd_system(&threads_run[0], 1);
}
if(tid == 0 && gtid != 0) {
while(atomicAdd_system(&threads_run[0], 0) < iter) {
}
}
}
__syncthreads();
}
}
// Host-side wrapper: launches SSSP_gpu with `blocks` thread blocks of
// `threads` threads and `l_mem_size` bytes of dynamic shared memory (the
// kernel carves this into tail_bin, the local output queue, shift, and base),
// then returns the launch status.
hipError_t call_SSSP_gpu(int blocks, int threads, Node *graph_nodes_av, Edge *graph_edges_av, int *cost,
    int *color, int *q1, int *q2, int *n_t,
    int *head, int *tail, int *threads_end, int *threads_run,
    int *overflow, int *gray_shade, int LIMIT, const int CPU, int l_mem_size){
    dim3 dimGrid(blocks);
    dim3 dimBlock(threads);
    // NOTE(review): the launch is asynchronous, so hipGetLastError() reports
    // launch-configuration failures only, not errors raised inside the kernel.
    hipLaunchKernelGGL(( SSSP_gpu), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, graph_nodes_av, graph_edges_av, cost,
        color, q1, q2, n_t,
        head, tail, threads_end, threads_run,
        overflow, gray_shade, LIMIT, CPU);
    hipError_t err = hipGetLastError();
    return err;
}
| 31b40fc820ae669ee41b25fd0a79cf97db31832d.cu | /*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA kernel ------------------------------------------------------------------------------------------
__global__ void SSSP_gpu(Node *graph_nodes_av, Edge *graph_edges_av, int *cost,
int *color, int *q1, int *q2, int *n_t,
int *head, int *tail, int *threads_end,
int *threads_run, int *overflow, int *gray_shade,
int LIMIT, int CPU) {
extern __shared__ int l_mem[];
int* tail_bin = l_mem;
int* l_qout = (int*)&tail_bin[1];
int* shift = (int*)&l_qout[W_QUEUE_SIZE];
int* base = (int*)&shift[1];
const int tid = threadIdx.x;
const int gtid = blockIdx.x * blockDim.x + threadIdx.x;
const int MAXWG = gridDim.x;
const int WG_SIZE = blockDim.x;
int *qin, *qout;
int iter = 1;
while(*n_t != 0) {
// Swap queues
if(iter % 2 == 0) {
qin = q1;
qout = q2;
} else {
qin = q2;
qout = q1;
}
if((*n_t >= LIMIT) | (CPU == 0)) {
int gray_shade_local = atomicAdd_system(&gray_shade[0], 0);
if(tid == 0) {
// Reset queue
*tail_bin = 0;
}
// Fetch frontier elements from the queue
if(tid == 0)
*base = atomicAdd_system(&head[0], WG_SIZE);
__syncthreads();
int my_base = *base;
while(my_base < *n_t) {
// If local queue might overflow
if(*tail_bin >= W_QUEUE_SIZE / 2) {
if(tid == 0) {
// Add local tail_bin to tail
*shift = atomicAdd_system(&tail[0], *tail_bin);
}
__syncthreads();
int local_shift = tid;
while(local_shift < *tail_bin) {
qout[*shift + local_shift] = l_qout[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
__syncthreads();
if(tid == 0) {
// Reset local queue
*tail_bin = 0;
}
__syncthreads();
}
if(my_base + tid < *n_t && *overflow == 0) {
// Visit a node from the current frontier
int pid = qin[my_base + tid];
//////////////// Visit node ///////////////////////////
atomicExch_system(&color[pid], BLACK); // Node visited
int cur_cost = atomicAdd_system(&cost[pid], 0); // Look up shortest-path distance to this node
Node cur_node;
cur_node.x = graph_nodes_av[pid].x;
cur_node.y = graph_nodes_av[pid].y;
Edge cur_edge;
// For each outgoing edge
for(int i = cur_node.x; i < cur_node.y + cur_node.x; i++) {
cur_edge.x = graph_edges_av[i].x;
cur_edge.y = graph_edges_av[i].y;
int id = cur_edge.x;
int cost_local = cur_edge.y;
cost_local += cur_cost;
int orig_cost = atomicMax_system(&cost[id], cost_local);
if(orig_cost < cost_local) {
int old_color = atomicMax_system(&color[id], gray_shade_local);
if(old_color != gray_shade_local) {
// Push to the queue
int tail_index = atomicAdd(tail_bin, 1);
if(tail_index >= W_QUEUE_SIZE) {
*overflow = 1;
break;
} else
l_qout[tail_index] = id;
}
}
}
}
if(tid == 0)
*base = atomicAdd_system(&head[0], WG_SIZE); // Fetch more frontier elements from the queue
__syncthreads();
my_base = *base;
}
/////////////////////////////////////////////////////////
// Compute size of the output and allocate space in the global queue
if(tid == 0) {
*shift = atomicAdd_system(&tail[0], *tail_bin);
}
__syncthreads();
///////////////////// CONCATENATE INTO HOST COHERENT MEMORY /////////////////////
int local_shift = tid;
while(local_shift < *tail_bin) {
qout[*shift + local_shift] = l_qout[local_shift];
// Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration
local_shift += WG_SIZE;
}
//////////////////////////////////////////////////////////////////////////
}
// Synchronization
if(*overflow == 1) {
break;
}
if(CPU) { // if CPU is available
iter++;
if(tid == 0) {
atomicAdd_system(&threads_end[0], WG_SIZE);
while(atomicAdd_system(&threads_run[0], 0) < iter) {
}
}
} else { // if GPU only
iter++;
if(tid == 0)
atomicAdd_system(&threads_end[0], WG_SIZE);
if(gtid == 0) {
while(atomicAdd_system(&threads_end[0], 0) != MAXWG * WG_SIZE) {
}
*n_t = atomicAdd_system(&tail[0], 0);
atomicExch_system(&tail[0], 0);
atomicExch_system(&head[0], 0);
atomicExch_system(&threads_end[0], 0);
if(iter % 2 == 0)
atomicExch_system(&gray_shade[0], GRAY0);
else
atomicExch_system(&gray_shade[0], GRAY1);
atomicAdd_system(&threads_run[0], 1);
}
if(tid == 0 && gtid != 0) {
while(atomicAdd_system(&threads_run[0], 0) < iter) {
}
}
}
__syncthreads();
}
}
// Host-side wrapper: launches SSSP_gpu with `blocks` thread blocks of
// `threads` threads and `l_mem_size` bytes of dynamic shared memory (the
// kernel carves this into tail_bin, the local output queue, shift, and base),
// then returns the launch status.
cudaError_t call_SSSP_gpu(int blocks, int threads, Node *graph_nodes_av, Edge *graph_edges_av, int *cost,
    int *color, int *q1, int *q2, int *n_t,
    int *head, int *tail, int *threads_end, int *threads_run,
    int *overflow, int *gray_shade, int LIMIT, const int CPU, int l_mem_size){
    dim3 dimGrid(blocks);
    dim3 dimBlock(threads);
    // NOTE(review): the launch is asynchronous, so cudaGetLastError() reports
    // launch-configuration failures only, not errors raised inside the kernel.
    SSSP_gpu<<<dimGrid, dimBlock, l_mem_size>>>(graph_nodes_av, graph_edges_av, cost,
        color, q1, q2, n_t,
        head, tail, threads_end, threads_run,
        overflow, gray_shade, LIMIT, CPU);
    cudaError_t err = cudaGetLastError();
    return err;
}
|
844f43031ed4d0315100b3369ecd946fd9709fac.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add_img.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for the add_img kernel: for each matrix size
// in matrices_ (argv[1] gives how many entries to use) and each launch shape
// in blocks_, times 1000 kernel launches.
// Fixes over the original: buffers are now sized in floats rather than bytes
// (the original passed XSIZE*YSIZE as the byte count, under-allocating by a
// factor of sizeof(float)); device memory is freed each iteration instead of
// leaking; and the timed section waits for kernel completion so it measures
// execution, not just asynchronous launch overhead.
int main(int argc, char **argv) {
	hipSetDevice(0);
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			float *image_padded = NULL;
			hipMalloc(&image_padded, XSIZE * YSIZE * sizeof(float));
			float *ave1 = NULL;
			hipMalloc(&ave1, XSIZE * YSIZE * sizeof(float));
			float *ave2 = NULL;
			hipMalloc(&ave2, XSIZE * YSIZE * sizeof(float));
			int nx = 1;
			int ny = 1;
			int nima = 1;
			// Round the grid extents up to multiples of the block shape.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			hipFree(0);  // force context creation before timing
			hipLaunchKernelGGL(add_img, dim3(gridBlock), dim3(threadBlock), 0, 0,
					image_padded, ave1, ave2, nx, ny, nima);
			hipDeviceSynchronize();
			// Warm-up launches, excluded from the measurement.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				hipLaunchKernelGGL(add_img, dim3(gridBlock), dim3(threadBlock), 0, 0,
						image_padded, ave1, ave2, nx, ny, nima);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				hipLaunchKernelGGL(add_img, dim3(gridBlock), dim3(threadBlock), 0, 0,
						image_padded, ave1, ave2, nx, ny, nima);
			}
			// Launches are asynchronous: wait for completion before stopping the clock.
			hipDeviceSynchronize();
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// Release the per-configuration buffers (previously leaked).
			hipFree(image_padded);
			hipFree(ave1);
			hipFree(ave2);
		}
	}
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add_img.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver for the add_img kernel: for each matrix size
// in matrices_ (argv[1] gives how many entries to use) and each launch shape
// in blocks_, times 1000 kernel launches.
// Fixes over the original: buffers are now sized in floats rather than bytes
// (the original passed XSIZE*YSIZE as the byte count, under-allocating by a
// factor of sizeof(float)); device memory is freed each iteration instead of
// leaking; and the timed section waits for kernel completion so it measures
// execution, not just asynchronous launch overhead.
int main(int argc, char **argv) {
	cudaSetDevice(0);
	char* p;
	int matrix_len = strtol(argv[1], &p, 10);
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			float *image_padded = NULL;
			cudaMalloc(&image_padded, XSIZE * YSIZE * sizeof(float));
			float *ave1 = NULL;
			cudaMalloc(&ave1, XSIZE * YSIZE * sizeof(float));
			float *ave2 = NULL;
			cudaMalloc(&ave2, XSIZE * YSIZE * sizeof(float));
			int nx = 1;
			int ny = 1;
			int nima = 1;
			// Round the grid extents up to multiples of the block shape.
			int iXSIZE = XSIZE;
			int iYSIZE = YSIZE;
			while (iXSIZE % BLOCKX != 0) {
				iXSIZE++;
			}
			while (iYSIZE % BLOCKY != 0) {
				iYSIZE++;
			}
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0);  // force context creation before timing
			add_img<<<gridBlock,threadBlock>>>(image_padded,ave1,ave2,nx,ny,nima);
			cudaDeviceSynchronize();
			// Warm-up launches, excluded from the measurement.
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				add_img<<<gridBlock,threadBlock>>>(image_padded,ave1,ave2,nx,ny,nima);
			}
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				add_img<<<gridBlock,threadBlock>>>(image_padded,ave1,ave2,nx,ny,nima);
			}
			// Launches are asynchronous: wait for completion before stopping the clock.
			cudaDeviceSynchronize();
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
			// Release the per-configuration buffers (previously leaked).
			cudaFree(image_padded);
			cudaFree(ave1);
			cudaFree(ave2);
		}
	}
}
ef507a817ef4eaac815054cd34b6925366d6eda4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Element-wise vector addition: d_c[i] = d_a[i] + d_b[i] for i in [0, n).
// One thread per element. Fix over the original: n was accepted but never
// used, so launches whose grid was rounded up past n read and wrote out of
// bounds; the guard below makes oversized launches safe.
__global__ void vector_add(int *d_a, int *d_b, int *d_c, int n){
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i < n) {
		d_c[i] = d_a[i] + d_b[i];
	}
}
// Demo: adds two 4-element vectors on the GPU and prints the result.
// Fix over the original: the three device buffers are now freed (they leaked).
int main(void){
	printf("Hello, World - from CPU!\n");
	int a[4] = {22,13,16,5};
	int b[4] = {5,22,17,37};
	int c[4];
	int *d_a;
	int *d_b;
	int *d_c;
	// Device buffers for the two inputs and the output.
	hipMalloc((void**)&d_a,sizeof(int)*4);
	hipMalloc((void**)&d_b,sizeof(int)*4);
	hipMalloc((void**)&d_c,sizeof(int)*4);
	hipMemcpy(d_a,a,sizeof(int)*4,hipMemcpyHostToDevice);
	hipMemcpy(d_b,b,sizeof(int)*4,hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( vector_add), dim3(1),dim3(4), 0, 0, d_a,d_b,d_c,4);
	// This blocking copy also synchronizes with the kernel on the default stream.
	hipMemcpy(c,d_c,sizeof(int)*4,hipMemcpyDeviceToHost);
	printf("%d,%d,%d,%d\n",c[0],c[1],c[2],c[3]);
	// Release device memory (previously leaked).
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_c);
	return 0;
}
| ef507a817ef4eaac815054cd34b6925366d6eda4.cu | #include <stdio.h>
// Element-wise vector addition: d_c[i] = d_a[i] + d_b[i] for i in [0, n).
// Bounds guard added: the grid may launch more threads than n elements.
__global__ void vector_add(int *d_a, int *d_b, int *d_c, int n){
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i < n) {
		d_c[i] = d_a[i] + d_b[i];
	}
}
// Minimal CUDA demo: add two 4-element vectors on the device and print the sum.
int main(void){
	printf("Hello, World - from CPU!\n");
	int a[4] = {22,13,16,5};
	int b[4] = {5,22,17,37};
	int c[4];
	int *d_a;
	int *d_b;
	int *d_c;
	cudaMalloc((void**)&d_a,sizeof(int)*4);
	cudaMalloc((void**)&d_b,sizeof(int)*4);
	cudaMalloc((void**)&d_c,sizeof(int)*4);
	cudaMemcpy(d_a,a,sizeof(int)*4,cudaMemcpyHostToDevice);
	cudaMemcpy(d_b,b,sizeof(int)*4,cudaMemcpyHostToDevice);
	vector_add<<<1,4>>>(d_a,d_b,d_c,4);
	// Device-to-host memcpy is blocking, so it also synchronizes with the kernel.
	cudaMemcpy(c,d_c,sizeof(int)*4,cudaMemcpyDeviceToHost);
	printf("%d,%d,%d,%d\n",c[0],c[1],c[2],c[3]);
	// BUG FIX: free device buffers (previously leaked).
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
|
632573dd9861548689d29c0cb58ad6e28d148889.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include "sceneStructs.h"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include "glm/glm.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <thrust/remove.h>
// Poll the runtime for the most recent error; on failure print a diagnostic
// tagged with `msg`, wait for a keypress so the message stays visible, abort.
void checkCUDAError(const char *msg) {
  hipError_t status = hipGetLastError();
  if (status != hipSuccess) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
    std::cin.get();
    exit(EXIT_FAILURE);
  }
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
// Per-pixel pseudo-random vec3 in [0,1)^3, seeded from the pixel index and
// the iteration `time` so consecutive frames decorrelate.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
  int pixel = x + (y * resolution.x);
  thrust::default_random_engine engine(hash(pixel * time));
  thrust::uniform_real_distribution<float> unit(0, 1);
  return glm::vec3((float) unit(engine), (float) unit(engine), (float) unit(engine));
}
//Kernel that does the initial raycast from the camera.
// Build the primary ray through pixel (x, y) of a pinhole camera described by
// eye/view/up and a per-axis field of view (degrees).
// `time` is kept for interface compatibility; the RNG previously seeded from
// it (and the `index`/`rng`/`u01` locals) was never used and has been removed.
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
  //standard camera raycast stuff
  glm::vec3 E = eye;
  glm::vec3 C = view;
  glm::vec3 U = up;
  float fovx = fov.x;
  float fovy = fov.y;
  float CD = glm::length(C);
  // A/B span the image plane; H/V are its half-extent vectors from the fovs.
  glm::vec3 A = glm::cross(C, U);
  glm::vec3 B = glm::cross(A, C);
  glm::vec3 M = E+C;
  glm::vec3 H = (A*float(CD*tan(fovx*(PI/180))))/float(glm::length(A));
  glm::vec3 V = (B*float(CD*tan(-fovy*(PI/180))))/float(glm::length(B));
  // Normalized screen coordinates in [0,1] across the resolution.
  float sx = (x)/(resolution.x-1);
  float sy = (y)/(resolution.y-1);
  glm::vec3 P = M + (((2*sx)-1)*H) + (((2*sy)-1)*V);
  glm::vec3 PmE = P-E;
  glm::vec3 R = E + (float(200)*(PmE))/float(glm::length(PmE));
  // NOTE(review): this normalizes the *point* R rather than (R - E); looks
  // suspect but is preserved to keep behavior identical — confirm intent.
  glm::vec3 direction = glm::normalize(R);
  //major performance cliff at this point, TODO: find out why!
  ray r;
  r.origin = eye;
  r.direction = direction;
  return r;
}
//Kernel that blacks out a given image buffer
// Reset every pixel of the accumulation image to black.
// Expects a 2D launch covering resolution.x by resolution.y threads.
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUG FIX: strict '<' — the old '<=' let x == resolution.x write into the
  // next row (and the last thread one element past the buffer).
  if(x < resolution.x && y < resolution.y){
    image[index] = glm::vec3(0,0,0);
  }
}
//Kernel that writes the image to the OpenGL PBO directly.
// Convert the float accumulation image to 8-bit RGBA (clamped to 255) and
// write it into the OpenGL pixel buffer object for display.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUG FIX: strict '<' — the old '<=' allowed out-of-bounds rows/columns.
  if(x < resolution.x && y < resolution.y){
    glm::vec3 color;
    color.x = image[index].x*255.0;
    color.y = image[index].y*255.0;
    color.z = image[index].z*255.0;
    // Clamp HDR values into displayable range.
    if(color.x>255){
      color.x = 255;
    }
    if(color.y>255){
      color.y = 255;
    }
    if(color.z>255){
      color.z = 255;
    }
    // Each thread writes one pixel location in the texture (textel)
    PBOpos[index].w = 0;
    PBOpos[index].x = color.x;
    PBOpos[index].y = color.y;
    PBOpos[index].z = color.z;
  }
}
// Scatter pass run after each bounce: the ray pool is stream-compacted, so a
// ray's slot no longer matches its pixel — each ray carries its origin pixel
// in (r.x, r.y).  Copies the throughput color of every finished (`life`),
// non-black ray back into the per-pixel bounce buffer.
// `num` is the live pool size; `blockdim` is the launch grid's width in
// blocks, needed to flatten the 2D thread coordinates into a pool index.
__global__ void raytoColorbouncecopy(glm::vec2 resolution,glm::vec3* colBounce, ray* r, int num,int blockdim)
{
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  // Flat index into the compacted ray pool (not into the image).
  int index = x + (y * blockdim* blockDim.x );
  if(index < num)
  {
    // Recover the pixel this ray originated from.
    int xx = r[index].x;
    int yy = r[index].y;
    int newindex = xx + (yy * resolution.x);
    // Only terminated rays with a non-black (all-components-nonzero) color
    // contribute; others leave the pixel's previous value in place.
    if(r[index].life == true && r[index].rcolor[0] !=0 && r[index].rcolor[1] !=0 && r[index].rcolor[2] !=0)
      colBounce[newindex] = r[index].rcolor ;
    //if(r[newindex].life == true)
    /// r[newindex].rcolor = glm::vec3(1,1,1);
  }
}
// Blend this iteration's bounce buffer into the running-average image:
//   colors = (colors * (iters - 1) + colBounce) / iters
// i.e. progressive refinement where each iteration has equal weight.
__global__ void finalizeraycolor(glm::vec2 resolution,glm::vec3* colBounce,glm::vec3* colors,float iters)
{
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Guard against dividing by zero on the very first call.
  if(iters < 0.05)
    iters = 1.0f;
  // BUG FIX: strict '<' — the old '<=' allowed out-of-bounds rows/columns.
  if(x < resolution.x && y < resolution.y){
    colors[index] = (colors[index] * (iters - 1) + colBounce[index]) / (iters);
  }
}
// Seed the per-pixel ray pool for a new iteration: one primary camera ray per
// pixel, tagged with its pixel coordinates for later scatter, with `life`
// cleared (life == true marks a terminated path) and throughput color white.
// Both per-iteration color buffers are zeroed.
__global__ void initializeray(glm::vec2 resolution, float time,cameraData cam, ray* r,glm::vec3* colBounce,glm::vec3* colIters){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUG FIX: strict '<' — the old '<=' allowed out-of-bounds rows/columns.
  if(x < resolution.x && y < resolution.y){
    ray rnew = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
    r[index].direction = glm::normalize(rnew.direction);
    r[index].origin = rnew.origin;
    r[index].x = x ;
    r[index].y = y ;
    r[index].life = false ;
    r[index].rcolor = glm::vec3(1,1,1);
    colBounce[index] = glm::vec3(0,0,0);
    colIters[index] = glm::vec3(0,0,0);
  }
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
// Core path-tracing kernel: advances every live ray in the (compacted) pool
// by exactly one bounce.
//   - `num`      : number of live rays in the pool
//   - `blockdim` : launch grid width in blocks (pool is indexed flat)
//   - `newr`     : the ray pool, read and written in place
//   - `m`        : 6 floats describing the mesh bounding box (used as a
//                  cheap reject test before per-triangle intersection)
// A ray with life == true has terminated (hit a light or escaped).
__global__ void raytraceRay(glm::vec2 resolution, float time, float bounce, cameraData cam, int rayDepth, glm::vec3* colors,
                            staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials,ray* newr, glm::vec3* colBounce, int bou,int num,int blockdim,glm::vec3* myvertex, int numVertices,float *m ){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  // Flat index into the compacted ray pool.
  int index = x + (y * blockdim* blockDim.x );//
  if ( index < num )
  {
    //ray r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
    ray r = newr[index];
    int xx = r.x;
    int yy = r.y;
    // int newindex = xx + (yy * blockdim);
    glm::vec3 curIps;   // intersection point of the closest hit found so far
    glm::vec3 curNorm;  // surface normal at that hit
    if((x<=resolution.x && y<=resolution.y)){
      // --- Nearest-intersection search over all scene primitives ---
      float MAX_DEPTH = 100000000000000000;
      float depth = MAX_DEPTH;
      int geoIndex = -1; // index of the closest primitive, -1 if none hit
      for(int i=0; i<numberOfGeoms; i++){
        glm::vec3 intersectionPoint;
        glm::vec3 intersectionNormal;
        if(geoms[i].type==SPHERE){
          depth = sphereIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
        }else if(geoms[i].type==CUBE){
          depth = boxIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
        }else if(geoms[i].type==MESH){
          // Bounding-box reject first; only then walk the triangle list.
          depth = boxIntersectionTest( glm::vec3(m[1],m[5],m[3]) ,glm::vec3(m[0],m[4],m[2]),geoms[i],r,intersectionPoint, intersectionNormal);
          if (depth != -1)
            depth = meshIntersectionTest(geoms[i],r,myvertex,numVertices,intersectionPoint, intersectionNormal);
        }else{
          //lol?
        }
        if(depth<MAX_DEPTH && depth>-EPSILON){
          MAX_DEPTH = depth;
          geoIndex = i;
          curIps = intersectionPoint;
          curNorm = intersectionNormal;
        }
      }
      // If you are hitting a object that is not light
      if(geoIndex != -1 && materials[geoms[geoIndex].materialid].emittance < 0.01f && (r.life == false))
      {
        // RNG seeded per pixel, per iteration, per bounce.
        thrust::default_random_engine rng (hash (time * index * bou));
        thrust::uniform_real_distribution<float> xi1(0,1);
        thrust::uniform_real_distribution<float> xi2(0,1);
        // If the object that you hit is not reflective
        if ( materials[geoms[geoIndex].materialid].hasReflective < 0.01f && materials[geoms[geoIndex].materialid].hasRefractive < 0.01f)
        {
          // Diffuse: cosine-weighted bounce; multiply throughput by albedo.
          newr[index].direction = glm::normalize(calculateRandomDirectionInHemisphere(glm::normalize(curNorm), (float)xi1(rng),(float)xi2(rng)));
          newr[index].origin = curIps + newr[index].direction * 0.001f ; //glm::vec3 neyep = dips + ref1 * 0.001f ;
          newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
        }
        // If the object that you hit is reflective
        else if ( materials[geoms[geoIndex].materialid].hasReflective > 0.01f && materials[geoms[geoIndex].materialid].hasRefractive < 0.01f)
        {
          // Reflectitivity works based on probabbility of the random number generated
          thrust::uniform_real_distribution<float> xi3(0,1);
          float rtest = (float)xi3(rng) ;
          if( rtest < materials[geoms[geoIndex].materialid].hasReflective)
          {
            // Mirror bounce: reflect about the surface normal.
            glm::vec3 inc = glm::normalize(newr[index].direction) ;
            newr[index].direction = inc - (2.0f * glm::normalize(curNorm) * (glm::dot(glm::normalize(curNorm),inc))); //glm::vec3 ref1 = lig - (2.0f * dnorm * (glm::dot(dnorm,lig)));
            newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].specularColor;
          }
          else
          {
            // Otherwise treat as diffuse for this bounce.
            newr[index].direction = glm::normalize(calculateRandomDirectionInHemisphere(glm::normalize(curNorm), (float)xi1(rng),(float)xi2(rng)));
            newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
          }
          newr[index].origin = curIps + newr[index].direction * 0.001f ; //glm::vec3 neyep = dips + ref1 * 0.001f ;
        }
        // If the object that you hit is refractive
        if ( materials[geoms[geoIndex].materialid].hasRefractive > 0.01f)
        {
          // 70/30 split between refraction and specular reflection.
          thrust::uniform_real_distribution<float> xi4(0,1);
          float rfr = (float)xi4(rng) ;
          if (rfr < 0.7)//materials[geoms[geoIndex].materialid].hasRefractive )
          {
            float n1 = 1.0f;
            float n2 = materials[geoms[geoIndex].materialid].hasRefractive;
            float angleofincidence = acos(glm::dot(newr[index].direction ,glm::normalize(curNorm))/(glm::length(newr[index].direction) * glm::length(newr[index].direction)));
            angleofincidence = abs(angleofincidence * (180.0f/PI));
            //float angleofreflection = asin(sin(angleofincidence) * (n1/n2));
            // Sign of dot(dir, normal) distinguishes entering vs exiting.
            float io = glm::dot( glm::normalize(newr[index].direction),glm::normalize(curNorm));
            float criticalAngle = asin(n2/n1);// * (180.0f/PI) ;
            if(io < 0.0f )
            {
              // Entering the medium: refract with ratio n1/n2.
              glm::vec3 refractedray = glm::refract(glm::normalize(newr[index].direction),glm::normalize(curNorm),(n1/n2));
              newr[index].direction = glm::normalize(refractedray);
              newr[index].origin = curIps + newr[index].direction * 0.001f ;
              //newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
            }
            else if(io >= 0.0f ) // && (angleofincidence < criticalAngle )
            {
              // Exiting the medium: flip the normal and use n2/n1.
              glm::vec3 refractedray = glm::refract(glm::normalize(newr[index].direction),-1.0f * glm::normalize(curNorm),(n2/n1));
              newr[index].direction = glm::normalize(refractedray);
              newr[index].origin = curIps + newr[index].direction * 0.001f ;
              //newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
            }
          }
          else
          {
            // Specular-reflection branch of the refractive material.
            glm::vec3 inc = glm::normalize(newr[index].direction) ;
            newr[index].direction = inc - (2.0f * glm::normalize(curNorm) * (glm::dot(glm::normalize(curNorm),inc))); //glm::vec3 ref1 = lig - (2.0f * dnorm * (glm::dot(dnorm,lig)));
            newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].specularColor;
            newr[index].origin = curIps + newr[index].direction * 0.001f ;
          }
        }
      }
      // If the ray hits an object that is light
      else if(geoIndex != -1 && materials[geoms[geoIndex].materialid].emittance > 0.01f && (r.life == false))
      {
        newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].emittance;
        newr[index].life = true;
      }
      // If the ray keeps hitting the light once it dies - This case actually never happens
      else if(geoIndex != -1 && materials[geoms[geoIndex].materialid].emittance > 0.01f && (r.life == true))
      {
        newr[index].rcolor = newr[index].rcolor ;
        newr[index].life = true;
      }
      // The final case where the ray does not hit any object at all
      else
      {
        newr[index].rcolor = newr[index].rcolor * glm::vec3(0,0,0);
        newr[index].life = true;
      }
    }
  }
}
//A thrust based structure
// Thrust predicate used with thrust::remove_if: a ray whose `life` flag is
// set has terminated (hit a light or escaped the scene) and is compacted out
// of the live-ray pool between bounces.
struct is_dead
{
  __host__ __device__
  bool operator()(const ray r)
  {
    return r.life;
  }
};
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Host-side driver for one path-tracing iteration: uploads the scene, mesh
// and camera, seeds a per-pixel ray pool, traces up to 25 bounces with
// stream compaction of dead rays, blends the result into the progressive
// average image, and writes the displayable frame into the OpenGL PBO.
// Also prints GPU-side timing (ms) of the bounce loop.
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms,std::vector<glm::vec3> mypoints,float *maxmin ){
  int traceDepth = 1; //determines how many bounces the raytracer traces
  // set up crucial magic
  int tileSize = 8;
  int numVertices = mypoints.size();
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)) , (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
  int pixelCount = (int)renderCam->resolution.x*(int)renderCam->resolution.y;
  //send image to GPU
  glm::vec3* cudaimage = NULL;
  hipMalloc((void**)&cudaimage, pixelCount*sizeof(glm::vec3));
  hipMemcpy( cudaimage, renderCam->image, pixelCount*sizeof(glm::vec3), hipMemcpyHostToDevice);
  // BUG FIX: upload mesh vertices with ONE bulk copy — the old per-vertex
  // hipMemcpy loop issued |V| synchronous transfers.
  glm::vec3* mvertex = NULL;
  hipMalloc((void**)&mvertex, numVertices * sizeof(glm::vec3));
  if(numVertices > 0){
    hipMemcpy( mvertex, mypoints.data(), numVertices * sizeof(glm::vec3), hipMemcpyHostToDevice);
  }
  // Mesh bounding-box extrema (6 floats), again as a single copy.
  float* mami = NULL;
  hipMalloc((void**)&mami, 6 * sizeof(float));
  if(maxmin != NULL)
  {
    hipMemcpy( mami, maxmin, 6 * sizeof(float), hipMemcpyHostToDevice);
  }
  //package geometry (transforms resolved for this frame) and send to GPU
  staticGeom* geomList = new staticGeom[numberOfGeoms];
  for(int i=0; i<numberOfGeoms; i++){
    staticGeom newStaticGeom;
    newStaticGeom.type = geoms[i].type;
    newStaticGeom.materialid = geoms[i].materialid;
    newStaticGeom.translation = geoms[i].translations[frame];
    newStaticGeom.rotation = geoms[i].rotations[frame];
    newStaticGeom.scale = geoms[i].scales[frame];
    newStaticGeom.transform = geoms[i].transforms[frame];
    newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
    geomList[i] = newStaticGeom;
  }
  staticGeom* cudageoms = NULL;
  hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
  hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice);
  material* cudamaterials = NULL;
  hipMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
  hipMemcpy( cudamaterials, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice);
  //package camera
  cameraData cam;
  cam.resolution = renderCam->resolution;
  cam.position = renderCam->positions[frame];
  cam.view = renderCam->views[frame];
  cam.up = renderCam->ups[frame];
  cam.fov = renderCam->fov;
  //ray pool and per-iteration color buffers
  ray* raypool = NULL;
  hipMalloc((void**)&raypool, pixelCount*sizeof(ray));
  glm::vec3* colorBounce = NULL;
  hipMalloc((void**)&colorBounce, pixelCount*sizeof(glm::vec3));
  glm::vec3* colorIters = NULL;
  hipMalloc((void**)&colorIters , pixelCount*sizeof(glm::vec3));
  //Initialize the ray values
  hipLaunchKernelGGL(( initializeray), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution,(float)iterations,cam,raypool,colorBounce,colorIters);
  hipDeviceSynchronize();
  // Sub-pixel camera jitter every 20th iteration (cheap anti-aliasing).
  srand(iterations);
  float x = 0.0f , y = 0.0f ;
  if(iterations%20 == 0 )
  {
    x = (rand() % 100 + 1)/1000.0f;
    y = (rand() % 100 + 1)/1000.0f;
    cam.position[0] +=x;
    cam.position[1] +=y;
  }
  //events for GPU-side timing of the bounce loop
  hipEvent_t event1, event2;
  hipEventCreate(&event1);
  hipEventCreate(&event2);
  hipEventRecord(event1, 0);
  int N = pixelCount;                        // live rays in the compacted pool
  dim3 StreamBlocksPerGrid = fullBlocksPerGrid ;
  int blockdim = fullBlocksPerGrid.x ;
  for(int bounce = 1; bounce <=25; ++bounce)
  {
    hipLaunchKernelGGL(( raytraceRay), dim3(StreamBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution, (float)iterations, (float)bounce, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials,raypool,colorBounce,bounce,N,blockdim,mvertex,numVertices,mami);
    hipLaunchKernelGGL(( raytoColorbouncecopy), dim3(StreamBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution,colorBounce,raypool,N,blockdim);
    // Stream compaction: drop terminated rays and shrink the launch grid.
    thrust::device_ptr<ray> rptr = thrust::device_pointer_cast(raypool);
    ray *endrptr = thrust::remove_if(rptr,rptr + N , is_dead()).get();
    N = endrptr - raypool ;
    int numofBlocks = ceil((float)N / (float)(tileSize * tileSize)) ;
    blockdim = ceil(sqrt((float)numofBlocks));
    StreamBlocksPerGrid = dim3(blockdim,blockdim);
    hipDeviceSynchronize();
  }
  //blend this iteration into the progressive average, then fill the PBO
  hipLaunchKernelGGL(( finalizeraycolor), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, renderCam->resolution,colorBounce,cudaimage,(float)iterations);
  hipDeviceSynchronize();
  hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage);
  hipEventRecord(event2, 0);
  //synchronize
  hipEventSynchronize(event1); //optional
  hipEventSynchronize(event2); //wait for the event to be executed!
  //calculate time
  float dt_ms;
  hipEventElapsedTime(&dt_ms, event1, event2);
  std::cout << dt_ms << std::endl ;
  // BUG FIX: destroy the timing events (previously leaked every frame).
  hipEventDestroy(event1);
  hipEventDestroy(event2);
  //retrieve image from GPU
  hipMemcpy( renderCam->image, cudaimage, pixelCount*sizeof(glm::vec3), hipMemcpyDeviceToHost);
  //free up stuff, or else we'll leak memory like a madman
  hipFree( cudaimage );
  hipFree( cudageoms );
  hipFree( cudamaterials );
  hipFree( colorBounce );
  hipFree( colorIters );
  hipFree( raypool );
  // BUG FIX: mesh vertex and bounding-box buffers were previously leaked.
  hipFree( mvertex );
  hipFree( mami );
  delete [] geomList;
  // make certain the kernel has completed
  hipDeviceSynchronize();
  checkCUDAError("Kernel failed!");
}
// Intersect ray `s` against the triangle soup (myvertex, 3 vertices per
// triangle), returning the nearest hit distance and writing the hit point
// and normal; returns -1 when no triangle is hit.
// BUG FIX: the old code used the magic float sentinel `at == 12345.0`
// (a double literal compared to a float), which both risked a spurious miss
// and silently discarded any genuine hit with t >= 12345.  A "found" state
// (best < 0 means none yet) replaces the sentinel.
float __device__ meshIntersectionTest(staticGeom curGeom,ray s,glm::vec3* myvertex, int numVertices, glm::vec3& mintersect, glm::vec3& mnormal)
{
  glm::vec3 ipss, normss;
  glm::vec3 curnorm, curipss;
  float best = -1.0f;
  for(int k = 0; k + 2 < numVertices; k = k + 3)
  {
    float t = triangleIntersectionTest(curGeom, s, myvertex[k], myvertex[k+1], myvertex[k+2], ipss, normss);
    // triangleIntersectionTest returns -1 on miss; keep the closest hit.
    if(t != -1 && (best < 0.0f || t < best))
    {
      curnorm = normss;
      curipss = ipss;
      best = t;
    }
  }
  mnormal = curnorm;
  mintersect = curipss;
  return best; // -1 when nothing was hit
}
| 632573dd9861548689d29c0cb58ad6e28d148889.cu | // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
// This file includes code from:
// Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097
// Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/
// Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include "sceneStructs.h"
#include "utilities.h"
#include "raytraceKernel.h"
#include "intersections.h"
#include "interactions.h"
#include <vector>
#include "glm/glm.hpp"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <thrust/remove.h>
// Poll the runtime for the most recent error; on failure print a diagnostic
// tagged with `msg`, wait for a keypress so the message stays visible, abort.
void checkCUDAError(const char *msg) {
  cudaError_t status = cudaGetLastError();
  if (status != cudaSuccess) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    std::cin.get();
    exit(EXIT_FAILURE);
  }
}
//LOOK: This function demonstrates how to use thrust for random number generation on the GPU!
//Function that generates static.
// Per-pixel pseudo-random vec3 in [0,1)^3, seeded from the pixel index and
// the iteration `time` so consecutive frames decorrelate.
__host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){
  int pixel = x + (y * resolution.x);
  thrust::default_random_engine engine(hash(pixel * time));
  thrust::uniform_real_distribution<float> unit(0, 1);
  return glm::vec3((float) unit(engine), (float) unit(engine), (float) unit(engine));
}
//Kernel that does the initial raycast from the camera.
// Build the primary ray through pixel (x, y) of a pinhole camera described by
// eye/view/up and a per-axis field of view (degrees).
// `time` is kept for interface compatibility; the RNG previously seeded from
// it (and the `index`/`rng`/`u01` locals) was never used and has been removed.
__host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){
  //standard camera raycast stuff
  glm::vec3 E = eye;
  glm::vec3 C = view;
  glm::vec3 U = up;
  float fovx = fov.x;
  float fovy = fov.y;
  float CD = glm::length(C);
  // A/B span the image plane; H/V are its half-extent vectors from the fovs.
  glm::vec3 A = glm::cross(C, U);
  glm::vec3 B = glm::cross(A, C);
  glm::vec3 M = E+C;
  glm::vec3 H = (A*float(CD*tan(fovx*(PI/180))))/float(glm::length(A));
  glm::vec3 V = (B*float(CD*tan(-fovy*(PI/180))))/float(glm::length(B));
  // Normalized screen coordinates in [0,1] across the resolution.
  float sx = (x)/(resolution.x-1);
  float sy = (y)/(resolution.y-1);
  glm::vec3 P = M + (((2*sx)-1)*H) + (((2*sy)-1)*V);
  glm::vec3 PmE = P-E;
  glm::vec3 R = E + (float(200)*(PmE))/float(glm::length(PmE));
  // NOTE(review): this normalizes the *point* R rather than (R - E); looks
  // suspect but is preserved to keep behavior identical — confirm intent.
  glm::vec3 direction = glm::normalize(R);
  //major performance cliff at this point, TODO: find out why!
  ray r;
  r.origin = eye;
  r.direction = direction;
  return r;
}
//Kernel that blacks out a given image buffer
// Reset every pixel of the accumulation image to black.
// Expects a 2D launch covering resolution.x by resolution.y threads.
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUG FIX: strict '<' — the old '<=' let x == resolution.x write into the
  // next row (and the last thread one element past the buffer).
  if(x < resolution.x && y < resolution.y){
    image[index] = glm::vec3(0,0,0);
  }
}
//Kernel that writes the image to the OpenGL PBO directly.
// Convert the float accumulation image to 8-bit RGBA (clamped to 255) and
// write it into the OpenGL pixel buffer object for display.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUG FIX: strict '<' — the old '<=' allowed out-of-bounds rows/columns.
  if(x < resolution.x && y < resolution.y){
    glm::vec3 color;
    color.x = image[index].x*255.0;
    color.y = image[index].y*255.0;
    color.z = image[index].z*255.0;
    // Clamp HDR values into displayable range.
    if(color.x>255){
      color.x = 255;
    }
    if(color.y>255){
      color.y = 255;
    }
    if(color.z>255){
      color.z = 255;
    }
    // Each thread writes one pixel location in the texture (textel)
    PBOpos[index].w = 0;
    PBOpos[index].x = color.x;
    PBOpos[index].y = color.y;
    PBOpos[index].z = color.z;
  }
}
// Scatter pass run after each bounce: the ray pool is stream-compacted, so a
// ray's slot no longer matches its pixel — each ray carries its origin pixel
// in (r.x, r.y).  Copies the throughput color of every finished (`life`),
// non-black ray back into the per-pixel bounce buffer.
// `num` is the live pool size; `blockdim` is the launch grid's width in
// blocks, needed to flatten the 2D thread coordinates into a pool index.
__global__ void raytoColorbouncecopy(glm::vec2 resolution,glm::vec3* colBounce, ray* r, int num,int blockdim)
{
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  // Flat index into the compacted ray pool (not into the image).
  int index = x + (y * blockdim* blockDim.x );
  if(index < num)
  {
    // Recover the pixel this ray originated from.
    int xx = r[index].x;
    int yy = r[index].y;
    int newindex = xx + (yy * resolution.x);
    // Only terminated rays with a non-black (all-components-nonzero) color
    // contribute; others leave the pixel's previous value in place.
    if(r[index].life == true && r[index].rcolor[0] !=0 && r[index].rcolor[1] !=0 && r[index].rcolor[2] !=0)
      colBounce[newindex] = r[index].rcolor ;
    //if(r[newindex].life == true)
    /// r[newindex].rcolor = glm::vec3(1,1,1);
  }
}
// Blend this iteration's bounce buffer into the running-average image:
//   colors = (colors * (iters - 1) + colBounce) / iters
// i.e. progressive refinement where each iteration has equal weight.
__global__ void finalizeraycolor(glm::vec2 resolution,glm::vec3* colBounce,glm::vec3* colors,float iters)
{
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Guard against dividing by zero on the very first call.
  if(iters < 0.05)
    iters = 1.0f;
  // BUG FIX: strict '<' — the old '<=' allowed out-of-bounds rows/columns.
  if(x < resolution.x && y < resolution.y){
    colors[index] = (colors[index] * (iters - 1) + colBounce[index]) / (iters);
  }
}
// Seed the per-pixel ray pool for a new iteration: one primary camera ray per
// pixel, tagged with its pixel coordinates for later scatter, with `life`
// cleared (life == true marks a terminated path) and throughput color white.
// Both per-iteration color buffers are zeroed.
__global__ void initializeray(glm::vec2 resolution, float time,cameraData cam, ray* r,glm::vec3* colBounce,glm::vec3* colIters){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // BUG FIX: strict '<' — the old '<=' allowed out-of-bounds rows/columns.
  if(x < resolution.x && y < resolution.y){
    ray rnew = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
    r[index].direction = glm::normalize(rnew.direction);
    r[index].origin = rnew.origin;
    r[index].x = x ;
    r[index].y = y ;
    r[index].life = false ;
    r[index].rcolor = glm::vec3(1,1,1);
    colBounce[index] = glm::vec3(0,0,0);
    colIters[index] = glm::vec3(0,0,0);
  }
}
//TODO: IMPLEMENT THIS FUNCTION
//Core raytracer kernel
__global__ void raytraceRay(glm::vec2 resolution, float time, float bounce, cameraData cam, int rayDepth, glm::vec3* colors,
staticGeom* geoms, int numberOfGeoms, material* materials, int numberOfMaterials,ray* newr, glm::vec3* colBounce, int bou,int num,int blockdim,glm::vec3* myvertex, int numVertices,float *m ){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * blockdim* blockDim.x );//
if ( index < num )
{
//ray r = raycastFromCameraKernel(resolution, time, x, y, cam.position, cam.view, cam.up, cam.fov);
ray r = newr[index];
int xx = r.x;
int yy = r.y;
// int newindex = xx + (yy * blockdim);
glm::vec3 curIps;
glm::vec3 curNorm;
if((x<=resolution.x && y<=resolution.y)){
float MAX_DEPTH = 100000000000000000;
float depth = MAX_DEPTH;
int geoIndex = -1;
for(int i=0; i<numberOfGeoms; i++){
glm::vec3 intersectionPoint;
glm::vec3 intersectionNormal;
if(geoms[i].type==SPHERE){
depth = sphereIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
}else if(geoms[i].type==CUBE){
depth = boxIntersectionTest(geoms[i], r, intersectionPoint, intersectionNormal);
}else if(geoms[i].type==MESH){
depth = boxIntersectionTest( glm::vec3(m[1],m[5],m[3]) ,glm::vec3(m[0],m[4],m[2]),geoms[i],r,intersectionPoint, intersectionNormal);
if (depth != -1)
depth = meshIntersectionTest(geoms[i],r,myvertex,numVertices,intersectionPoint, intersectionNormal);
}else{
//lol?
}
if(depth<MAX_DEPTH && depth>-EPSILON){
MAX_DEPTH = depth;
geoIndex = i;
curIps = intersectionPoint;
curNorm = intersectionNormal;
}
}
// If you are hitting a object that is not light
if(geoIndex != -1 && materials[geoms[geoIndex].materialid].emittance < 0.01f && (r.life == false))
{
thrust::default_random_engine rng (hash (time * index * bou));
thrust::uniform_real_distribution<float> xi1(0,1);
thrust::uniform_real_distribution<float> xi2(0,1);
// If the object that you hit is not reflective
if ( materials[geoms[geoIndex].materialid].hasReflective < 0.01f && materials[geoms[geoIndex].materialid].hasRefractive < 0.01f)
{
newr[index].direction = glm::normalize(calculateRandomDirectionInHemisphere(glm::normalize(curNorm), (float)xi1(rng),(float)xi2(rng)));
newr[index].origin = curIps + newr[index].direction * 0.001f ; //glm::vec3 neyep = dips + ref1 * 0.001f ;
newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
}
// If the object that you hit is reflective
else if ( materials[geoms[geoIndex].materialid].hasReflective > 0.01f && materials[geoms[geoIndex].materialid].hasRefractive < 0.01f)
{
// Reflectitivity works based on probabbility of the random number generated
thrust::uniform_real_distribution<float> xi3(0,1);
float rtest = (float)xi3(rng) ;
if( rtest < materials[geoms[geoIndex].materialid].hasReflective)
{
glm::vec3 inc = glm::normalize(newr[index].direction) ;
newr[index].direction = inc - (2.0f * glm::normalize(curNorm) * (glm::dot(glm::normalize(curNorm),inc))); //glm::vec3 ref1 = lig - (2.0f * dnorm * (glm::dot(dnorm,lig)));
newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].specularColor;
}
else
{
newr[index].direction = glm::normalize(calculateRandomDirectionInHemisphere(glm::normalize(curNorm), (float)xi1(rng),(float)xi2(rng)));
newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
}
newr[index].origin = curIps + newr[index].direction * 0.001f ; //glm::vec3 neyep = dips + ref1 * 0.001f ;
}
// If the object that you hit is refractive
if ( materials[geoms[geoIndex].materialid].hasRefractive > 0.01f)
{
thrust::uniform_real_distribution<float> xi4(0,1);
float rfr = (float)xi4(rng) ;
if (rfr < 0.7)//materials[geoms[geoIndex].materialid].hasRefractive )
{
float n1 = 1.0f;
float n2 = materials[geoms[geoIndex].materialid].hasRefractive;
float angleofincidence = acos(glm::dot(newr[index].direction ,glm::normalize(curNorm))/(glm::length(newr[index].direction) * glm::length(newr[index].direction)));
angleofincidence = abs(angleofincidence * (180.0f/PI));
//float angleofreflection = asin(sin(angleofincidence) * (n1/n2));
float io = glm::dot( glm::normalize(newr[index].direction),glm::normalize(curNorm));
float criticalAngle = asin(n2/n1);// * (180.0f/PI) ;
if(io < 0.0f )
{
glm::vec3 refractedray = glm::refract(glm::normalize(newr[index].direction),glm::normalize(curNorm),(n1/n2));
newr[index].direction = glm::normalize(refractedray);
newr[index].origin = curIps + newr[index].direction * 0.001f ;
//newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
}
else if(io >= 0.0f ) // && (angleofincidence < criticalAngle )
{
glm::vec3 refractedray = glm::refract(glm::normalize(newr[index].direction),-1.0f * glm::normalize(curNorm),(n2/n1));
newr[index].direction = glm::normalize(refractedray);
newr[index].origin = curIps + newr[index].direction * 0.001f ;
//newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].color;
}
}
else
{
glm::vec3 inc = glm::normalize(newr[index].direction) ;
newr[index].direction = inc - (2.0f * glm::normalize(curNorm) * (glm::dot(glm::normalize(curNorm),inc))); //glm::vec3 ref1 = lig - (2.0f * dnorm * (glm::dot(dnorm,lig)));
newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].specularColor;
newr[index].origin = curIps + newr[index].direction * 0.001f ;
}
}
}
// If the ray hits an object that is light
else if(geoIndex != -1 && materials[geoms[geoIndex].materialid].emittance > 0.01f && (r.life == false))
{
newr[index].rcolor = newr[index].rcolor * materials[geoms[geoIndex].materialid].emittance;
newr[index].life = true;
}
// If the ray keeps hitting the light once it dies - This case actually never happens
else if(geoIndex != -1 && materials[geoms[geoIndex].materialid].emittance > 0.01f && (r.life == true))
{
newr[index].rcolor = newr[index].rcolor ;
newr[index].life = true;
}
// The final case where the ray does not hit any object at all
else
{
newr[index].rcolor = newr[index].rcolor * glm::vec3(0,0,0);
newr[index].life = true;
}
}
}
}
//A thrust based structure
// Predicate for thrust::remove_if stream compaction over the ray pool:
// a ray is compacted away once its `life` flag has been set true
// (set when it hits a light or escapes the scene — see raytraceRay).
struct is_dead
{
	// __host__ __device__ so thrust can invoke it on either side.
	__host__ __device__
	bool operator()(const ray r)
	{
		return r.life;
	}
};
//TODO: FINISH THIS FUNCTION
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Host wrapper for one path-tracing iteration: uploads scene data, runs the
// bounce loop with thrust stream compaction of dead rays, accumulates colors,
// and copies the image back to the host and the PBO.
//
// Fixes vs. previous revision:
//  - vertex / maxmin uploads are single bulk cudaMemcpy calls instead of one
//    cudaMemcpy per element (std::vector storage is contiguous);
//  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize();
//  - mvertex, mami and both CUDA events are now released (previously leaked).
void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, std::vector<glm::vec3> mypoints, float *maxmin) {
	int traceDepth = 1; // determines how many bounces the raytracer traces
	int tileSize = 8;
	int numVertices = mypoints.size();
	dim3 threadsPerBlock(tileSize, tileSize);
	dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize)));
	int pixelCount = (int)renderCam->resolution.x * (int)renderCam->resolution.y;

	// send accumulated image to GPU
	glm::vec3* cudaimage = NULL;
	cudaMalloc((void**)&cudaimage, pixelCount * sizeof(glm::vec3));
	cudaMemcpy(cudaimage, renderCam->image, pixelCount * sizeof(glm::vec3), cudaMemcpyHostToDevice);

	// send mesh vertices to GPU in one contiguous copy
	glm::vec3* mvertex = NULL;
	cudaMalloc((void**)&mvertex, mypoints.size() * sizeof(glm::vec3));
	if (!mypoints.empty()) {
		cudaMemcpy(mvertex, &mypoints[0], mypoints.size() * sizeof(glm::vec3), cudaMemcpyHostToDevice);
	}

	// send the 6 mesh min/max extents to GPU in one copy
	float* mami = NULL;
	cudaMalloc((void**)&mami, 6 * sizeof(float));
	if (maxmin != NULL) {
		cudaMemcpy(mami, maxmin, 6 * sizeof(float), cudaMemcpyHostToDevice);
	}

	// package geometry for the current frame and send to GPU
	staticGeom* geomList = new staticGeom[numberOfGeoms];
	for (int i = 0; i < numberOfGeoms; i++) {
		staticGeom newStaticGeom;
		newStaticGeom.type = geoms[i].type;
		newStaticGeom.materialid = geoms[i].materialid;
		newStaticGeom.translation = geoms[i].translations[frame];
		newStaticGeom.rotation = geoms[i].rotations[frame];
		newStaticGeom.scale = geoms[i].scales[frame];
		newStaticGeom.transform = geoms[i].transforms[frame];
		newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame];
		geomList[i] = newStaticGeom;
	}
	staticGeom* cudageoms = NULL;
	cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom));
	cudaMemcpy(cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice);
	material* cudamaterials = NULL;
	cudaMalloc((void**)&cudamaterials, numberOfMaterials*sizeof(material));
	cudaMemcpy(cudamaterials, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice);

	// package camera for this frame
	cameraData cam;
	cam.resolution = renderCam->resolution;
	cam.position = renderCam->positions[frame];
	cam.view = renderCam->views[frame];
	cam.up = renderCam->ups[frame];
	cam.fov = renderCam->fov;

	// ray pool plus per-bounce and per-iteration color accumulators
	ray* raypool = NULL;
	cudaMalloc((void**)&raypool, pixelCount * sizeof(ray));
	glm::vec3* colorBounce = NULL;
	cudaMalloc((void**)&colorBounce, pixelCount * sizeof(glm::vec3));
	glm::vec3* colorIters = NULL;
	cudaMalloc((void**)&colorIters, pixelCount * sizeof(glm::vec3));
	initializeray<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, cam, raypool, colorBounce, colorIters);
	cudaDeviceSynchronize();

	// super-sampled antialiasing: jitter the camera slightly every 20th iteration
	srand(iterations);
	float x = 0.0f, y = 0.0f;
	if (iterations % 20 == 0) {
		x = (rand() % 100 + 1) / 1000.0f;
		y = (rand() % 100 + 1) / 1000.0f;
		cam.position[0] += x;
		cam.position[1] += y;
	}

	// events to time the full trace of this iteration
	cudaEvent_t event1, event2;
	cudaEventCreate(&event1);
	cudaEventCreate(&event2);
	cudaEventRecord(event1, 0);

	int N = pixelCount;                       // live rays remaining in the pool
	dim3 StreamBlocksPerGrid = fullBlocksPerGrid;
	int blockdim = fullBlocksPerGrid.x;
	for (int bounce = 1; bounce <= 25; ++bounce)
	{
		raytraceRay<<<StreamBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, (float)iterations, (float)bounce, cam, traceDepth, cudaimage, cudageoms, numberOfGeoms, cudamaterials, numberOfMaterials, raypool, colorBounce, bounce, N, blockdim, mvertex, numVertices, mami);
		raytoColorbouncecopy<<<StreamBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, colorBounce, raypool, N, blockdim);
		// stream compaction: drop rays whose life flag is set, shrink the launch
		thrust::device_ptr<ray> rptr = thrust::device_pointer_cast(raypool);
		ray *endrptr = thrust::remove_if(rptr, rptr + N, is_dead()).get();
		N = endrptr - raypool;
		int numofBlocks = ceil((float)N / (float)(tileSize * tileSize));
		blockdim = ceil(sqrt((float)numofBlocks));
		StreamBlocksPerGrid = dim3(blockdim, blockdim);
		cudaDeviceSynchronize();
	}
	finalizeraycolor<<<fullBlocksPerGrid, threadsPerBlock>>>(renderCam->resolution, colorBounce, cudaimage, (float)iterations);
	cudaDeviceSynchronize();
	sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage);
	cudaEventRecord(event2, 0);
	cudaEventSynchronize(event1);
	cudaEventSynchronize(event2); // wait for the trace to finish
	float dt_ms;
	cudaEventElapsedTime(&dt_ms, event1, event2);
	std::cout << dt_ms << std::endl;
	cudaEventDestroy(event1); // previously leaked every call
	cudaEventDestroy(event2);

	// retrieve image from GPU
	cudaMemcpy(renderCam->image, cudaimage, pixelCount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);

	// free device buffers (mvertex/mami were previously leaked)
	cudaFree(cudaimage);
	cudaFree(cudageoms);
	cudaFree(cudamaterials);
	cudaFree(colorBounce);
	cudaFree(colorIters);
	cudaFree(raypool);
	cudaFree(mvertex);
	cudaFree(mami);
	delete [] geomList;

	// make certain all work has completed before returning
	cudaDeviceSynchronize();
	checkCUDAError("Kernel failed!");
}
// Intersects ray s with a triangle soup (myvertex holds numVertices vertices,
// consumed three at a time). On a hit, writes the nearest intersection point
// and surface normal through mintersect/mnormal and returns the ray parameter
// t of the closest triangle; returns -1 when nothing is hit.
//
// Fix: the previous version used the magic sentinel `at == 12345.0` (a double
// literal compared against a float) to signal "no hit" — an explicit hit flag
// is both correct for any t and avoids the exact float comparison.
float __device__ meshIntersectionTest(staticGeom curGeom, ray s, glm::vec3* myvertex, int numVertices, glm::vec3& mintersect, glm::vec3& mnormal)
{
	glm::vec3 ipss, normss;                      // per-triangle scratch outputs
	glm::vec3 curnorm(0.0f), curipss(0.0f);      // best hit so far (zeroed for the miss case)
	float at = 0.0f;
	bool hit = false;
	for (int k = 0; k < numVertices - 2; k = k + 3)
	{
		// triangleIntersectionTest returns -1 on a miss (see usage below)
		float t = triangleIntersectionTest(curGeom, s, myvertex[k], myvertex[k + 1], myvertex[k + 2], ipss, normss);
		if (t != -1 && (!hit || t < at))
		{
			curnorm = normss;
			curipss = ipss;
			at = t;
			hit = true;
		}
	}
	mnormal = curnorm;
	mintersect = curipss;
	return hit ? at : -1.0f;
}
|
greyscale.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "calc.cpp"
#include "utils.h"
#include <stdio.h>
#include <algorithm>
// Converts an RGBA image to greyscale with the ITU-R BT.601 luma weights
// (.299/.587/.114). One thread per pixel; launched as a 2D grid of 2D blocks.
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
	int index_x = blockIdx.x * blockDim.x + threadIdx.x;
	int index_y = blockIdx.y * blockDim.y + threadIdx.y;
	// map the two 2D indices to a single linear, 1D index
	int grid_width = gridDim.x * blockDim.x;
	int index = index_y * grid_width + index_x;
	// guard: the grid overhangs the image when dimensions are not multiples of
	// the block size; without this, out-of-range threads corrupt memory.
	if (index >= numRows * numCols) {
		return;
	}
	// NOTE(review): a 5x5 median-filter block was removed here. It used
	// std::vector / std::sort / push_back, which cannot be used in device code
	// (it would not compile), and its result was overwritten by the line below
	// in any case.
	// NOTE(review): grid_width is derived from gridDim.x, which the launcher
	// sizes from numRows — verify the row/column mapping for non-square images.
	greyImage[index] = .299f * rgbaImage[index].x + .587f * rgbaImage[index].y + .114f * rgbaImage[index].z;
}
// Host launcher: covers the numRows x numCols image with 16x16 thread blocks,
// runs the greyscale kernel, then synchronizes and checks for launch errors.
// h_rgbaImage is unused here; d_rgbaImage/d_greyImage must be device pointers.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
	// 16x16 = 256 threads per block
	const int thread = 16;
	const dim3 blockSize( thread, thread, 1);
	// ceil-division so partial tiles at the edges are still covered
	// NOTE(review): gridSize.x is sized from numRows while the kernel's linear
	// index uses gridDim.x*blockDim.x as the row stride — confirm the
	// row/column mapping for non-square images.
	const dim3 gridSize( ceil(numRows/(float)thread), ceil(numCols/(float)thread), 1);
	hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
	// block until the kernel finishes, then surface any launch/runtime error
	hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| greyscale.cu | #include <math.h>
#include "calc.cpp"
#include "utils.h"
#include <stdio.h>
#include <algorithm>
// Converts an RGBA image to greyscale with the ITU-R BT.601 luma weights
// (.299/.587/.114). One thread per pixel; launched as a 2D grid of 2D blocks.
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
	int index_x = blockIdx.x * blockDim.x + threadIdx.x;
	int index_y = blockIdx.y * blockDim.y + threadIdx.y;
	// map the two 2D indices to a single linear, 1D index
	int grid_width = gridDim.x * blockDim.x;
	int index = index_y * grid_width + index_x;
	// guard: the grid overhangs the image when dimensions are not multiples of
	// the block size; without this, out-of-range threads corrupt memory.
	if (index >= numRows * numCols) {
		return;
	}
	// NOTE(review): a 5x5 median-filter block was removed here. It used
	// std::vector / std::sort / push_back, which cannot be used in device code
	// (it would not compile), and its result was overwritten by the line below
	// in any case.
	// NOTE(review): grid_width is derived from gridDim.x, which the launcher
	// sizes from numRows — verify the row/column mapping for non-square images.
	greyImage[index] = .299f * rgbaImage[index].x + .587f * rgbaImage[index].y + .114f * rgbaImage[index].z;
}
// Host launcher: tiles the numRows x numCols image with 16x16 thread blocks,
// runs the greyscale kernel, then synchronizes and checks for launch errors.
// h_rgbaImage is unused here; d_rgbaImage/d_greyImage must be device pointers.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
	// One 16x16 thread block per 16x16 tile of the image.
	const int tileDim = 16;
	const dim3 blockSize(tileDim, tileDim, 1);
	// Round up so partial tiles at the edges are still covered.
	const unsigned int tilesX = (unsigned int)ceil(numRows / (float)tileDim);
	const unsigned int tilesY = (unsigned int)ceil(numCols / (float)tileDim);
	const dim3 gridSize(tilesX, tilesY, 1);
	rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
	// Block until the kernel finishes, then surface any launch/runtime error.
	cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
604fbd8e0616a44da0d0f3696d239d3a440316bf.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
// Tensor layouts for this instantiation: NCxHWx<4> for src/dst, CxRSKx<4> for
// the filter (interleaved-by-4 int8 layouts).
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
// Tiling configuration: 32x64x32 threadblock tile with a matching warp tile;
// 1x1x4 instruction shape (SIMT dp4a-style int8 math on Sm61, per the
// OpClassSimt / Sm61 tags below).
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
// Epilogue: bias add + linear combination + ReLU, clamped back to int8.
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
    int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
    int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
    LayoutDst, int32_t, LayoutDst, int32_t,
    cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
    ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
    cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
        cutlass::convolution::ConvType::kConvolution>,
    2, 4, 16, false,
    cutlass::arch::OpMultiplyAddSaturate>;
// Explicit instantiation of the generic wrapper for exactly this Convolution
// configuration, so the kernel is compiled into this translation unit.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
    const typename Convolution::ElementSrc* d_src,
    const typename Convolution::ElementFilter* d_filter,
    const typename Convolution::ElementBias* d_bias,
    const typename Convolution::ElementDst* d_z,
    typename Convolution::ElementDst* d_dst,
    int* workspace,
    typename Convolution::ConvolutionParameter const& conv_param,
    typename Convolution::EpilogueOutputOp::Params const& epilogue,
    hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 604fbd8e0616a44da0d0f3696d239d3a440316bf.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
cb484a5d4832a0c7c7e77de62abcf3a1b6b6b8c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
int *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mult), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mult), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mult), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cb484a5d4832a0c7c7e77de62abcf3a1b6b6b8c7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: times the `mult` kernel (from mult.cu) over a sweep of
// matrix sizes (matrices_) and launch configurations (blocks_), printing
// "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" per configuration.
//
// Fixes vs. previous revision:
//  - cudaMalloc sizes were XSIZE*YSIZE *bytes*, not XSIZE*YSIZE ints, so the
//    kernel wrote far past the allocations;
//  - kernel launches are asynchronous, so the clock was read before the work
//    finished — a cudaDeviceSynchronize() now brackets the timed region;
//  - A/B/C were leaked on every iteration; argv[1] was read without a check.
int main(int argc, char **argv) {
	cudaSetDevice(0);
	if (argc < 2) {
		cout << "usage: bench <matrix_len>" << endl;
		return 1;
	}
	char* p; int matrix_len = strtol(argv[1], &p, 10); // number of sizes to sweep
	for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
		for (int block_looper = 0; block_looper < 20; block_looper++) {
			int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
			int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
			// allocate XSIZE*YSIZE ints (previously only that many bytes)
			int *A = NULL; cudaMalloc(&A, (size_t)XSIZE * YSIZE * sizeof(int));
			int *B = NULL; cudaMalloc(&B, (size_t)XSIZE * YSIZE * sizeof(int));
			int *C = NULL; cudaMalloc(&C, (size_t)XSIZE * YSIZE * sizeof(int));
			// round the problem dimensions up to a multiple of the block dims
			int iXSIZE = XSIZE;
			while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
			int iYSIZE = YSIZE;
			while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
			dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
			dim3 threadBlock(BLOCKX, BLOCKY);
			cudaFree(0); // force context creation before timing
			// warm-up launches
			mult<<<gridBlock, threadBlock>>>(A, B, C);
			cudaDeviceSynchronize();
			for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
				mult<<<gridBlock, threadBlock>>>(A, B, C);
			}
			cudaDeviceSynchronize(); // drain warm-up before starting the clock
			auto start = steady_clock::now();
			for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
				mult<<<gridBlock, threadBlock>>>(A, B, C);
			}
			cudaDeviceSynchronize(); // launches are async: wait before reading the clock
			auto end = steady_clock::now();
			auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
			cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
			// release per-configuration buffers (previously leaked)
			cudaFree(A); cudaFree(B); cudaFree(C);
		}
	}
}
aec7d8f2818c0dc93d68fb20b7ebdc6370dfd062.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "MaleckarBacNavglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefMaleckar.h"
#include "parseInput.h"
#include "MaleckarBacNavhostPrototypes.h"
#include "MaleckarBacNavdevicePrototypes.cuh"
real MaleckarBacNav_RestVoltage = MaleckarBacNav_RestVoltage_0;
__device__ real gkv = gkv_0;
__device__ real shiftrs = shiftrs_0;
__device__ real GbNa = GbNa_0;
__device__ real GNA = GNA_0;
// Parses the NULL-terminated resource-string array `res` and applies any
// recognized "MaleckarBacNav_*" settings: the host-side resting voltage and
// the __device__ model parameters (gkv, shiftrs, GbNa, GNA), which are
// uploaded with hipMemcpyToSymbol. Unrecognized strings are ignored.
void Maleckar_init(char** res) {
	// name -> command-code table consumed by FindCommand; multiple aliases
	// map to the same code (e.g. Node/Nodetype/Type -> 1100).
	rword resources[] = {
	{ "MaleckarBacNav_Node", 1100 },
	{ "MaleckarBacNav_Nodetype",1100 },
	{ "MaleckarBacNav_Type", 1100 },
	{ "MaleckarBacNav_patch", 1102 },
	{ "MaleckarBacNav_Vr", 1007 },
	{ "MaleckarBacNav_Vrest", 1007 },
	{ "MaleckarBacNav_gkv", 1008 },
	{ "MaleckarBacNav_shiftrs", 1009 },
	{ "MaleckarBacNav_gbna", 1010 },
	{ "MaleckarBacNav_gna", 1011 },
	{ NULL, 0 }
	};
	// j and c are only referenced by the commented-out patch parsing (case 1102)
	int i, j, c;
	int cmd;
	real temp;
	i = 0;
	while( res[i] != NULL ) {
	cmd = FindCommand( resources, res[i] );
	switch( cmd ) {
	case 1007:
	// host-side resting membrane voltage
	MaleckarBacNav_RestVoltage = GetRealValue( res[i] );
	break;
	case 1008:
	// Kv conductance -> device symbol
	temp = GetRealValue(res[i]);
	hipMemcpyToSymbol(gkv, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
	break;
	case 1009:
	// r/s gate shift -> device symbol
	temp = GetRealValue(res[i]);
	hipMemcpyToSymbol(shiftrs, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
	break;
	case 1010:
	// background Na conductance -> device symbol
	temp = GetRealValue(res[i]);
	hipMemcpyToSymbol(GbNa, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
	break;
	case 1011:
	// BacNav Na conductance -> device symbol
	temp = GetRealValue(res[i]);
	hipMemcpyToSymbol(GNA, (void *)&temp, sizeof(real), 0, hipMemcpyHostToDevice);
	break;
	case 1100:
	// node-type handling currently disabled
	//MaleckarBacNav_NodeType = GetByteValue( res[i] );
	break;
	case 1102:
	// patch handling currently disabled
	/*iv = GetRealArray( res[i] );
	p = (real*)(&MaleckarBacNav_RestPatch);
	c = GetNumValues( res[i] );
	if( c > MaleckarBacNav_PatchSize ) {
	c = MaleckarBacNav_PatchSize;
	}
	for(j=0;j<c;j++) {
	p[j] = iv[j];
	}
	break;*/
	}
	i++;
	}
}
// Allocates and initializes the model state: pinned host buffers plus pitched
// device buffers for the five state variables (vm, r, s, m, h) in both the
// current (gate_dev) and forward/derivative (gate_devF) structures, seeds the
// resting initial conditions, uploads them, and publishes the gating-variable
// pointer tables (qp). *pitch receives the device row pitch (same for all
// arrays, since they share memSize).
//
// Fixes vs. previous revision: the temporary qpH table was leaked; the
// repeated alloc/copy statements are now driven by pointer tables.
void Maleckar_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
	// Pinned host buffers for the five state variables.
	hipHostMalloc((void**)&(gate_h->vm), memSize, 0);
	hipHostMalloc((void**)&(gate_h->r), memSize, 0);
	hipHostMalloc((void**)&(gate_h->s), memSize, 0);
	hipHostMalloc((void**)&(gate_h->m), memSize, 0);
	hipHostMalloc((void**)&(gate_h->h), memSize, 0);
	// Pitched device buffers: entries 0-4 are the current state, 5-9 the
	// forward state, in vm,r,s,m,h order for each.
	real** devPtrs[] = { &gate_dev->vm, &gate_dev->r, &gate_dev->s, &gate_dev->m, &gate_dev->h,
	                     &gate_devF->vm, &gate_devF->r, &gate_devF->s, &gate_devF->m, &gate_devF->h };
	for (int k = 0; k < 10; ++k) {
		CudaSafeCall(hipMallocPitch((void **)devPtrs[k], pitch, memSize, 1));
	}
	puts("\nFinished allocating device arrays\n");
	// Resting-state initial conditions on the host.
	int totpoints = (int)memSize / sizeof(real);
	for (int idx = 0; idx < totpoints; idx++) {
		gate_h->vm[idx] = MaleckarBacNav_RestVoltage;
		gate_h->r[idx] = 5.454e-02;
		gate_h->s[idx] = 9.814e-01;
		gate_h->m[idx] = 0.00012037;
		gate_h->h[idx] = .4769;
	}
	// Upload the initial state into both device copies (k % 5 re-reads the
	// same five host arrays for the forward copy).
	real* hostPtrs[] = { gate_h->vm, gate_h->r, gate_h->s, gate_h->m, gate_h->h };
	for (int k = 0; k < 10; ++k) {
		CudaSafeCall(hipMemcpy2D((void *)*devPtrs[k], *pitch, (void *)hostPtrs[k % 5],
			memSize, memSize, 1, hipMemcpyHostToDevice));
	}
	// Publish the gating-variable pointer tables (r, s, m, h) consumed via
	// gate->qp. Assumes gate_h->qpl >= 4 — matches the four entries written.
	real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
	int i = 0;
	qpH[i++] = gate_devF->r;
	qpH[i++] = gate_devF->s;
	qpH[i++] = gate_devF->m;
	qpH[i++] = gate_devF->h;
	CudaSafeCall(hipMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
	i = 0;
	qpH[i++] = gate_dev->r;
	qpH[i++] = gate_dev->s;
	qpH[i++] = gate_dev->m;
	qpH[i++] = gate_dev->h;
	CudaSafeCall(hipMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, hipMemcpyHostToDevice));
	free(qpH); // previously leaked
	CudaCheckError();
	puts("\nFinished initializing device arrays\n");
}
// Copies the five device state arrays (vm, r, s, m, h) back into the pinned
// host mirrors. memSize is the host row width in bytes; pitch is the device
// row pitch returned by the allocation in Maleckar_gateinit.
void Maleckar_sync(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev) {
	real* hostArr[] = { gate_h->vm, gate_h->r, gate_h->s, gate_h->m, gate_h->h };
	real* devArr[]  = { gate_dev->vm, gate_dev->r, gate_dev->s, gate_dev->m, gate_dev->h };
	for (int k = 0; k < 5; ++k) {
		CudaSafeCall(hipMemcpy2D((void *)hostArr[k], memSize, (void *)devArr[k],
			pitch, memSize, 1, hipMemcpyDeviceToHost));
	}
}
// Releases all memory allocated by Maleckar_gateinit and the sparse-matrix
// setup. memSize/pitch/MatrixINT are unused here but kept for interface
// compatibility with the other model teardown routines.
//
// Fix: gate_dev->qp was previously freed twice (once on the host-free line
// and again on the device-free line) — a double free.
void Maleckar_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
	// Pinned host state arrays.
	hipHostFree(gate_h->vm); hipHostFree(gate_h->r); hipHostFree(gate_h->s); hipHostFree(gate_h->m); hipHostFree(gate_h->h);
	// Device state arrays and pointer tables (each freed exactly once).
	hipFree(gate_dev->vm); hipFree(gate_dev->r); hipFree(gate_dev->s); hipFree(gate_dev->m); hipFree(gate_dev->h); hipFree(gate_dev->qp);
	hipFree(gate_devF->vm); hipFree(gate_devF->r); hipFree(gate_devF->s); hipFree(gate_devF->m); hipFree(gate_devF->h); hipFree(gate_devF->qp);
	// Device sparse-matrix arrays.
	hipFree(cudaMatrixINT->type);
	hipFree(cudaMatrixINT->rows);
	hipFree(cudaMatrixINT->maxnz);
	hipFree(cudaMatrixINT->csep);
	hipFree(cudaMatrixINT->jcoef);
	hipFree(cudaMatrixINT->coef);
}
void __device__ GetFDev_Maleckar(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
/*------------------------------------------------------------------------
* return if outside domain
*------------------------------------------------------------------------
*/
if (i2d >= totpoints) {
return;
}
real vm;
real r, s, m, h;
real r_inf, s_inf, tau_r, tau_s, minf, hinf, taum, tauh, aK1, bK1, ENa, EK;
real Ikv, Ik1, Inak, Ibna, Iion, INaBacNav;
real vm = g_dev.vm[i2d];
real r = g_dev.r[i2d];
real s = g_dev.s[i2d];
real m = g_dev.m[i2d];
real h = g_dev.h[i2d];
/*------------------------------------------------------------------------
* setting local variables
*------------------------------------------------------------------------
*/
real fv = g_devF.vm[i2d];
/* gating variables */
r_inf = 1/(1+exp(-(vm)/11));
tau_r = 20.3 + 138 * exp( -sqr((vm+20)/25.9) );
s_inf = 1/(1+exp((vm+3)/7));
tau_s = 1574 + 5268 * exp( -sqr((vm+23)/22.7) );
minf = (1.0/(1.0+exp((vm+28.34)/(-5.33))));
hinf = (1.0-1.0/(1.0+exp((-77.21-vm)/8.32)));
taum = (96.37/(exp((vm+82.74)/17.64) + exp(-(vm+6.008)/3.337)) + .4844);
tauh = (96.17 - (96.17 - 10.45)/(1.0+exp((-23.26-vm)/2.529)));
/* I_Kv */
EK = R*T/F * log(Ko/Ki);
Ikv = gkv * r * s * (vm-EK);
/* I_K1 */
aK1 = 0.1/(1+exp(0.06*(vm-EK-200)));
bK1 = ( 3*exp(0.0002*(vm-EK+100)) + exp(0.1*(vm-EK-10)) )
/ ( 1+exp(-0.5*(vm-EK)) );
Ik1 = gK1 * aK1/(aK1+bK1) * (vm-EK);
/* I_NaK */
Inak = INaKbar / (1+KmK/Ko) / (1+pow((KmNa/Nai),1.5)) * (vm-Vrev) / (vm-B);
/* I_bNa */
ENa = R*T/F * log(Nao/Nai);
Ibna = GbNa * (vm-ENa);
/* INaBacNav */
INaBacNav = GNA*m*m*m*h*(vm-ENa);
/* I_ion */
Iion = Ikv + Ik1 + Inak + INaBacNav + Ibna;
/* differential equations */
fv -= Cm*Iion;
g_devF.r[i2d] = (r_inf - r) / tau_r;
g_devF.s[i2d] = (s_inf - s) / tau_s;
g_devF.m[i2d] = (m_inf - m) / tau_m;
g_devF.h[i2d] = (h_inf - h) / tau_h;
g_devF.vm[i2d] = fv;
} | aec7d8f2818c0dc93d68fb20b7ebdc6370dfd062.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../common/CudaSafeCall.h"
#include "MaleckarBacNavglobalVariables.cuh"
#include "typedefSparse.h"
#include "sparsePrototypes.cuh"
#include "typedefMaleckar.h"
#include "parseInput.h"
#include "MaleckarBacNavhostPrototypes.h"
#include "MaleckarBacNavdevicePrototypes.cuh"
real MaleckarBacNav_RestVoltage = MaleckarBacNav_RestVoltage_0;
__device__ real gkv = gkv_0;
__device__ real shiftrs = shiftrs_0;
__device__ real GbNa = GbNa_0;
__device__ real GNA = GNA_0;
// Parses the NULL-terminated resource-string array `res` and applies any
// recognized "MaleckarBacNav_*" settings: the host-side resting voltage and
// the __device__ model parameters (gkv, shiftrs, GbNa, GNA), which are
// uploaded with cudaMemcpyToSymbol. Unrecognized strings are ignored.
void Maleckar_init(char** res) {
	// name -> command-code table consumed by FindCommand; multiple aliases
	// map to the same code (e.g. Node/Nodetype/Type -> 1100).
	rword resources[] = {
	{ "MaleckarBacNav_Node", 1100 },
	{ "MaleckarBacNav_Nodetype",1100 },
	{ "MaleckarBacNav_Type", 1100 },
	{ "MaleckarBacNav_patch", 1102 },
	{ "MaleckarBacNav_Vr", 1007 },
	{ "MaleckarBacNav_Vrest", 1007 },
	{ "MaleckarBacNav_gkv", 1008 },
	{ "MaleckarBacNav_shiftrs", 1009 },
	{ "MaleckarBacNav_gbna", 1010 },
	{ "MaleckarBacNav_gna", 1011 },
	{ NULL, 0 }
	};
	// j and c are only referenced by the commented-out patch parsing (case 1102)
	int i, j, c;
	int cmd;
	real temp;
	i = 0;
	while( res[i] != NULL ) {
	cmd = FindCommand( resources, res[i] );
	switch( cmd ) {
	case 1007:
	// host-side resting membrane voltage
	MaleckarBacNav_RestVoltage = GetRealValue( res[i] );
	break;
	case 1008:
	// Kv conductance -> device symbol
	temp = GetRealValue(res[i]);
	cudaMemcpyToSymbol(gkv, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
	break;
	case 1009:
	// r/s gate shift -> device symbol
	temp = GetRealValue(res[i]);
	cudaMemcpyToSymbol(shiftrs, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
	break;
	case 1010:
	// background Na conductance -> device symbol
	temp = GetRealValue(res[i]);
	cudaMemcpyToSymbol(GbNa, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
	break;
	case 1011:
	// BacNav Na conductance -> device symbol
	temp = GetRealValue(res[i]);
	cudaMemcpyToSymbol(GNA, (void *)&temp, sizeof(real), 0, cudaMemcpyHostToDevice);
	break;
	case 1100:
	// node-type handling currently disabled
	//MaleckarBacNav_NodeType = GetByteValue( res[i] );
	break;
	case 1102:
	// patch handling currently disabled
	/*iv = GetRealArray( res[i] );
	p = (real*)(&MaleckarBacNav_RestPatch);
	c = GetNumValues( res[i] );
	if( c > MaleckarBacNav_PatchSize ) {
	c = MaleckarBacNav_PatchSize;
	}
	for(j=0;j<c;j++) {
	p[j] = iv[j];
	}
	break;*/
	}
	i++;
	}
}
// Allocates pinned host arrays and pitched device arrays for the Maleckar
// gate state (vm, r, s, m, h), seeds the host arrays with the model's resting
// initial conditions, uploads them to both the state (gate_dev) and forward
// (gate_devF) buffers, and uploads the per-gate device pointer tables (qp).
// Fix: the temporary host pointer table qpH was allocated with malloc() but
// never freed (leaked on every call); it is now released before returning.
// NOTE(review): gate_dev->qp and gate_devF->qp are assumed to be device
// allocations of at least gate_h->qpl pointers made elsewhere -- confirm.
// NOTE(review): exactly 4 pointers (r, s, m, h) are stored in each table, so
// gate_h->qpl is assumed to be 4 -- confirm.
void Maleckar_gateinit(int memSize, size_t* pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF) {
    // Pinned host memory: required for fast (and potentially async) transfers.
    cudaHostAlloc((void**)&(gate_h->vm), memSize, 0);
    cudaHostAlloc((void**)&(gate_h->r), memSize, 0);
    cudaHostAlloc((void**)&(gate_h->s), memSize, 0);
    cudaHostAlloc((void**)&(gate_h->m), memSize, 0);
    cudaHostAlloc((void**)&(gate_h->h), memSize, 0);
    // Allocate device memory arrays (one pitched row of memSize bytes each).
    CudaSafeCall(cudaMallocPitch((void **)&gate_dev->vm, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_dev->r, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_dev->s, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_dev->m, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_dev->h, pitch, memSize, 1));
    // Allocate device forward (derivative) memory arrays.
    CudaSafeCall(cudaMallocPitch((void **)&gate_devF->vm, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_devF->r, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_devF->s, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_devF->m, pitch, memSize, 1));
    CudaSafeCall(cudaMallocPitch((void **)&gate_devF->h, pitch, memSize, 1));
    puts("\nFinished allocating device arrays\n");
    // Seed the resting initial conditions on the host.
    int totpoints = (int)memSize / sizeof(real);
    for (int idx = 0; idx < totpoints; idx++) {
        gate_h->vm[idx] = MaleckarBacNav_RestVoltage;
        gate_h->r[idx] = 5.454e-02;
        gate_h->s[idx] = 9.814e-01;
        gate_h->m[idx] = 0.00012037;
        gate_h->h[idx] = .4769;
    }
    // Upload the initial state to the device state buffers...
    CudaSafeCall(cudaMemcpy2D((void *)gate_dev->vm, *pitch, (void *)gate_h->vm,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_dev->r, *pitch, (void *)gate_h->r,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_dev->s, *pitch, (void *)gate_h->s,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_dev->m, *pitch, (void *)gate_h->m,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_dev->h, *pitch, (void *)gate_h->h,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    // ...and to the forward buffers as well.
    CudaSafeCall(cudaMemcpy2D((void *)gate_devF->vm, *pitch, (void *)gate_h->vm,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_devF->r, *pitch, (void *)gate_h->r,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_devF->s, *pitch, (void *)gate_h->s,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_devF->m, *pitch, (void *)gate_h->m,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    CudaSafeCall(cudaMemcpy2D((void *)gate_devF->h, *pitch, (void *)gate_h->h,
                 memSize, memSize, 1, cudaMemcpyHostToDevice));
    // Build the per-gate device pointer tables on the host and upload them.
    real** qpH = (real**)malloc(sizeof(real *)*gate_h->qpl);
    int i = 0;
    qpH[i++] = gate_devF->r;
    qpH[i++] = gate_devF->s;
    qpH[i++] = gate_devF->m;
    qpH[i++] = gate_devF->h;
    CudaSafeCall(cudaMemcpy((void *)gate_devF->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
    i = 0;
    qpH[i++] = gate_dev->r;
    qpH[i++] = gate_dev->s;
    qpH[i++] = gate_dev->m;
    qpH[i++] = gate_dev->h;
    CudaSafeCall(cudaMemcpy((void *)gate_dev->qp, (void*)qpH, sizeof(real *)*gate_h->qpl, cudaMemcpyHostToDevice));
    free(qpH); // fix: temporary pointer table was previously leaked
    CudaCheckError();
    puts("\nFinished initializing device arrays\n");
}
// Copies every gate-state array (vm, r, s, m, h) from the device back into
// the pinned host mirrors so the host can inspect the current state.
void Maleckar_sync(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev) {
    real* hostArrays[]   = { gate_h->vm,   gate_h->r,   gate_h->s,   gate_h->m,   gate_h->h };
    real* deviceArrays[] = { gate_dev->vm, gate_dev->r, gate_dev->s, gate_dev->m, gate_dev->h };
    const int numArrays = (int)(sizeof(hostArrays) / sizeof(hostArrays[0]));
    for (int k = 0; k < numArrays; ++k) {
        // Pitched device row -> densely packed host row (1 row of memSize bytes).
        CudaSafeCall(cudaMemcpy2D((void *)hostArrays[k], memSize, (void *)deviceArrays[k],
                     pitch, memSize, 1, cudaMemcpyDeviceToHost));
    }
}
// Releases all host and device memory owned by the Maleckar model state.
// Fix: gate_dev->qp was freed twice (once at the end of the host-free line
// and again on the device-free line); it is now freed exactly once.
void Maleckar_exit(int memSize, size_t pitch, gateType* gate_h, gateType* gate_dev, gateType* gate_devF, sparse* MatrixINT, cudasparse* cudaMatrixINT){
    // Pinned host arrays.
    cudaFreeHost(gate_h->vm); cudaFreeHost(gate_h->r); cudaFreeHost(gate_h->s); cudaFreeHost(gate_h->m); cudaFreeHost(gate_h->h);
    // Device state arrays (qp freed once, here).
    cudaFree(gate_dev->vm); cudaFree(gate_dev->r); cudaFree(gate_dev->s); cudaFree(gate_dev->m); cudaFree(gate_dev->h); cudaFree(gate_dev->qp);
    // Device forward (derivative) arrays.
    cudaFree(gate_devF->vm); cudaFree(gate_devF->r); cudaFree(gate_devF->s); cudaFree(gate_devF->m); cudaFree(gate_devF->h); cudaFree(gate_devF->qp);
    // Sparse-matrix device arrays.
    cudaFree(cudaMatrixINT->type);
    cudaFree(cudaMatrixINT->rows);
    cudaFree(cudaMatrixINT->maxnz);
    cudaFree(cudaMatrixINT->csep);
    cudaFree(cudaMatrixINT->jcoef);
    cudaFree(cudaMatrixINT->coef);
}
// Device-side right-hand-side evaluation for the Maleckar atrial model with a
// BacNav sodium current at grid point i2d: reads the state from g_dev,
// computes the total ionic current, and writes the derivatives of the gating
// variables and the voltage derivative into g_devF.
// Fixes: vm, r, s, m and h were declared twice (a redeclaration compile
// error), and the gating derivative lines referenced the undeclared names
// m_inf/tau_m/h_inf/tau_h instead of the locals minf/taum/hinf/tauh.
// NOTE(review): R, T, F, Ko, Ki, Nao, Nai, gkv, gK1, INaKbar, KmK, KmNa,
// Vrev, B, GbNa, GNA and sqr() are assumed to be device constants/macros
// defined elsewhere in this translation unit -- confirm.
void __device__ GetFDev_Maleckar(int i2d, int pitch, real beta, real Cm, real t, real dt, int totpoints, real rx, gateType g_dev, gateType g_devF) {
    /*------------------------------------------------------------------------
     * return if outside domain
     *------------------------------------------------------------------------
     */
    if (i2d >= totpoints) {
        return;
    }
    real vm;
    real r, s, m, h;
    real r_inf, s_inf, tau_r, tau_s, minf, hinf, taum, tauh, aK1, bK1, ENa, EK;
    real Ikv, Ik1, Inak, Ibna, Iion, INaBacNav;
    /* load local copies of the state (fix: these were duplicate declarations) */
    vm = g_dev.vm[i2d];
    r = g_dev.r[i2d];
    s = g_dev.s[i2d];
    m = g_dev.m[i2d];
    h = g_dev.h[i2d];
    /* accumulated voltage derivative; NOTE(review): presumably pre-loaded
     * with the diffusion contribution by an earlier kernel -- confirm */
    real fv = g_devF.vm[i2d];
    /* gating variables */
    r_inf = 1/(1+exp(-(vm)/11));
    tau_r = 20.3 + 138 * exp( -sqr((vm+20)/25.9) );
    s_inf = 1/(1+exp((vm+3)/7));
    tau_s = 1574 + 5268 * exp( -sqr((vm+23)/22.7) );
    minf = (1.0/(1.0+exp((vm+28.34)/(-5.33))));
    hinf = (1.0-1.0/(1.0+exp((-77.21-vm)/8.32)));
    taum = (96.37/(exp((vm+82.74)/17.64) + exp(-(vm+6.008)/3.337)) + .4844);
    tauh = (96.17 - (96.17 - 10.45)/(1.0+exp((-23.26-vm)/2.529)));
    /* I_Kv */
    EK = R*T/F * log(Ko/Ki);
    Ikv = gkv * r * s * (vm-EK);
    /* I_K1 */
    aK1 = 0.1/(1+exp(0.06*(vm-EK-200)));
    bK1 = ( 3*exp(0.0002*(vm-EK+100)) + exp(0.1*(vm-EK-10)) )
        / ( 1+exp(-0.5*(vm-EK)) );
    Ik1 = gK1 * aK1/(aK1+bK1) * (vm-EK);
    /* I_NaK */
    Inak = INaKbar / (1+KmK/Ko) / (1+pow((KmNa/Nai),1.5)) * (vm-Vrev) / (vm-B);
    /* I_bNa */
    ENa = R*T/F * log(Nao/Nai);
    Ibna = GbNa * (vm-ENa);
    /* I_NaBacNav */
    INaBacNav = GNA*m*m*m*h*(vm-ENa);
    /* total ionic current */
    Iion = Ikv + Ik1 + Inak + INaBacNav + Ibna;
    /* differential equations */
    fv -= Cm*Iion;
    g_devF.r[i2d] = (r_inf - r) / tau_r;
    g_devF.s[i2d] = (s_inf - s) / tau_s;
    g_devF.m[i2d] = (minf - m) / taum; /* fix: was undeclared m_inf/tau_m */
    g_devF.h[i2d] = (hinf - h) / tauh; /* fix: was undeclared h_inf/tau_h */
    g_devF.vm[i2d] = fv;
}
966cc28f784acd474e5d092964ec9ac50d24e48c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
#include "KMeansHeader.h"
// Advances every point coordinate by velocity * time:
//   current[i] = init[i] + velocity[i] * time
// Fix: the original indexed with threadIdx.x only, so with a multi-block
// launch every block rewrote the same first blockDim.x elements and the
// remaining elements were never computed; a global index with a bounds
// guard is used instead.
__global__ void pointsMovementCalKernel(int size, double* dev_initPointsCordinates,double* dev_pointsVelocityArr, double* dev_currentPointsCordinates, double time)
{
    int processId = blockIdx.x * blockDim.x + threadIdx.x;
    if (processId < size) {
        dev_currentPointsCordinates[processId] =
            dev_initPointsCordinates[processId] + (dev_pointsVelocityArr[processId] * time);
    }
}
// Host wrapper: computes the current point coordinates on the GPU for the
// given simulation time, then resets the device (required for profilers such
// as Nsight / Visual Profiler to emit complete traces).
// Returns TRUE on success, FALSE if the computation or the reset failed.
// Fixes: the status returned by computePointsCordinates was previously
// discarded, and the unused local `counter` was removed.
boolean calPointsCordsCuda(double time, double* initPointsCordinates, double* pointsVelocityArr, double* currentPointsCordniates, int size)
{
    hipError_t cudaStatus;
    cudaStatus = computePointsCordinates(time, initPointsCordinates, pointsVelocityArr, currentPointsCordniates, size);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "computePointsCordinates failed!"); fflush(stdout);
        return FALSE;
    }
    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!"); fflush(stdout);
        return FALSE;
    }
    return TRUE;
}
// Best-effort cleanup helper: releases the three device buffers used by the
// point-movement computation (hipFree accepts still-null pointers).
void error(double* dev_currentPointsCordinates, double* dev_pointsVelocityArr, double* dev_initPointsCordinates)
{
    double* deviceBuffers[] = { dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates };
    for (int k = 0; k < 3; ++k) {
        hipFree(deviceBuffers[k]);
    }
}
// Allocates device buffers, uploads the initial coordinates and velocities,
// launches the point-movement kernel, and downloads the result into
// currentPointsCordniates. Returns the last HIP status.
// Fixes: the original kept executing after a failed HIP call (no returns),
// could launch with 0 blocks when size < 1000 (and skipped the tail when the
// block count did not divide size), never copied the kernel result back to
// the host, and leaked the device buffers on success.
hipError_t computePointsCordinates(double time,double* initPointsCordinates , double* pointsVelocityArr, double* currentPointsCordniates, int size)
{
    hipError_t cudaStatus;
    double* dev_currentPointsCordinates = 0;
    double* dev_pointsVelocityArr = 0;
    double* dev_initPointsCordinates = 0;
    // Launch geometry: 1000-thread blocks, rounded up so every index < size
    // is covered (the kernel guards against the overshoot).
    int threadsPerBlock = 1000;
    int parts = (size + threadsPerBlock - 1) / threadsPerBlock;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); fflush(stdout);
        return cudaStatus;
    }
    // Allocate GPU buffers for the three point vectors.
    cudaStatus = hipMalloc((void**)&dev_currentPointsCordinates, size * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    cudaStatus = hipMalloc((void**)&dev_pointsVelocityArr, size * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    cudaStatus = hipMalloc((void**)&dev_initPointsCordinates, size * sizeof(double));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    // Copy the input vectors from host memory to the GPU buffers. The output
    // buffer is fully overwritten by the kernel, so it needs no upload.
    cudaStatus = hipMemcpy(dev_pointsVelocityArr, pointsVelocityArr, size * sizeof(double), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    cudaStatus = hipMemcpy(dev_initPointsCordinates, initPointsCordinates, size * sizeof(double), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    pointsMovementCalKernel << <parts, threadsPerBlock >> >(size, dev_initPointsCordinates, dev_pointsVelocityArr, dev_currentPointsCordinates, time);
    // Check for launch-configuration errors.
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    // hipDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    // Fix: download the computed coordinates (the original never copied the
    // result back, leaving the caller's buffer untouched).
    cudaStatus = hipMemcpy(currentPointsCordniates, dev_currentPointsCordinates, size * sizeof(double), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!"); fflush(stdout);
    }
    // Fix: release the device buffers on every path (they leaked on success).
    error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
    return cudaStatus;
}
| 966cc28f784acd474e5d092964ec9ac50d24e48c.cu | //
#include "KMeansHeader.h"
// Advances every point coordinate by velocity * time:
//   current[i] = init[i] + velocity[i] * time
// Fix: the original indexed with threadIdx.x only, so with a multi-block
// launch every block rewrote the same first blockDim.x elements and the
// remaining elements were never computed; a global index with a bounds
// guard is used instead.
__global__ void pointsMovementCalKernel(int size, double* dev_initPointsCordinates,double* dev_pointsVelocityArr, double* dev_currentPointsCordinates, double time)
{
    int processId = blockIdx.x * blockDim.x + threadIdx.x;
    if (processId < size) {
        dev_currentPointsCordinates[processId] =
            dev_initPointsCordinates[processId] + (dev_pointsVelocityArr[processId] * time);
    }
}
// Host wrapper: computes the current point coordinates on the GPU for the
// given simulation time, then resets the device (required for profilers such
// as Nsight / Visual Profiler to emit complete traces).
// Returns TRUE on success, FALSE if the computation or the reset failed.
// Fixes: the status returned by computePointsCordinates was previously
// discarded, and the unused local `counter` was removed.
boolean calPointsCordsCuda(double time, double* initPointsCordinates, double* pointsVelocityArr, double* currentPointsCordniates, int size)
{
    cudaError_t cudaStatus;
    cudaStatus = computePointsCordinates(time, initPointsCordinates, pointsVelocityArr, currentPointsCordniates, size);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "computePointsCordinates failed!"); fflush(stdout);
        return FALSE;
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!"); fflush(stdout);
        return FALSE;
    }
    return TRUE;
}
// Best-effort cleanup helper: releases the three device buffers used by the
// point-movement computation (cudaFree accepts still-null pointers).
void error(double* dev_currentPointsCordinates, double* dev_pointsVelocityArr, double* dev_initPointsCordinates)
{
    double* deviceBuffers[] = { dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates };
    for (int k = 0; k < 3; ++k) {
        cudaFree(deviceBuffers[k]);
    }
}
// Allocates device buffers, uploads the initial coordinates and velocities,
// launches the point-movement kernel, and downloads the result into
// currentPointsCordniates. Returns the last CUDA status.
// Fixes: the original kept executing after a failed CUDA call (no returns),
// could launch with 0 blocks when size < 1000 (and skipped the tail when the
// block count did not divide size), never copied the kernel result back to
// the host, and leaked the device buffers on success.
cudaError_t computePointsCordinates(double time,double* initPointsCordinates , double* pointsVelocityArr, double* currentPointsCordniates, int size)
{
    cudaError_t cudaStatus;
    double* dev_currentPointsCordinates = 0;
    double* dev_pointsVelocityArr = 0;
    double* dev_initPointsCordinates = 0;
    // Launch geometry: 1000-thread blocks, rounded up so every index < size
    // is covered (the kernel guards against the overshoot).
    int threadsPerBlock = 1000;
    int parts = (size + threadsPerBlock - 1) / threadsPerBlock;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); fflush(stdout);
        return cudaStatus;
    }
    // Allocate GPU buffers for the three point vectors.
    cudaStatus = cudaMalloc((void**)&dev_currentPointsCordinates, size * sizeof(double));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    cudaStatus = cudaMalloc((void**)&dev_pointsVelocityArr, size * sizeof(double));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    cudaStatus = cudaMalloc((void**)&dev_initPointsCordinates, size * sizeof(double));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    // Copy the input vectors from host memory to the GPU buffers. The output
    // buffer is fully overwritten by the kernel, so it needs no upload.
    cudaStatus = cudaMemcpy(dev_pointsVelocityArr, pointsVelocityArr, size * sizeof(double), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    cudaStatus = cudaMemcpy(dev_initPointsCordinates, initPointsCordinates, size * sizeof(double), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!"); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    pointsMovementCalKernel << <parts, threadsPerBlock >> >(size, dev_initPointsCordinates, dev_pointsVelocityArr, dev_currentPointsCordinates, time);
    // Check for launch-configuration errors.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); fflush(stdout);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
        return cudaStatus;
    }
    // Fix: download the computed coordinates (the original never copied the
    // result back, leaving the caller's buffer untouched).
    cudaStatus = cudaMemcpy(currentPointsCordniates, dev_currentPointsCordinates, size * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!"); fflush(stdout);
    }
    // Fix: release the device buffers on every path (they leaked on success).
    error(dev_currentPointsCordinates, dev_pointsVelocityArr, dev_initPointsCordinates);
    return cudaStatus;
}
|
b203474b01b21fa823f01b165e65c73e95805f9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _CONV_CUDA_KERNEL
#define _CONV_CUDA_KERNEL
#include "cta_config.h"
// Applies a 3x3 convolution (coefficients read row-major from `kernel`) to a
// num_rows x num_cols matrix processed as BLOCK_SIZE x BLOCK_SIZE tiles;
// each thread block walks tile ids in a grid-stride loop over block_id.
// NOTE(review): tmp_buffer is indexed with tidx as the column, so the launch
// presumably uses blockDim == (NUM_THREADS_X, NUM_THREADS_Y) with
// NUM_THREADS_X == BLOCK_SIZE -- confirm against the host launch code.
// NOTE(review): the input/output index expression implies a tile-interleaved
// memory layout rather than plain row-major -- confirm the host packing.
// NOTE(review): neighbor indices are clamped at tile edges, so pixels on
// internal tile boundaries do not read their true neighbors; also, if
// CLAMP's upper bound is inclusive, indices can reach BLOCK_SIZE (one past
// the last valid element, 0..BLOCK_SIZE-1) -- check the macro in cta_config.h.
__global__ void Conv3x3(float* input, float* kernel, float* output,
int num_rows, int num_cols) {
int bidx = blockIdx.x;
int tidx = threadIdx.x;
int tidy = threadIdx.y;
// Stage the 3x3 kernel coefficients in shared memory (first 9 threads of row 0).
__shared__ float tmp_kernel[9];
if (tidy == 0) {
if (tidx < 9) {
tmp_kernel[tidx] = kernel[tidx];
}
}
__syncthreads();
int num_matrix_blocks = (num_rows * num_cols) / (BLOCK_SIZE * BLOCK_SIZE);
// One input tile staged in shared memory per loop iteration.
__shared__ float tmp_buffer[BLOCK_SIZE][BLOCK_SIZE];
for (int block_id = bidx; block_id < num_matrix_blocks; block_id += gridDim.x) {
// Cooperative load: thread rows stride by NUM_THREADS_Y over the tile.
for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) {
tmp_buffer[y + tidy][tidx] = input[
(y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks
+ block_id * NUM_THREADS_Y * NUM_THREADS_X
+ tidy * NUM_THREADS_X + tidx];
}
// All loads must complete before any thread reads the tile.
__syncthreads();
for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) {
float sum_val = 0.0f;
// 3x3 stencil around (y + tidy, tidx), clamped to the tile bounds.
for (int ky = 0; ky < 3; ++ky) {
for (int kx = 0; kx < 3; ++kx) {
int row_index = y + tidy + ky - 1;
int col_index = tidx + kx - 1;
CLAMP(row_index, 0, BLOCK_SIZE);
CLAMP(col_index, 0, BLOCK_SIZE);
sum_val += (tmp_buffer[row_index][col_index]
* tmp_kernel[ky * 3 + kx]);
}
}
output[
(y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks
+ block_id * NUM_THREADS_Y * NUM_THREADS_X
+ tidy * NUM_THREADS_X + tidx] = sum_val;
}
// Keep the shared tile intact until every thread has finished reading it.
__syncthreads();
}
return;
}
#endif
| b203474b01b21fa823f01b165e65c73e95805f9d.cu | #ifndef _CONV_CUDA_KERNEL
#define _CONV_CUDA_KERNEL
#include "cta_config.h"
__global__ void Conv3x3(float* input, float* kernel, float* output,
int num_rows, int num_cols) {
int bidx = blockIdx.x;
int tidx = threadIdx.x;
int tidy = threadIdx.y;
__shared__ float tmp_kernel[9];
if (tidy == 0) {
if (tidx < 9) {
tmp_kernel[tidx] = kernel[tidx];
}
}
__syncthreads();
int num_matrix_blocks = (num_rows * num_cols) / (BLOCK_SIZE * BLOCK_SIZE);
__shared__ float tmp_buffer[BLOCK_SIZE][BLOCK_SIZE];
for (int block_id = bidx; block_id < num_matrix_blocks; block_id += gridDim.x) {
for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) {
tmp_buffer[y + tidy][tidx] = input[
(y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks
+ block_id * NUM_THREADS_Y * NUM_THREADS_X
+ tidy * NUM_THREADS_X + tidx];
}
__syncthreads();
for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) {
float sum_val = 0.0f;
for (int ky = 0; ky < 3; ++ky) {
for (int kx = 0; kx < 3; ++kx) {
int row_index = y + tidy + ky - 1;
int col_index = tidx + kx - 1;
CLAMP(row_index, 0, BLOCK_SIZE);
CLAMP(col_index, 0, BLOCK_SIZE);
sum_val += (tmp_buffer[row_index][col_index]
* tmp_kernel[ky * 3 + kx]);
}
}
output[
(y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks
+ block_id * NUM_THREADS_Y * NUM_THREADS_X
+ tidy * NUM_THREADS_X + tidx] = sum_val;
}
__syncthreads();
}
return;
}
#endif
|
a6ba15a867fdc6e7088fe4e702b97555b8b76e0f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/homogeneity_score.cuh>
#include <raft/stats/mutual_info_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
// Test-case parameters for the homogeneity-score fixture.
struct homogeneityParam {
int nElements;       // number of labels in each input array
int lowerLabelRange; // smallest generated label value (inclusive)
int upperLabelRange; // largest generated label value (inclusive)
bool sameArrays;     // when true, the predicted labels are a copy of the truth labels
double tolerance;    // absolute tolerance for comparing against the golden value
};
// test fixture class
// Test fixture: generates two random label arrays in
// [lowerLabelRange, upperLabelRange] (identical when params.sameArrays),
// computes a golden homogeneity value from raft's mutual_info_score and
// entropy primitives, then runs raft::stats::homogeneity_score on the GPU.
template <typename T>
class homogeneityTest : public ::testing::TestWithParam<homogeneityParam> {
 protected:
  // generates the inputs, computes the golden value and the GPU result
  void SetUp() override
  {
    // getting the parameters
    params = ::testing::TestWithParam<homogeneityParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;
    stream = resource::get_cuda_stream(handle);
    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }
    // allocating and initializing memory to the GPU
    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
    raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
    // calculating the golden output
    double truthMI, truthEntropy;
    truthMI = raft::stats::mutual_info_score(truthClusterArray.data(),
                                             predClusterArray.data(),
                                             nElements,
                                             lowerLabelRange,
                                             upperLabelRange,
                                             stream);
    truthEntropy = raft::stats::entropy(
      truthClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream);
    // golden value: homogeneity = MI(truth, pred) / H(truth); defined as 1.0
    // when the truth entropy is zero or the input is empty
    if (truthEntropy) {
      truthHomogeneity = truthMI / truthEntropy;
    } else
      truthHomogeneity = 1.0;
    if (nElements == 0) truthHomogeneity = 1.0;
    // calling the homogeneity CUDA implementation
    computedHomogeneity = raft::stats::homogeneity_score(
      handle,
      raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange);
  }
  // declaring the data values
  raft::resources handle;
  homogeneityParam params;
  T lowerLabelRange, upperLabelRange;  // label range used for generation
  int nElements = 0;                   // number of labels per array
  double truthHomogeneity = 0;         // host-computed golden value
  double computedHomogeneity = 0;      // GPU-computed value under test
  hipStream_t stream = 0;
};
// setting test parameter values
// Each row: {nElements, lowerLabelRange, upperLabelRange, sameArrays, tolerance}.
const std::vector<homogeneityParam> inputs = {{199, 1, 10, false, 0.000001},
                                              {200, 15, 100, false, 0.000001},
                                              {100, 1, 20, false, 0.000001},
                                              {10, 1, 10, false, 0.000001},
                                              {198, 1, 100, false, 0.000001},
                                              {300, 3, 99, false, 0.000001},
                                              {199, 1, 10, true, 0.000001},
                                              {200, 15, 100, true, 0.000001},
                                              {100, 1, 20, true, 0.000001},
                                              {10, 1, 10, true, 0.000001},
                                              {198, 1, 100, true, 0.000001},
                                              {300, 3, 99, true, 0.000001}};
// writing the test suite
typedef homogeneityTest<int> homogeneityTestClass;
// Compares the GPU homogeneity score against the host-side golden value.
TEST_P(homogeneityTestClass, Result)
{
  ASSERT_NEAR(computedHomogeneity, truthHomogeneity, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(homogeneity, homogeneityTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
| a6ba15a867fdc6e7088fe4e702b97555b8b76e0f.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <algorithm>
#include <gtest/gtest.h>
#include <iostream>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/stats/homogeneity_score.cuh>
#include <raft/stats/mutual_info_score.cuh>
#include <raft/util/cudart_utils.hpp>
#include <random>
namespace raft {
namespace stats {
// parameter structure definition
// Test-case parameters for the homogeneity-score fixture.
struct homogeneityParam {
int nElements;       // number of labels in each input array
int lowerLabelRange; // smallest generated label value (inclusive)
int upperLabelRange; // largest generated label value (inclusive)
bool sameArrays;     // when true, the predicted labels are a copy of the truth labels
double tolerance;    // absolute tolerance for comparing against the golden value
};
// test fixture class
// Test fixture: generates two random label arrays in
// [lowerLabelRange, upperLabelRange] (identical when params.sameArrays),
// computes a golden homogeneity value from raft's mutual_info_score and
// entropy primitives, then runs raft::stats::homogeneity_score on the GPU.
template <typename T>
class homogeneityTest : public ::testing::TestWithParam<homogeneityParam> {
 protected:
  // generates the inputs, computes the golden value and the GPU result
  void SetUp() override
  {
    // getting the parameters
    params = ::testing::TestWithParam<homogeneityParam>::GetParam();
    nElements       = params.nElements;
    lowerLabelRange = params.lowerLabelRange;
    upperLabelRange = params.upperLabelRange;
    stream = resource::get_cuda_stream(handle);
    // generating random value test input
    std::vector<int> arr1(nElements, 0);
    std::vector<int> arr2(nElements, 0);
    std::random_device rd;
    std::default_random_engine dre(rd());
    std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange);
    std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); });
    if (params.sameArrays) {
      arr2 = arr1;
    } else {
      std::generate(arr2.begin(), arr2.end(), [&]() { return intGenerator(dre); });
    }
    // allocating and initializing memory to the GPU
    rmm::device_uvector<T> truthClusterArray(nElements, stream);
    rmm::device_uvector<T> predClusterArray(nElements, stream);
    raft::update_device(truthClusterArray.data(), &arr1[0], (int)nElements, stream);
    raft::update_device(predClusterArray.data(), &arr2[0], (int)nElements, stream);
    // calculating the golden output
    double truthMI, truthEntropy;
    truthMI = raft::stats::mutual_info_score(truthClusterArray.data(),
                                             predClusterArray.data(),
                                             nElements,
                                             lowerLabelRange,
                                             upperLabelRange,
                                             stream);
    truthEntropy = raft::stats::entropy(
      truthClusterArray.data(), nElements, lowerLabelRange, upperLabelRange, stream);
    // golden value: homogeneity = MI(truth, pred) / H(truth); defined as 1.0
    // when the truth entropy is zero or the input is empty
    if (truthEntropy) {
      truthHomogeneity = truthMI / truthEntropy;
    } else
      truthHomogeneity = 1.0;
    if (nElements == 0) truthHomogeneity = 1.0;
    // calling the homogeneity CUDA implementation
    computedHomogeneity = raft::stats::homogeneity_score(
      handle,
      raft::make_device_vector_view<const T>(truthClusterArray.data(), nElements),
      raft::make_device_vector_view<const T>(predClusterArray.data(), nElements),
      lowerLabelRange,
      upperLabelRange);
  }
  // declaring the data values
  raft::resources handle;
  homogeneityParam params;
  T lowerLabelRange, upperLabelRange;  // label range used for generation
  int nElements = 0;                   // number of labels per array
  double truthHomogeneity = 0;         // host-computed golden value
  double computedHomogeneity = 0;      // GPU-computed value under test
  cudaStream_t stream = 0;
};
// setting test parameter values
// Each row: {nElements, lowerLabelRange, upperLabelRange, sameArrays, tolerance}.
const std::vector<homogeneityParam> inputs = {{199, 1, 10, false, 0.000001},
                                              {200, 15, 100, false, 0.000001},
                                              {100, 1, 20, false, 0.000001},
                                              {10, 1, 10, false, 0.000001},
                                              {198, 1, 100, false, 0.000001},
                                              {300, 3, 99, false, 0.000001},
                                              {199, 1, 10, true, 0.000001},
                                              {200, 15, 100, true, 0.000001},
                                              {100, 1, 20, true, 0.000001},
                                              {10, 1, 10, true, 0.000001},
                                              {198, 1, 100, true, 0.000001},
                                              {300, 3, 99, true, 0.000001}};
// writing the test suite
typedef homogeneityTest<int> homogeneityTestClass;
// Compares the GPU homogeneity score against the host-side golden value.
TEST_P(homogeneityTestClass, Result)
{
  ASSERT_NEAR(computedHomogeneity, truthHomogeneity, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(homogeneity, homogeneityTestClass, ::testing::ValuesIn(inputs));
} // end namespace stats
} // end namespace raft
|
96fc3eb8049d3f3488b6fa6be4808a71e6203d2a.hip | // !!! This is a file automatically generated by hipify!!!
///sta programa calcula la versin paralelizada del algoritmo FFT_DIF_DIT_TD
///(30/12/2016)
///sta versin sirve para graficar en matlab los tiempos de ejecucin, considerando N = (2^5 x 3^4 x 5^4), Li = N y Lo = vara
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li, int N);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Nmero de elementos del vector de entrada
/// Li >>> Nmero de elementos de entrada diferentes de cero
/// Lo >>> Nmero de elementos de salida requeridos
/// loop >>> Nmero de iteraciones
/// muestras >>> Nmero de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el nmero de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de n_max, m_max y l_max (N = (2^n_max x 3^m_max x 5^l_max))
const int n_max = 5;
const int m_max = 4;
const int l_max = 4;
///Ingrese el valor de Li_max
const int Li_max = 1620000;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Funcin principal
// Entry point: for fixed N = 2^n_max * 3^m_max * 5^l_max and Li = Li_max,
// sweeps the output length Lo = 2^n * 3^m * 5^l over the combinations with
// l <= m <= n (n <= n_max, m <= m_max, l <= l_max), times `loop` executions
// of the FFT_DIF_DIT_TD pipeline per combination on the GPU, and appends the
// 13 averaged times (ms) to a device-specific binary file.
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////DEVICE SELECTION////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
// NOTE(review): device 1 is selected unconditionally; if the reported
// device id is neither 0 nor 1, `da` is used uninitialized below --
// confirm this program only runs on the two known machines.
hipSetDevice(1);
hipGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_NCompuesta_LiN_LoVARIA_CUDA_GTX970.bin","a+b"); // creates the file or appends to an existing one ("a+b")
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_NCompuesta_LiN_LoVARIA_CUDA_TESLAK20c.bin","a+b"); // creates the file or appends to an existing one ("a+b")
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;          // accumulated elapsed time over `loop` iterations (ms)
float promedio[13];  // average time per (n, m, l) combination
int cont_1,n_1,m_1,l_1,m_ant,l_ant;
cont_1 = 0;
m_ant = 0;
l_ant = 0;
// Pause so the operator can confirm the selected device.
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = 1;i_N <= 1;i_N++)
{
N = (int )((pow(2,n_max))*(pow(3,m_max))*(pow(5,l_max)));
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
// The m_ant/l_ant carry-over restricts the sweep to combinations with
// l <= m <= n. NOTE(review): this enumeration appears designed to yield
// exactly the 13 cases stored in promedio -- confirm the count.
for(n_1 = 1;n_1 <= n_max;n_1++)
{
for(m_1 = m_ant; m_1 <= n_1;m_1++)
{
m_ant = m_1;
for(l_1 = l_ant;l_1 <= m_1;l_1++)
{
l_ant = l_1;
if((m_1 <= m_max) && (l_1 <= l_max))
{
Lo = (int )((pow(2,n_1))*(pow(3,m_1))*(pow(5,l_1)));
cont_1++;
printf("\n Li = %d Lo = %d",Li,Lo);
/// Open the binary input files (real and imaginary parts of x[n]).
db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
// CUDA events used to time one full application iteration.
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
// Generate the host-side input vector x[n] (read from the files above).
vector_entrada_xn(Li,N);
/// Generate the twiddle-factor array W[N].
arreglo_W(N);
//---------------------------------------------------------------------------------------------
// Start timing the application.
hipEventRecord(start_app,0);
// Compute the factors Dip and Dop on the host.
asign_rap(N,Li,Lo);
// Host-side computation of the factor P.
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
// Host helper that runs the input stage.
etapa_entrada();
// Host helper that runs the intermediate stage.
etapa_intermedia();
// Host helper that runs the output stage.
etapa_salida();
//---------------------------------------------------------------------------------------------
// Stop timing and read back the elapsed time.
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
// Running sum of all iteration times.
suma = suma + elapsedTime_app;
// Destroy the timing events.
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
// Release the host and device buffers allocated during this iteration.
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
promedio[cont_1-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
}
}
}
fwrite(promedio,sizeof(float),13,da);
printf("\n\nTIEMPOS:\n\n");
int time_print;
for(time_print = 0;time_print < 13;time_print++)
{
printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
}
fclose(da);
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Esta función genera el vector de entrada x[n]
//Reads the Li nonzero complex input samples x[n] from the two binary files
//(real and imaginary parts) already opened in the globals db_open / dc_open,
//and stores them in the global host array x_host.
//Li: number of nonzero input samples to keep (assumed Li <= N).
//N : number of float samples stored in each file.
void vector_entrada_xn(int Li, int N)
{
	//Local variables
	int k;
	float *buffer_real,*buffer_imag;
	size_t nread_real,nread_imag;
	//Host allocations: x_host holds the Li complex samples, the staging
	//buffers hold the full N-sample record stored in each file.
	x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
	buffer_real = (float*)malloc(sizeof(float)*N);
	buffer_imag = (float*)malloc(sizeof(float)*N);
	//Read the real and imaginary parts, checking the element counts so a
	//truncated file is not silently turned into garbage samples.
	nread_real = fread(buffer_real,sizeof(float),N,db_open);
	nread_imag = fread(buffer_imag,sizeof(float),N,dc_open);
	if(nread_real != (size_t)N || nread_imag != (size_t)N)
	{
		printf("\nvector_entrada_xn: short read (%zu real / %zu imag of %d samples)\n",
		       nread_real,nread_imag,N);
	}
	//Pack the first Li samples into the complex input vector
	for(k = 0;k < Li; k++)
	{
		x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
	}
	free(buffer_real);
	free(buffer_imag);
}
//Esta función genera el arreglo W
//Builds the host twiddle-factor table: W_host[m] = e^(-j*2*pi*(m+1)/N)
//for m = 0..N-1, i.e. the factor for exponent m+1 is stored at index m.
//The table is written to the global W_host.
void arreglo_W(int N)
{
	//0-based table index; the corresponding exponent is m + 1
	int m;
	//Host allocation for the N twiddle factors
	W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
	for(m = 0;m < N;m++)
	{
		//Angle computed in double, exactly as (2*CUDART_PI*n)/N with n = m+1
		double ang = (2*CUDART_PI*(m+1))/N;
		W_host[m] = make_cuFloatComplex((float)cos(ang),(float)(-1)*sin(ang));
	}
}
//Esta función genera los factores Dip y Dop
//Chooses the factors Dip and Dop of N that lie closest to the ideal ratios
//N/Li and N/Lo, using the prime factorization of N. Results are written to
//the global Dip and Dop; relies on the global outputs of factor() (vF/svF)
//and product() (Prod/a).
void asign_rap(int N,int Li,int Lo)
{
	//Local variables
	float NLi,NLo,Diprapt,Doprapt;
	int Nh[500];
	int k[500];
	int G;
	int g,i,t,ta;
	int Dipt[500],Dopt[500];
	float distrapt,distrap;
	int Pos,h,Poss;
	int nk[500];
	int r;
	//Initialization
	G = 0;
	svF = 0;
	//Ideal (real-valued) targets for Dip and Dop
	NLi=(float)N/(float)Li;
	NLo=(float)N/(float)Lo;
	Diprapt=NLi;
	Doprapt=NLo;
	//Prime factorization of N: vF holds the factors, svF their count
	factor(N);
	/*
	Collapse the factor list: Nh keeps each distinct prime and k its
	multiplicity (vF is sorted, so equal factors are adjacent). G ends up
	as the index of the last distinct prime.
	*/
	Nh[0] = vF[0];
	k[0]=1;
	for(g=1;g<=svF-1;g=g+1)
	{
		if(vF[g]!=vF[g-1])
		{
			G=G+1;
			Nh[G]=vF[g];
			k[G]=1;
		}
		else
		{
			k[G]=k[G]+1;
		}
	}
	/*
	Enumerate all divisors of N as candidate Dip values; product() leaves
	them in the global Prod and their count in the global a.
	*/
	product(Nh,k,G);
	t = a;
	for(i=0;i<t;i=i+1)
	{
		Dipt[i]=Prod[i];
	}
	distrapt=inf;
	for(g=1;g<=t;g=g+1)
	{
		if(Dipt[g-1]<=NLi)
		{
			//Decode candidate g-1 into the remaining multiplicities nk[h],
			//so the Dop candidates generated below are divisors of
			//N / Dipt[g-1] (mixed-radix decoding of the candidate index).
			Pos=g-1;
			for(h=0;h<=G;h=h+1)
			{
				Poss=floor(Pos/(k[h]+1));
				nk[h]=k[h]+Poss*(k[h]+1)-Pos;
				Pos=Poss;
			}
			product(Nh,nk,G);
			ta=a;
			for(i=0;i<ta;i=i+1)
			{
				Dopt[i]=Prod[i];
			}
			//Keep the (Dip,Dop) pair with the smallest Euclidean distance
			//to the ideal targets (Diprapt, Doprapt).
			for(r=0;r<ta;r=r+1)
			{
				distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
				if(distrap<distrapt)
				{
					distrapt=distrap;
					Dip=Dipt[g-1];
					Dop=Dopt[r];
				}
			}
		}
	}
}
//Esta función encuentra los factores de "N"
//Computes the prime factorization of N by trial division.
//Side effects: appends each prime factor (with multiplicity, in
//nondecreasing order) to the global array vF and advances the global
//counter svF. The caller is responsible for resetting svF beforehand.
void factor(int N)
{
	long remaining;
	int candidate;
	remaining = N;
	//Trial division starting at 2; every divisor peeled off here is
	//necessarily prime, since smaller factors were removed first.
	for(candidate = 2;candidate <= remaining;candidate++)
	{
		while((remaining % candidate) == 0)
		{
			vF[svF] = candidate;
			svF++;
			remaining = remaining/candidate;
		}
	}
}
//Esta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
//Enumerates every divisor that can be formed from the prime pool described
//by vector_1/vector_2 and stores the results in the global array Prod; the
//global 'a' receives the number of divisors generated.
//vector_1[d] holds a distinct prime factor, vector_2[d] its maximum
//multiplicity, and 'valor' is the index of the last distinct prime (G).
//NOTE(review): cont indexes the base block of previously generated
//products and is re-zeroed after each multiplicity step by design.
void product(int vector_1[500],int vector_2[500],int valor)
{
	int d,e,s,pNh,i;
	int cont=0;
	//Start from the single empty product: Prod = {1}
	Prod[0]=1;
	a=1;
	for(d=0;d<=valor;d=d+1)
	{
		s=a;   //number of products built before this prime
		pNh=1; //running power vector_1[d]^e
		for(e=1;e<=vector_2[d];e=e+1)
		{
			pNh=pNh*vector_1[d];
			//Append s new products: each base product times pNh
			for(i=(s*e+1);i<=(s*e+s);i=i+1)
			{
				Prod[i-1]=pNh*Prod[cont];
				cont=cont+1;
			}
			a=a+s;
			cont=0;
		}
	}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
//Host-side driver for the input stage: allocates the device buffers,
//uploads x[n] and W[N], and launches inputStage_kernel over a
//(P*Dop) x Dip thread domain. Uses the file-scope globals
//x_host/W_host/x_device/W_device/y_device and N, Li, Dip, Dop, P.
void etapa_entrada(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////INPUT STAGE///////////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Device allocations for x, W and the stage output y(n1,n2,k1)
	hipMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
	hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
	hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
	//Upload the input vector and the twiddle-factor table
	hipMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
	hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
	//Launch configuration: x-dimension covers n in [0, P*Dop),
	//y-dimension covers k1 in [0, Dip).
	dim3 gridDim(1,1,1);
	dim3 blockDim(1,1,1);
	if((P*Dop) < 32 && (Dip) < 32)
	{
		//Small problem: a single block covers the whole domain
		blockDim.x = (P*Dop);
		blockDim.y = (Dip);
		gridDim.x = 1;
		gridDim.y = 1;
	}
	else
	{
		//32x32 blocks tiled over the domain (ceil division)
		blockDim.x = 32;
		blockDim.y = 32;
		gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
		gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
	}
	//Launch the input-stage kernel and wait for it to finish
	hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
	hipDeviceSynchronize();
}
//Función kernel que ejecuta la etapa de entrada en el device
//Kernel for the input stage: maps the Li nonzero input samples x[n] into
//the y(n1,n2,k1) array consumed by the intermediate-stage DFTs.
//Expected launch: 2D grid whose x-dimension covers n in [0, P*Dop) and
//whose y-dimension covers k1 in [0, Dip).
//W is stored as W[m-1] = e^(-j*2*pi*m/N) for m = 1..N, so an exponent of
//(n*k1) mod N == 0 must use W[N-1] (whose value is 1). The original index
//((n*k1)%N)-1 read W[-1] (out of bounds) in that case; the modular index
//below fixes that while leaving every other case unchanged.
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
	int n1,n2;
	cuFloatComplex t1;
	//Global thread coordinates
	int n = blockDim.x *blockIdx.x + threadIdx.x;
	int k1 = blockDim.y *blockIdx.y + threadIdx.y;
	if( (n < (P*Dop)) && (k1 < Dip))
	{
		//Decompose the flat index n into (n1, n2) with n = n1 + Dop*n2
		n2 = floorf(n/Dop);
		n1 = n - (Dop*n2);
		//Element that depends on x[0]
		if(n == 0)
		{
			y[(k1*Dop*P)+(0*P)+ 0] = x[0];
		}
		//Map x[n] (1 <= n <= Li-1) onto the first set of Dop DFT inputs,
		//twiddling by W^(n*k1) for k1 >= 1.
		if((n >= 1) && (n <= (Li-1)))
		{
			t1 = x[n];
			if(k1 == 0)
			{
				y[(0*Dop*P)+(n1*P)+ n2] = t1;
			}
			if(k1 >= 1)
			{
				//Safe twiddle index: exponent 0 (mod N) -> W[N-1] == 1
				int widx = ((n*k1)%N + N - 1)%N;
				y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[widx],t1);
			}
		}
		//Zero padding for Li <= n <= (P*Dop)-1
		if((n >= Li) && (n <= (P*Dop)-1))
		{
			y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
		}
	}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
//Host-side driver for the intermediate stage: runs Dip*Dop batched P-point
//forward FFTs over y_device (via hipFFT) and leaves the result in z_device.
//Uses the file-scope globals y_device/z_device/in/out and Dip, Dop, P.
void etapa_intermedia(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////INTERMEDIATE STAGE////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Batched-FFT geometry: Dip*Dop transforms of length P, unit stride,
	//with a distance of P between consecutive batches.
	int n[1] = {P};
	int inembed[1] = {P};
	int onembed[1] = {P};
	//Device allocation for z(n1,k2,k1)
	hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
	//Work buffers for the FFT library
	hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
	hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
	//Copy the stage input "y" into the FFT input buffer
	hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
	//Plan and execute the batched C2C forward transform
	hipfftHandle plan;
	hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
	hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
	//Wait for the transform to complete before consuming "out"
	hipDeviceSynchronize();
	//Copy the FFT output into z_device
	hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
	//Release the plan and the work buffers
	hipfftDestroy(plan);
	hipFree(in);
	hipFree(out);
}
//Función auxiliar del host para calcular la etapa de salida en el device
//Host-side driver for the output stage: launches outputStage_kernel over
//the Lo requested output bins and copies X[k] back to the host.
//Uses the file-scope globals z_device/W_device/X_device/X_host and
//N, Lo, Dip, Dop, P.
void etapa_salida(void)
{
	//////////////////////////////////////////////////////////////////////////
	////////////////////////////OUTPUT STAGE//////////////////////////////////
	//////////////////////////////////////////////////////////////////////////
	//Device and host allocations for the output vector X[k]
	hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
	X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
	//1D launch configuration covering k in [0, Lo)
	dim3 gridDim(1,1,1);
	dim3 blockDim(1,1,1);
	if((Lo) < 1024)
	{
		//Small problem: a single block covers all output bins
		blockDim.x = Lo;
		gridDim.x = 1;
	}
	else
	{
		//Full 1024-thread blocks tiled over the bins (ceil division)
		blockDim.x = 1024;
		gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
	}
	//Launch the output-stage kernel and wait for it to finish
	hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
	hipDeviceSynchronize();
	//Fetch the result back to the host
	hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
}
//Función kernel que ejecuta la etapa de salida en el device
//Kernel for the output stage: assembles the Lo requested DFT bins X[k]
//from the intermediate results z(n1,k2,k1).
//Expected launch: 1D grid whose x-dimension covers k in [0, Lo).
//Three evaluation paths are used, matching the original algorithm:
// - Lo <= Dip (or k < Dip): plain accumulation over n1,
// - Dop <= 4: direct twiddle-and-accumulate,
// - otherwise: second-order (2BF) filtering.
//W is stored as W[m-1] = e^(-j*2*pi*m/N) for m = 1..N, so every index of
//the form ((e)%N)-1 read W[-1] (out of bounds) whenever the exponent e was
//a multiple of N. The modular index used below maps that case to W[N-1]
//(whose value is 1, the mathematically correct factor) and leaves all
//other cases unchanged.
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
	//Local variables; k1,k2,b and the filter state t1..t5 persist across
	//n1 iterations within a single thread.
	int n1,k_aux,k1,k2,a,b;
	int widx;
	cuFloatComplex t1,t2,t3,t4,t5;
	//Global output-bin index
	int k = blockDim.x *blockIdx.x + threadIdx.x;
	if(k < Lo)
	{
		for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
		{
			if(Lo <= Dip)
			{
				//X(k) for 0 <= k <= Lo-1; with k = k1 + Dip*k2 and k2 = 0, k = k1
				if(n1 == 0) //First iteration initializes the accumulator
				{
					X[k] = z[(k*Dop*P)+(0*P) + 0];
				}
				else
				{
					if(n1 == 1)
					{
						X[k] = z[(k*Dop*P)+(0*P) + 0];
					}
					X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
				}
			}
			else
			{
				if((k >= 0) && (k <= (Dip-1)))
				{
					//X(k) for 0 <= k <= Dip-1 (same decomposition, k2 = 0)
					if(n1 == 0)
					{
						X[k] = z[(k*Dop*P)+(0*P) + 0];
					}
					else
					{
						if(n1 == 1)
						{
							X[k] = z[(k*Dop*P)+(0*P) + 0];
						}
						X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
					}
				}
				else
				{
					if(Dop <= 4)
					{
						//Direct method
						if(n1 == 0)
						{
							k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
							k2 = floorf(k_aux/Dip);
							k1 = k_aux-(Dip*k2);
							X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
						}
						else
						{
							if(n1 == 1)
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
							}
							a = floorf(k/(Dip*P));
							//Safe twiddle index (exponent 0 -> W[N-1] == 1)
							widx = ((n1*(k2+P*(a))*Dip)%N + N - 1)%N;
							X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[widx]));
						}
					}
					else
					{
						//Second-order filtering (2BF) method
						if((Dop-2) >= 1)
						{
							if(n1 == 0)
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								b = floorf(k/(Dip*P));
								widx = (((k2+P*(b))*Dip)%N + N - 1)%N;
								t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[widx]),0.0));
							}
							if((n1 >= 1) && (n1 <= (Dop-2)))
							{
								//k1,k2,b and the filter state carry over from n1 == 0
								t2 = t1;
								widx = (((k2+P*(b))*Dip)%N + N - 1)%N;
								t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
								t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[widx]),0.0));
								t4 = cuCsubf(t3,t2);
							}
							if(n1 == (Dop-1))
							{
								widx = (((k2+P*(b))*Dip)%N + N - 1)%N;
								t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
								X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[widx])));
							}
						}
						else
						{
							//Dop == 1 / Dop == 2 fallback (unreachable in this
							//branch since Dop > 4; kept to mirror the original).
							if(Dop == 1)
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								X[k] = t1;
							}
							else
							{
								k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
								k2 = floorf(k_aux/Dip);
								k1 = k_aux-(Dip*k2);
								t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
								b = floorf(k/(Dip*P));
								widx = (((k2+P*(b))*Dip)%N + N - 1)%N;
								t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[widx]),0.0));
								t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
								X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[widx])));
							}
						}
					}
				}
			}
		}
	}
}
///(30/12/2016)
///Ésta versión sirve para graficar en matlab los tiempos de ejecución, considerando N = (2^5 x 3^4 x 5^4), Li = N y Lo = varía
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li, int N);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de n_max, m_max y l_max (N = (2^n_max x 3^m_max x 5^l_max))
const int n_max = 5;
const int m_max = 4;
const int l_max = 4;
///Ingrese el valor de Li_max
const int Li_max = 1620000;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
cudaSetDevice(1);
cudaGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_NCompuesta_LiN_LoVARIA_CUDA_GTX970.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_NCompuesta_LiN_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Crea o sobre escribe archivo
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[13];
int cont_1,n_1,m_1,l_1,m_ant,l_ant;
cont_1 = 0;
m_ant = 0;
l_ant = 0;
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = 1;i_N <= 1;i_N++)
{
N = (int )((pow(2,n_max))*(pow(3,m_max))*(pow(5,l_max)));
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(n_1 = 1;n_1 <= n_max;n_1++)
{
for(m_1 = m_ant; m_1 <= n_1;m_1++)
{
m_ant = m_1;
for(l_1 = l_ant;l_1 <= m_1;l_1++)
{
l_ant = l_1;
if((m_1 <= m_max) && (l_1 <= l_max))
{
Lo = (int )((pow(2,n_1))*(pow(3,m_1))*(pow(5,l_1)));
cont_1++;
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li,N);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
promedio[cont_1-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
}
}
}
fwrite(promedio,sizeof(float),13,da);
printf("\n\nTIEMPOS:\n\n");
int time_print;
for(time_print = 0;time_print < 13;time_print++)
{
printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
}
fclose(da);
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li, int N)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
//printf("\n ERROR \n");
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
//printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
//printf("\n Numero de factores: %d ",svF);
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
cudaMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignación de memoria en el device para el arreglo "y"
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
//Ejecución del plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se destruye el plan
cufftDestroy(plan);
//Se liberan los arreglos "in" y "out"
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
} |
bb27d8b1c7563d41043bf1c4f381ff412a23e500.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
uchar4 rgba = rgbaImage[blockIdx.x * numCols + blockIdx.y];
greyImage[blockIdx.x/*curr row*/ * numCols + blockIdx.y/*curr col*/] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, 1, 1); // threads are 1d for rgb
const dim3 gridSize( numRows, numCols, 1); // 2d grid of X*Y threads (thread per pixel)
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| bb27d8b1c7563d41043bf1c4f381ff412a23e500.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
uchar4 rgba = rgbaImage[blockIdx.x * numCols + blockIdx.y];
greyImage[blockIdx.x/*curr row*/ * numCols + blockIdx.y/*curr col*/] = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, 1, 1); // threads are 1d for rgb
const dim3 gridSize( numRows, numCols, 1); // 2d grid of X*Y threads (thread per pixel)
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
e391d9e950f35572454fce7dd49304e8421261be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace funcs {
template <typename T>
struct SelectedRowsAdd<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::SelectedRows& input2,
phi::SelectedRows* output) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2.height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2.height()));
output->set_height(in1_height);
phi::Vector<int64_t> in1_rows(input1.rows());
auto& in2_rows = input2.rows();
std::vector<int64_t> out_rows;
out_rows.reserve(in1_rows.size() + in2_rows.size());
// concat rows
out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
output->set_rows(out_rows);
auto* out_value = output->mutable_value();
auto& in1_value = input1.value();
auto& in2_value = input2.value();
auto in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
in2_value.numel() / in2_rows.size(),
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
in2_value.numel() / in2_rows.size()));
PADDLE_ENFORCE_EQ(
in1_row_numel,
out_value->numel() / out_rows.size(),
phi::errors::InvalidArgument(
"The input and oupput width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
out_value->numel() / out_rows.size()));
auto* out_data = out_value->data<T>();
auto* in1_data = in1_value.data<T>();
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2.place();
PADDLE_ENFORCE_EQ(in2_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto out_place = context.GetPlace();
PADDLE_ENFORCE_EQ(out_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
memory_utils::Copy(out_place,
out_data,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
auto* in2_data = in2_value.data<T>();
memory_utils::Copy(out_place,
out_data + in1_value.numel(),
in2_place,
in2_data,
in2_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAdd<phi::GPUContext, float>;
template struct SelectedRowsAdd<phi::GPUContext, double>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we can not use
// tensor_out[index] += selected_rows[index]; Instead, we have to use
// AtomicAdd to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const phi::DenseTensor& input2,
phi::DenseTensor* output) {
auto in1_height = input1.height();
auto in2_dims = input2.dims();
auto out_dims = output->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument(
"The two inputs height must be equal."
"But received first input height = [%d], first input height = [%d]",
in1_height,
in2_dims[0]));
PADDLE_ENFORCE_EQ(
in1_height,
out_dims[0],
phi::errors::InvalidArgument(
"The input and output height must be equal."
"But received input height = [%d], output height = [%d]",
in1_height,
out_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2.numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2.numel() / in1_height));
PADDLE_ENFORCE_EQ(
in1_row_numel,
output->numel() / in1_height,
phi::errors::InvalidArgument(
"The input and output width must be equal."
"But received input width = [%d], output width = [%d]",
in1_row_numel,
output->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2.data<T>();
auto* out_data = output->data<T>();
phi::funcs::SetConstant<phi::GPUContext, T> functor;
functor(context, output, static_cast<T>(0));
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
hipLaunchKernelGGL(( SelectedRowsAddTensorKernel<T, block_size>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
out_data,
in1_row_numel);
auto out_eigen = EigenVector<T>::Flatten(*output);
auto in2_eigen = EigenVector<T>::Flatten(input2);
out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
}
};
template struct SelectedRowsAddTensor<phi::GPUContext, float>;
template struct SelectedRowsAddTensor<phi::GPUContext, double>;
template struct SelectedRowsAdd<phi::GPUContext, phi::dtype::float16>;
template struct SelectedRowsAddTensor<phi::GPUContext, phi::dtype::float16>;
template <typename T>
struct SelectedRowsAddTo<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
const int64_t input2_offset,
phi::SelectedRows* input2) {
auto in1_height = input1.height();
PADDLE_ENFORCE_EQ(
in1_height,
input2->height(),
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
input2->height()));
auto& in1_rows = input1.rows();
auto& in2_rows = *(input2->mutable_rows());
auto& in1_value = input1.value();
auto* in2_value = input2->mutable_value();
// concat rows
phi::MixVector<int64_t> mixv_in2_rows(&in2_rows);
if (in1_rows.size()) {
mixv_in2_rows.Extend(in1_rows.begin(), in1_rows.end());
}
auto in1_place = input1.place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto in2_place = input2->place();
PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
true,
phi::errors::InvalidArgument(
"The running environment is not on the GPU place."));
auto* in1_data = in1_value.data<T>();
auto* in2_data = in2_value->data<T>();
memory_utils::Copy(in2_place,
in2_data + input2_offset,
in1_place,
in1_data,
in1_value.numel() * sizeof(T),
context.stream());
}
};
template struct SelectedRowsAddTo<phi::GPUContext, float>;
template struct SelectedRowsAddTo<phi::GPUContext, double>;
template struct SelectedRowsAddTo<phi::GPUContext, int>;
template struct SelectedRowsAddTo<phi::GPUContext, int64_t>;
template struct SelectedRowsAddTo<phi::GPUContext, phi::dtype::float16>;
namespace {
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
const int64_t* rows,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
// Since index in rows of SelectedRows can be duplicate, we have to use
// Atomic Operation to avoid concurrent write error.
phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
}
}
} // namespace
template <typename T>
struct SelectedRowsAddToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input1,
phi::DenseTensor* input2) {
auto in1_height = input1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = input1.value();
auto& in1_rows = input1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.data<T>();
auto* in2_data = input2->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid(in1_rows.size(), 1);
phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
hipLaunchKernelGGL(( SelectedRowsAddToTensorKernel<T, block_size>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data,
mixv_in1_rows.CUDAData(context.GetPlace()),
in2_data,
in1_row_numel);
}
};
template struct SelectedRowsAddToTensor<phi::GPUContext, float>;
template struct SelectedRowsAddToTensor<phi::GPUContext, double>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int64_t>;
template struct SelectedRowsAddToTensor<phi::GPUContext, phi::dtype::float16>;
namespace scatter {
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input,
const int64_t* input_rows,
T* out,
const int64_t* out_rows,
size_t out_rows_size,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
__shared__ size_t out_idx;
if (tid == 0) {
for (size_t i = 0; i < out_rows_size; i++) {
if (input_rows[ty] == out_rows[i]) {
out_idx = i;
}
}
}
__syncthreads();
input += ty * row_numel;
out += out_idx * row_numel;
for (int index = tid; index < row_numel; index += block_size) {
phi::CudaAtomicAdd(out + index, input[index]);
}
}
template <typename DeviceContext, typename T>
struct MergeAddImpl {
phi::SelectedRows operator()(const DeviceContext& context,
const phi::SelectedRows& input,
const bool sorted_result = false) {
phi::SelectedRows out;
(*this)(context, input, &out);
return out;
}
void operator()(const DeviceContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result = false) {
phi::Vector<int64_t> input_rows(input.rows());
if (input_rows.size() == 0) {
return;
}
phi::SelectedRows& out = *output;
std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
auto input_width = input.value().dims()[1];
out.set_rows(merge_rows);
out.set_height(input.height());
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
auto* input_data = input.value().data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
void operator()(const DeviceContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result = false) {
if (inputs.size() == 0) {
VLOG(3) << "no input! return";
return;
}
const phi::SelectedRows* has_value_input = nullptr;
for (auto* in : inputs) {
if (in->rows().size() > 0) {
has_value_input = in;
break;
}
}
if (has_value_input == nullptr) {
VLOG(3) << "no input has value! just return" << std::endl;
return;
}
auto input_width = has_value_input->value().dims()[1];
auto input_height = has_value_input->height();
phi::SelectedRows& out = *output;
std::set<int64_t> merged_row_set;
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
PADDLE_ENFORCE_EQ(
input_width,
input->value().dims()[1],
phi::errors::InvalidArgument("All input should have same "
"dimension except for the first one."));
PADDLE_ENFORCE_EQ(
input_height,
input->height(),
phi::errors::InvalidArgument("All input should have same height."));
merged_row_set.insert(input->rows().begin(), input->rows().end());
}
std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
merged_row_set.end());
phi::Vector<int64_t> merge_rows(merge_rows_cpu);
out.set_rows(merge_rows);
out.set_height(input_height);
DenseTensor* out_tensor = out.mutable_value();
out_tensor->Resize(
phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
context.template Alloc<T>(out_tensor);
phi::funcs::SetConstant<DeviceContext, T> constant_functor;
constant_functor(context, out.mutable_value(), static_cast<T>(0));
auto* out_data = out.mutable_value()->data<T>();
const int block_size = 256;
dim3 threads(block_size, 1);
for (auto* input : inputs) {
if (input->rows().size() == 0) {
continue;
}
auto* input_data = input->value().data<T>();
auto& input_rows = input->rows();
dim3 grid1(input_rows.size(), 1);
phi::MixVector<int64_t> mix_vector_input(&input_rows);
phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
hipLaunchKernelGGL(( MergeAddKernel<T, 256>), dim3(grid1), dim3(threads), 0, context.stream(),
input_data,
mix_vector_input.CUDAData(context.GetPlace()),
out_data,
mix_vector_out.CUDAMutableData(context.GetPlace()),
out.rows().size(),
input_width);
mix_vector_out.CopyToCPU();
}
}
};
template <typename T>
struct MergeAdd<phi::GPUContext, T> {
// unary functor, merge by adding duplicated rows in
// the input SelectedRows object.
phi::SelectedRows operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
const bool sorted_result) {
return MergeAddImpl<phi::GPUContext, T>()(context, input, sorted_result);
}
void operator()(const phi::GPUContext& context,
const phi::SelectedRows& input,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, input, output, sorted_result);
}
void operator()(const phi::GPUContext& context,
const std::vector<const phi::SelectedRows*>& inputs,
phi::SelectedRows* output,
const bool sorted_result) {
MergeAddImpl<phi::GPUContext, T>()(context, inputs, output, sorted_result);
}
};
#define TEMPLATE_SPECIALIZED_FOR_MERGEADD(dtype) \
template struct MergeAddImpl<phi::GPUContext, dtype>; \
template struct MergeAdd<phi::GPUContext, dtype>;
TEMPLATE_SPECIALIZED_FOR_MERGEADD(float)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(double)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int64_t)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::float16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::bfloat16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<float>)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<double>)
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
const int64_t* rows,
const ScatterOps& op,
T* tensor_out,
int64_t row_numel) {
const int ty = blockIdx.x;
int tid = threadIdx.x;
selected_rows += ty * row_numel;
tensor_out += rows[ty] * row_numel;
// FIXME(typhoonzero): use macro fix the below messy code.
switch (op) {
case ScatterOps::ASSIGN:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index];
}
break;
case ScatterOps::ADD:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] += selected_rows[index];
}
break;
case ScatterOps::SUB:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] -= selected_rows[index];
}
break;
case ScatterOps::SUBBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] - tensor_out[index];
}
break;
case ScatterOps::MUL:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] *= selected_rows[index];
}
break;
case ScatterOps::DIV:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] /= selected_rows[index];
}
break;
case ScatterOps::DIVBY:
for (int index = tid; index < row_numel; index += block_size) {
tensor_out[index] = selected_rows[index] / tensor_out[index];
}
break;
}
}
template <typename T>
struct UpdateToTensor<phi::GPUContext, T> {
void operator()(const phi::GPUContext& context,
const ScatterOps& op,
const phi::SelectedRows& input1,
DenseTensor* input2) {
// NOTE: Use SelectedRowsAddToTensor for better performance
// no additional MergeAdd called.
MergeAdd<phi::GPUContext, T> merge_func;
auto merged_in1 = merge_func(context, input1);
auto in1_height = merged_in1.height();
auto in2_dims = input2->dims();
PADDLE_ENFORCE_EQ(
in1_height,
in2_dims[0],
phi::errors::InvalidArgument("The two inputs height must be equal."
"But received first input height = "
"[%d], second input height = [%d]",
in1_height,
in2_dims[0]));
auto& in1_value = merged_in1.value();
auto& in1_rows = merged_in1.rows();
int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
PADDLE_ENFORCE_EQ(
in1_row_numel,
input2->numel() / in1_height,
phi::errors::InvalidArgument(
"The two inputs width must be equal."
"But received first input width = [%d], second input width = [%d]",
in1_row_numel,
input2->numel() / in1_height));
auto* in1_data = in1_value.template data<T>();
auto* in2_data = input2->data<T>();
dim3 threads(phi::PADDLE_CUDA_NUM_THREADS, 1);
dim3 grid(in1_rows.size(), 1);
hipLaunchKernelGGL(( UpdateToTensorKernel<T, phi::PADDLE_CUDA_NUM_THREADS>)
, dim3(grid), dim3(threads), 0, context.stream(),
in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel);
}
};
} // namespace scatter
} // namespace funcs
} // namespace phi
| e391d9e950f35572454fce7dd49304e8421261be.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <set>
#include <vector>
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/selected_rows_functor.h"
namespace phi {
namespace funcs {
// Concatenates two SelectedRows on the GPU: output's row list is
// input1.rows() followed by input2.rows(), and the output value tensor is
// input1's values followed by input2's values. Rows are NOT de-duplicated
// or summed here (see MergeAdd for that). The output value tensor must
// already be sized to hold both inputs; all three must reside on the GPU.
template <typename T>
struct SelectedRowsAdd<phi::GPUContext, T> {
  void operator()(const phi::GPUContext& context,
                  const phi::SelectedRows& input1,
                  const phi::SelectedRows& input2,
                  phi::SelectedRows* output) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(
        in1_height,
        input2.height(),
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     input2.height()));
    output->set_height(in1_height);
    phi::Vector<int64_t> in1_rows(input1.rows());
    auto& in2_rows = input2.rows();
    std::vector<int64_t> out_rows;
    out_rows.reserve(in1_rows.size() + in2_rows.size());
    // concat rows
    out_rows.insert(out_rows.end(), in1_rows.begin(), in1_rows.end());
    out_rows.insert(out_rows.end(), in2_rows.begin(), in2_rows.end());
    output->set_rows(out_rows);
    auto* out_value = output->mutable_value();
    auto& in1_value = input1.value();
    auto& in2_value = input2.value();
    // Width (elements per row) of input1; both inputs and the output must
    // agree on this width.
    auto in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        in2_value.numel() / in2_rows.size(),
        phi::errors::InvalidArgument(
            "The two inputs width must be equal."
            "But received first input width = [%d], second input width = [%d]",
            in1_row_numel,
            in2_value.numel() / in2_rows.size()));
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        out_value->numel() / out_rows.size(),
        phi::errors::InvalidArgument(
            "The input and oupput width must be equal."
            "But received input width = [%d], output width = [%d]",
            in1_row_numel,
            out_value->numel() / out_rows.size()));
    auto* out_data = out_value->data<T>();
    auto* in1_data = in1_value.data<T>();
    auto in1_place = input1.place();
    PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
                      true,
                      phi::errors::InvalidArgument(
                          "The running environment is not on the GPU place."));
    auto in2_place = input2.place();
    PADDLE_ENFORCE_EQ(in2_place.GetType() == phi::AllocationType::GPU,
                      true,
                      phi::errors::InvalidArgument(
                          "The running environment is not on the GPU place."));
    auto out_place = context.GetPlace();
    PADDLE_ENFORCE_EQ(out_place.GetType() == phi::AllocationType::GPU,
                      true,
                      phi::errors::InvalidArgument(
                          "The running environment is not on the GPU place."));
    // Two async device copies on the context stream: input1's values first,
    // then input2's values appended right after them.
    memory_utils::Copy(out_place,
                       out_data,
                       in1_place,
                       in1_data,
                       in1_value.numel() * sizeof(T),
                       context.stream());
    auto* in2_data = in2_value.data<T>();
    memory_utils::Copy(out_place,
                       out_data + in1_value.numel(),
                       in2_place,
                       in2_data,
                       in2_value.numel() * sizeof(T),
                       context.stream());
  }
};
template struct SelectedRowsAdd<phi::GPUContext, float>;
template struct SelectedRowsAdd<phi::GPUContext, double>;
namespace {
// Scatter-adds the rows of a SelectedRows value tensor into a dense tensor.
// Launch layout: one block per selected row (grid.x == number of rows);
// block b adds source row b into dense row rows[b]. Threads stride over the
// row's columns in steps of block_size (assumed equal to blockDim.x).
template <typename T, int block_size>
__global__ void SelectedRowsAddTensorKernel(const T* selected_rows,
                                            const int64_t* rows,
                                            T* tensor_out,
                                            int64_t row_numel) {
  const int ty = blockIdx.x;
  int tid = threadIdx.x;
  selected_rows += ty * row_numel;
  tensor_out += rows[ty] * row_numel;
  for (int index = tid; index < row_numel; index += block_size) {
    // Since index in rows of SelectedRows can be duplicate, we can not use
    // tensor_out[index] += selected_rows[index]; Instead, we have to use
    // AtomicAdd to avoid concurrent write error.
    phi::CudaAtomicAdd(tensor_out + index, selected_rows[index]);
  }
}
} // namespace
// Computes output = scatter(input1) + input2, where input1 is a sparse
// SelectedRows and input2/output are dense tensors of shape
// [in1_height, width]. The output is first zeroed, the sparse rows are
// scatter-added into it on the GPU, and finally input2 is added via Eigen.
template <typename T>
struct SelectedRowsAddTensor<phi::GPUContext, T> {
  void operator()(const phi::GPUContext& context,
                  const phi::SelectedRows& input1,
                  const phi::DenseTensor& input2,
                  phi::DenseTensor* output) {
    auto in1_height = input1.height();
    auto in2_dims = input2.dims();
    auto out_dims = output->dims();
    PADDLE_ENFORCE_EQ(
        in1_height,
        in2_dims[0],
        phi::errors::InvalidArgument(
            "The two inputs height must be equal."
            "But received first input height = [%d], first input height = [%d]",
            in1_height,
            in2_dims[0]));
    PADDLE_ENFORCE_EQ(
        in1_height,
        out_dims[0],
        phi::errors::InvalidArgument(
            "The input and output height must be equal."
            "But received input height = [%d], output height = [%d]",
            in1_height,
            out_dims[0]));
    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();
    // Elements per sparse row; must match the dense operands' width.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        input2.numel() / in1_height,
        phi::errors::InvalidArgument(
            "The two inputs width must be equal."
            "But received first input width = [%d], second input width = [%d]",
            in1_row_numel,
            input2.numel() / in1_height));
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        output->numel() / in1_height,
        phi::errors::InvalidArgument(
            "The input and output width must be equal."
            "But received input width = [%d], output width = [%d]",
            in1_row_numel,
            output->numel() / in1_height));
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = input2.data<T>();
    auto* out_data = output->data<T>();
    // Zero the output before scatter-adding the sparse rows into it.
    phi::funcs::SetConstant<phi::GPUContext, T> functor;
    functor(context, output, static_cast<T>(0));
    const int block_size = 256;
    dim3 threads(block_size, 1);
    // One block per sparse row.
    dim3 grid(in1_rows.size(), 1);
    phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
    SelectedRowsAddTensorKernel<T, block_size>
        <<<grid, threads, 0, context.stream()>>>(
            in1_data,
            mixv_in1_rows.CUDAData(context.GetPlace()),
            out_data,
            in1_row_numel);
    // Dense addition of input2, scheduled on the same device via Eigen.
    auto out_eigen = EigenVector<T>::Flatten(*output);
    auto in2_eigen = EigenVector<T>::Flatten(input2);
    out_eigen.device(*context.eigen_device()) = out_eigen + in2_eigen;
  }
};
template struct SelectedRowsAddTensor<phi::GPUContext, float>;
template struct SelectedRowsAddTensor<phi::GPUContext, double>;
template struct SelectedRowsAdd<phi::GPUContext, phi::dtype::float16>;
template struct SelectedRowsAddTensor<phi::GPUContext, phi::dtype::float16>;
// Appends input1 to the tail of input2 in place: input2->rows() gains
// input1's row indices, and input1's value data is copied into input2's
// value tensor starting at element offset input2_offset. Both operands must
// share the same height and live on the GPU; the caller must ensure
// input2's value tensor has room for input2_offset + input1.value().numel()
// elements. The copy is asynchronous on the context stream.
template <typename T>
struct SelectedRowsAddTo<phi::GPUContext, T> {
  void operator()(const phi::GPUContext& context,
                  const phi::SelectedRows& input1,
                  const int64_t input2_offset,
                  phi::SelectedRows* input2) {
    auto in1_height = input1.height();
    PADDLE_ENFORCE_EQ(
        in1_height,
        input2->height(),
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     input2->height()));
    auto& in1_rows = input1.rows();
    auto& in2_rows = *(input2->mutable_rows());
    auto& in1_value = input1.value();
    auto* in2_value = input2->mutable_value();
    // concat rows
    phi::MixVector<int64_t> mixv_in2_rows(&in2_rows);
    if (in1_rows.size()) {
      mixv_in2_rows.Extend(in1_rows.begin(), in1_rows.end());
    }
    auto in1_place = input1.place();
    PADDLE_ENFORCE_EQ(in1_place.GetType() == phi::AllocationType::GPU,
                      true,
                      phi::errors::InvalidArgument(
                          "The running environment is not on the GPU place."));
    auto in2_place = input2->place();
    // BUGFIX: this check previously re-tested in1_place (copy-paste error),
    // so the destination's placement was never validated before the copy.
    PADDLE_ENFORCE_EQ(in2_place.GetType() == phi::AllocationType::GPU,
                      true,
                      phi::errors::InvalidArgument(
                          "The running environment is not on the GPU place."));
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = in2_value->data<T>();
    // Device-to-device async copy of input1's values to the given offset.
    memory_utils::Copy(in2_place,
                       in2_data + input2_offset,
                       in1_place,
                       in1_data,
                       in1_value.numel() * sizeof(T),
                       context.stream());
  }
};
template struct SelectedRowsAddTo<phi::GPUContext, float>;
template struct SelectedRowsAddTo<phi::GPUContext, double>;
template struct SelectedRowsAddTo<phi::GPUContext, int>;
template struct SelectedRowsAddTo<phi::GPUContext, int64_t>;
template struct SelectedRowsAddTo<phi::GPUContext, phi::dtype::float16>;
namespace {
// Scatter-adds rows of a SelectedRows value tensor into a dense tensor.
// Launch layout: one block per source row; block b accumulates source row b
// into destination row rows[b]. Threads cover the row's columns in strides
// of block_size (assumed equal to blockDim.x).
template <typename T, int block_size>
__global__ void SelectedRowsAddToTensorKernel(const T* selected_rows,
                                              const int64_t* rows,
                                              T* tensor_out,
                                              int64_t row_numel) {
  const T* src_row = selected_rows + blockIdx.x * row_numel;
  T* dst_row = tensor_out + rows[blockIdx.x] * row_numel;
  for (int col = threadIdx.x; col < row_numel; col += block_size) {
    // Row ids may repeat across blocks, so a plain += would race;
    // atomic accumulation keeps the result correct.
    phi::CudaAtomicAdd(dst_row + col, src_row[col]);
  }
}
} // namespace
// Accumulates a SelectedRows into a dense tensor in place:
// input2[rows[i], :] += input1.value()[i, :]. Unlike SelectedRowsAddTensor,
// input2 is not zeroed first. Heights and widths must match.
template <typename T>
struct SelectedRowsAddToTensor<phi::GPUContext, T> {
  void operator()(const phi::GPUContext& context,
                  const phi::SelectedRows& input1,
                  phi::DenseTensor* input2) {
    auto in1_height = input1.height();
    auto in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(
        in1_height,
        in2_dims[0],
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     in2_dims[0]));
    auto& in1_value = input1.value();
    auto& in1_rows = input1.rows();
    // Elements per sparse row; must match the dense tensor's width.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        input2->numel() / in1_height,
        phi::errors::InvalidArgument(
            "The two inputs width must be equal."
            "But received first input width = [%d], second input width = [%d]",
            in1_row_numel,
            input2->numel() / in1_height));
    auto* in1_data = in1_value.data<T>();
    auto* in2_data = input2->data<T>();
    const int block_size = 256;
    dim3 threads(block_size, 1);
    // One block per sparse row; the kernel uses atomics, so duplicate row
    // ids are handled correctly.
    dim3 grid(in1_rows.size(), 1);
    phi::MixVector<int64_t> mixv_in1_rows(&in1_rows);
    SelectedRowsAddToTensorKernel<T, block_size>
        <<<grid, threads, 0, context.stream()>>>(
            in1_data,
            mixv_in1_rows.CUDAData(context.GetPlace()),
            in2_data,
            in1_row_numel);
  }
};
template struct SelectedRowsAddToTensor<phi::GPUContext, float>;
template struct SelectedRowsAddToTensor<phi::GPUContext, double>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int>;
template struct SelectedRowsAddToTensor<phi::GPUContext, int64_t>;
template struct SelectedRowsAddToTensor<phi::GPUContext, phi::dtype::float16>;
namespace scatter {
// Merge-add kernel: one block per INPUT row. Thread 0 linearly searches
// out_rows for the output slot whose row id matches input_rows[blockIdx.x]
// and publishes it via shared memory; all threads then atomically add the
// input row into that output row.
// Preconditions: every id in input_rows must appear in out_rows — otherwise
// out_idx is read uninitialized. block_size is assumed == blockDim.x.
template <typename T, int block_size>
__global__ void MergeAddKernel(const T* input,
                               const int64_t* input_rows,
                               T* out,
                               const int64_t* out_rows,
                               size_t out_rows_size,
                               int64_t row_numel) {
  const int ty = blockIdx.x;
  int tid = threadIdx.x;
  __shared__ size_t out_idx;
  if (tid == 0) {
    // O(out_rows_size) scan done by a single thread per block.
    for (size_t i = 0; i < out_rows_size; i++) {
      if (input_rows[ty] == out_rows[i]) {
        out_idx = i;
      }
    }
  }
  __syncthreads();
  input += ty * row_numel;
  out += out_idx * row_numel;
  for (int index = tid; index < row_numel; index += block_size) {
    // Atomic: several input rows (blocks) may map to the same output row.
    phi::CudaAtomicAdd(out + index, input[index]);
  }
}
// Merges duplicated rows of one or more SelectedRows by summing them.
// The merged row set is the sorted union of input row ids (std::set), the
// output value tensor is zeroed, and MergeAddKernel scatter-adds each input
// row into its merged slot. NOTE: the sorted_result flag is currently
// ignored by all overloads (output rows are always sorted via std::set).
template <typename DeviceContext, typename T>
struct MergeAddImpl {
  // Value-returning convenience overload; forwards to the in/out overload.
  phi::SelectedRows operator()(const DeviceContext& context,
                               const phi::SelectedRows& input,
                               const bool sorted_result = false) {
    phi::SelectedRows out;
    (*this)(context, input, &out);
    return out;
  }
  // Single-input merge: output = input with duplicate rows summed.
  void operator()(const DeviceContext& context,
                  const phi::SelectedRows& input,
                  phi::SelectedRows* output,
                  const bool sorted_result = false) {
    phi::Vector<int64_t> input_rows(input.rows());
    // Empty input: leave output untouched.
    if (input_rows.size() == 0) {
      return;
    }
    phi::SelectedRows& out = *output;
    // Deduplicate + sort the row ids.
    std::set<int64_t> row_set(input_rows.begin(), input_rows.end());
    std::vector<int64_t> merge_rows_cpu(row_set.begin(), row_set.end());
    phi::Vector<int64_t> merge_rows(merge_rows_cpu);
    auto input_width = input.value().dims()[1];
    out.set_rows(merge_rows);
    out.set_height(input.height());
    DenseTensor* out_tensor = out.mutable_value();
    out_tensor->Resize(
        phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
    context.template Alloc<T>(out_tensor);
    // Zero the output before atomic accumulation.
    phi::funcs::SetConstant<DeviceContext, T> constant_functor;
    constant_functor(context, out.mutable_value(), static_cast<T>(0));
    auto* out_data = out.mutable_value()->data<T>();
    auto* input_data = input.value().data<T>();
    const int block_size = 256;
    dim3 threads(block_size, 1);
    // One block per (unmerged) input row.
    dim3 grid1(input_rows.size(), 1);
    phi::MixVector<int64_t> mix_vector_input(&input_rows);
    phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
    MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
        input_data,
        mix_vector_input.CUDAData(context.GetPlace()),
        out_data,
        mix_vector_out.CUDAMutableData(context.GetPlace()),
        out.rows().size(),
        input_width);
    mix_vector_out.CopyToCPU();
  }
  // Multi-input merge: output = elementwise sum of all inputs, with
  // duplicate rows (within and across inputs) summed. All non-empty inputs
  // must share width and height.
  void operator()(const DeviceContext& context,
                  const std::vector<const phi::SelectedRows*>& inputs,
                  phi::SelectedRows* output,
                  const bool sorted_result = false) {
    if (inputs.size() == 0) {
      VLOG(3) << "no input! return";
      return;
    }
    // Find the first non-empty input to read width/height from.
    const phi::SelectedRows* has_value_input = nullptr;
    for (auto* in : inputs) {
      if (in->rows().size() > 0) {
        has_value_input = in;
        break;
      }
    }
    if (has_value_input == nullptr) {
      VLOG(3) << "no input has value! just return" << std::endl;
      return;
    }
    auto input_width = has_value_input->value().dims()[1];
    auto input_height = has_value_input->height();
    phi::SelectedRows& out = *output;
    std::set<int64_t> merged_row_set;
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      PADDLE_ENFORCE_EQ(
          input_width,
          input->value().dims()[1],
          phi::errors::InvalidArgument("All input should have same "
                                       "dimension except for the first one."));
      PADDLE_ENFORCE_EQ(
          input_height,
          input->height(),
          phi::errors::InvalidArgument("All input should have same height."));
      merged_row_set.insert(input->rows().begin(), input->rows().end());
    }
    std::vector<int64_t> merge_rows_cpu(merged_row_set.begin(),
                                       merged_row_set.end());
    phi::Vector<int64_t> merge_rows(merge_rows_cpu);
    out.set_rows(merge_rows);
    out.set_height(input_height);
    DenseTensor* out_tensor = out.mutable_value();
    out_tensor->Resize(
        phi::make_ddim({static_cast<int64_t>(merge_rows.size()), input_width}));
    context.template Alloc<T>(out_tensor);
    phi::funcs::SetConstant<DeviceContext, T> constant_functor;
    constant_functor(context, out.mutable_value(), static_cast<T>(0));
    auto* out_data = out.mutable_value()->data<T>();
    const int block_size = 256;
    dim3 threads(block_size, 1);
    // Accumulate each input into the shared merged output, one launch per
    // input, all on the same stream (so they execute in order).
    for (auto* input : inputs) {
      if (input->rows().size() == 0) {
        continue;
      }
      auto* input_data = input->value().data<T>();
      auto& input_rows = input->rows();
      dim3 grid1(input_rows.size(), 1);
      phi::MixVector<int64_t> mix_vector_input(&input_rows);
      phi::MixVector<int64_t> mix_vector_out(out.mutable_rows());
      MergeAddKernel<T, 256><<<grid1, threads, 0, context.stream()>>>(
          input_data,
          mix_vector_input.CUDAData(context.GetPlace()),
          out_data,
          mix_vector_out.CUDAMutableData(context.GetPlace()),
          out.rows().size(),
          input_width);
      mix_vector_out.CopyToCPU();
    }
  }
};
// Public merge-add entry point for the GPU context: sums duplicated rows of
// one or more SelectedRows. Every overload is a thin forwarder to
// MergeAddImpl, which holds the actual implementation.
template <typename T>
struct MergeAdd<phi::GPUContext, T> {
  // unary functor, merge by adding duplicated rows in
  // the input SelectedRows object.
  phi::SelectedRows operator()(const phi::GPUContext& context,
                               const phi::SelectedRows& input,
                               const bool sorted_result) {
    MergeAddImpl<phi::GPUContext, T> impl;
    return impl(context, input, sorted_result);
  }
  void operator()(const phi::GPUContext& context,
                  const phi::SelectedRows& input,
                  phi::SelectedRows* output,
                  const bool sorted_result) {
    MergeAddImpl<phi::GPUContext, T> impl;
    impl(context, input, output, sorted_result);
  }
  void operator()(const phi::GPUContext& context,
                  const std::vector<const phi::SelectedRows*>& inputs,
                  phi::SelectedRows* output,
                  const bool sorted_result) {
    MergeAddImpl<phi::GPUContext, T> impl;
    impl(context, inputs, output, sorted_result);
  }
};
// Explicitly instantiates MergeAddImpl and MergeAdd for one dtype.
#define TEMPLATE_SPECIALIZED_FOR_MERGEADD(dtype)     \
  template struct MergeAddImpl<phi::GPUContext, dtype>; \
  template struct MergeAdd<phi::GPUContext, dtype>;
TEMPLATE_SPECIALIZED_FOR_MERGEADD(float)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(double)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(int64_t)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::float16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::bfloat16)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<float>)
TEMPLATE_SPECIALIZED_FOR_MERGEADD(phi::dtype::complex<double>)
// Applies a scatter op (assign/add/sub/mul/div and reversed variants) from
// the rows of a SelectedRows value tensor onto a dense tensor.
// Launch layout: one block per selected row; threads stride over columns in
// steps of block_size (assumed == blockDim.x).
// NOTE: writes are NOT atomic, so rows[] must not contain duplicates — the
// caller (UpdateToTensor below) merges duplicates first.
template <typename T, int block_size>
__global__ void UpdateToTensorKernel(const T* selected_rows,
                                     const int64_t* rows,
                                     const ScatterOps& op,
                                     T* tensor_out,
                                     int64_t row_numel) {
  const int ty = blockIdx.x;
  int tid = threadIdx.x;
  selected_rows += ty * row_numel;
  tensor_out += rows[ty] * row_numel;
  // FIXME(typhoonzero): use macro fix the below messy code.
  switch (op) {
    case ScatterOps::ASSIGN:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index];
      }
      break;
    case ScatterOps::ADD:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] += selected_rows[index];
      }
      break;
    case ScatterOps::SUB:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] -= selected_rows[index];
      }
      break;
    case ScatterOps::SUBBY:
      // Reversed subtraction: out = src - out.
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index] - tensor_out[index];
      }
      break;
    case ScatterOps::MUL:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] *= selected_rows[index];
      }
      break;
    case ScatterOps::DIV:
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] /= selected_rows[index];
      }
      break;
    case ScatterOps::DIVBY:
      // Reversed division: out = src / out.
      for (int index = tid; index < row_numel; index += block_size) {
        tensor_out[index] = selected_rows[index] / tensor_out[index];
      }
      break;
  }
}
// Applies scatter op `op` from input1 (a SelectedRows) onto dense tensor
// input2. Duplicated rows in input1 are first merged (summed) so that the
// non-atomic UpdateToTensorKernel sees unique row ids.
template <typename T>
struct UpdateToTensor<phi::GPUContext, T> {
  void operator()(const phi::GPUContext& context,
                  const ScatterOps& op,
                  const phi::SelectedRows& input1,
                  DenseTensor* input2) {
    // NOTE: Use SelectedRowsAddToTensor for better performance
    // no additional MergeAdd called.
    MergeAdd<phi::GPUContext, T> merge_func;
    auto merged_in1 = merge_func(context, input1);
    auto in1_height = merged_in1.height();
    auto in2_dims = input2->dims();
    PADDLE_ENFORCE_EQ(
        in1_height,
        in2_dims[0],
        phi::errors::InvalidArgument("The two inputs height must be equal."
                                     "But received first input height = "
                                     "[%d], second input height = [%d]",
                                     in1_height,
                                     in2_dims[0]));
    auto& in1_value = merged_in1.value();
    auto& in1_rows = merged_in1.rows();
    // Elements per merged row; must match the dense tensor's width.
    int64_t in1_row_numel = in1_value.numel() / in1_rows.size();
    PADDLE_ENFORCE_EQ(
        in1_row_numel,
        input2->numel() / in1_height,
        phi::errors::InvalidArgument(
            "The two inputs width must be equal."
            "But received first input width = [%d], second input width = [%d]",
            in1_row_numel,
            input2->numel() / in1_height));
    auto* in1_data = in1_value.template data<T>();
    auto* in2_data = input2->data<T>();
    dim3 threads(phi::PADDLE_CUDA_NUM_THREADS, 1);
    // One block per merged row.
    dim3 grid(in1_rows.size(), 1);
    UpdateToTensorKernel<T, phi::PADDLE_CUDA_NUM_THREADS>
        <<<grid, threads, 0, context.stream()>>>(
            in1_data, in1_rows.cuda_data(), op, in2_data, in1_row_numel);
  }
};
} // namespace scatter
} // namespace funcs
} // namespace phi
|
024ec74edfc5d7dd32e6ebd12062e87efc801c7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/utils/math/elementwise.h"
#include <type_traits>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math/half_utils.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
const int i = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (i < N) {
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
c10::hip::compat::sincos(__ldg(X + i), S + i, C + i);
#else
c10::hip::compat::sincos(X[i], S + i, C + i);
#endif
}
}
#if defined(USE_ROCM)
// Y[i] += alpha * X[i] with a host-side scalar alpha. One thread per
// element; the launch is expected to use CAFFE_CUDA_NUM_THREADS per block.
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
    const std::int64_t N,
    const TAlpha alpha,
    const TData* X,
    TData* Y) {
  const int64_t index = static_cast<int64_t>(blockIdx.x) *
          static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
      static_cast<int64_t>(threadIdx.x);
  if (index < N) {
    Y[index] += static_cast<TData>(alpha) * __ldg(X + index);
  }
}
// Y[i] += (*alpha) * X[i] with a device-side scalar alpha. Thread 0 of each
// block loads alpha once into shared memory and all threads read it after
// the barrier, avoiding a per-thread global load.
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
    const std::int64_t N,
    const TAlpha* alpha,
    const TData* X,
    TData* Y) {
  __shared__ TData a;
  if (threadIdx.x == 0) {
    a = static_cast<TData>(__ldg(alpha));
  }
  __syncthreads();
  const int64_t index = static_cast<int64_t>(blockIdx.x) *
          static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
      static_cast<int64_t>(threadIdx.x);
  if (index < N) {
    Y[index] += a * __ldg(X + index);
  }
}
// Specializes both AxpyCUDAKernel overloads for at::Half data: values are
// converted to TAlpha (float), combined with FMAFunc (fmaf), and converted
// back, so the accumulation happens in full precision.
#define DELEGATE_HALF_AXPY_CUDA_KERNEL(TAlpha, FMAFunc)       \
  template <>                                                 \
  __global__ void AxpyCUDAKernel<TAlpha, at::Half>(           \
      const std::int64_t N,                                   \
      const TAlpha alpha,                                     \
      const at::Half* X,                                      \
      at::Half* Y) {                                          \
    const int64_t index = static_cast<int64_t>(blockIdx.x) *  \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +    \
        static_cast<int64_t>(threadIdx.x);                    \
    if (index < N) {                                          \
      Y[index] = convert::To<TAlpha, at::Half>(FMAFunc(       \
          alpha,                                              \
          convert::To<at::Half, TAlpha>(X[index]),            \
          convert::To<at::Half, TAlpha>(Y[index])));          \
    }                                                         \
  }                                                           \
  template <>                                                 \
  __global__ void AxpyCUDAKernel<TAlpha, at::Half>(           \
      const std::int64_t N,                                   \
      const TAlpha* alpha,                                    \
      const at::Half* X,                                      \
      at::Half* Y) {                                          \
    __shared__ TAlpha a;                                      \
    if (threadIdx.x == 0) {                                   \
      a = __ldg(alpha);                                       \
    }                                                         \
    __syncthreads();                                          \
    const int64_t index = static_cast<int64_t>(blockIdx.x) *  \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +    \
        static_cast<int64_t>(threadIdx.x);                    \
    if (index < N) {                                          \
      Y[index] = convert::To<TAlpha, at::Half>(FMAFunc(       \
          a,                                                  \
          convert::To<at::Half, TAlpha>(X[index]),            \
          convert::To<at::Half, TAlpha>(Y[index])));          \
    }                                                         \
  }
DELEGATE_HALF_AXPY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPY_CUDA_KERNEL
#endif // USE_ROCM
// Y[i] = alpha * X[i] + beta * Y[i]. Two overloads: host-side scalars, and
// device-side scalar pointers (broadcast through shared memory). Only the
// explicit specializations generated below are defined.
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
    const std::int64_t N,
    const TAlpha alpha,
    const TData* X,
    const TAlpha beta,
    TData* Y);
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
    const std::int64_t N,
    const TAlpha* alpha,
    const TData* X,
    const TAlpha* beta,
    TData* Y);
// Generates both AxpbyCUDAKernel specializations for (TAlpha, TData),
// using FMAFunc (fmaf/fma) for the fused multiply-add.
#define DELEGATE_AXPBY_CUDA_KERNEL(TAlpha, TData, FMAFunc)   \
  template <>                                                \
  __global__ void AxpbyCUDAKernel<TAlpha, TData>(            \
      const std::int64_t N,                                  \
      const TAlpha alpha,                                    \
      const TData* X,                                        \
      const TAlpha beta,                                     \
      TData* Y) {                                            \
    const int64_t index = static_cast<int64_t>(blockIdx.x) * \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +   \
        static_cast<int64_t>(threadIdx.x);                   \
    if (index < N) {                                         \
      Y[index] = FMAFunc(                                    \
          static_cast<TData>(alpha),                         \
          X[index],                                          \
          static_cast<TData>(beta) * Y[index]);              \
    }                                                        \
  }                                                          \
  template <>                                                \
  __global__ void AxpbyCUDAKernel<TAlpha, TData>(            \
      const std::int64_t N,                                  \
      const TAlpha* alpha,                                   \
      const TData* X,                                        \
      const TAlpha* beta,                                    \
      TData* Y) {                                            \
    __shared__ TData a;                                      \
    __shared__ TData b;                                      \
    if (threadIdx.x == 0) {                                  \
      a = static_cast<TData>(*alpha);                        \
      b = static_cast<TData>(*beta);                         \
    }                                                        \
    __syncthreads();                                         \
    const int64_t index = static_cast<int64_t>(blockIdx.x) * \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +   \
        static_cast<int64_t>(threadIdx.x);                   \
    if (index < N) {                                         \
      Y[index] = FMAFunc(a, X[index], b * Y[index]);         \
    }                                                        \
  }
DELEGATE_AXPBY_CUDA_KERNEL(float, float, fmaf)
DELEGATE_AXPBY_CUDA_KERNEL(float, double, fma)
#undef DELEGATE_AXPBY_CUDA_KERNEL
// Specializes both AxpbyCUDAKernel overloads for at::Half data: X and Y are
// converted to TAlpha (float), combined with FMAFunc, and converted back.
#define DELEGATE_HALF_AXPBY_CUDA_KERNEL(TAlpha, FMAFunc)     \
  template <>                                                \
  __global__ void AxpbyCUDAKernel<TAlpha, at::Half>(         \
      const std::int64_t N,                                  \
      const TAlpha alpha,                                    \
      const at::Half* X,                                     \
      const TAlpha beta,                                     \
      at::Half* Y) {                                         \
    const int64_t index = static_cast<int64_t>(blockIdx.x) * \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +   \
        static_cast<int64_t>(threadIdx.x);                   \
    if (index < N) {                                         \
      Y[index] = convert::To<TAlpha, at::Half>(FMAFunc(      \
          alpha,                                             \
          convert::To<at::Half, TAlpha>(X[index]),           \
          beta * convert::To<at::Half, TAlpha>(Y[index])));  \
    }                                                        \
  }                                                          \
  template <>                                                \
  __global__ void AxpbyCUDAKernel<TAlpha, at::Half>(         \
      const std::int64_t N,                                  \
      const TAlpha* alpha,                                   \
      const at::Half* X,                                     \
      const TAlpha* beta,                                    \
      at::Half* Y) {                                         \
    __shared__ TAlpha a;                                     \
    __shared__ TAlpha b;                                     \
    if (threadIdx.x == 0) {                                  \
      a = *alpha;                                            \
      b = *beta;                                             \
    }                                                        \
    __syncthreads();                                         \
    const int64_t index = static_cast<int64_t>(blockIdx.x) * \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +   \
        static_cast<int64_t>(threadIdx.x);                   \
    if (index < N) {                                         \
      Y[index] = convert::To<TAlpha, at::Half>(FMAFunc(      \
          a,                                                 \
          convert::To<at::Half, TAlpha>(X[index]),           \
          b * convert::To<at::Half, TAlpha>(Y[index])));     \
    }                                                        \
  }
DELEGATE_HALF_AXPBY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPBY_CUDA_KERNEL
// Y[i] = alpha * X[i]. Two overloads: host-side scalar alpha, and
// device-side alpha pointer (broadcast through shared memory). Only the
// explicit specializations generated below are defined.
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
    const std::int64_t N,
    const TAlpha alpha,
    const TData* X,
    TData* Y);
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
    const std::int64_t N,
    const TAlpha* alpha,
    const TData* X,
    TData* Y);
// Generates both ScaleCUDAKernel specializations for (TAlpha, TData).
#define CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(TAlpha, TData)               \
  template <>                                                             \
  __global__ void ScaleCUDAKernel<TAlpha, TData>(                         \
      const std::int64_t N, const TAlpha alpha, const TData* X, TData* Y) { \
    const int64_t index = static_cast<int64_t>(blockIdx.x) *              \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +                \
        static_cast<int64_t>(threadIdx.x);                                \
    if (index < N) {                                                      \
      Y[index] = static_cast<TData>(alpha) * X[index];                    \
    }                                                                     \
  }                                                                       \
  template <>                                                             \
  __global__ void ScaleCUDAKernel<TAlpha, TData>(                        \
      const std::int64_t N, const TAlpha* alpha, const TData* X, TData* Y) { \
    __shared__ TData a;                                                   \
    if (threadIdx.x == 0) {                                               \
      a = static_cast<TData>(*alpha);                                     \
    }                                                                     \
    __syncthreads();                                                      \
    const int64_t index = static_cast<int64_t>(blockIdx.x) *              \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +                \
        static_cast<int64_t>(threadIdx.x);                                \
    if (index < N) {                                                      \
      Y[index] = a * X[index];                                            \
    }                                                                     \
  }
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, float)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(double, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL
// Specializes both ScaleCUDAKernel overloads for at::Half data: values are
// converted to TAlpha (float), scaled, and converted back.
#define CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(TAlpha)    \
  template <>                                                \
  __global__ void ScaleCUDAKernel<TAlpha, at::Half>(         \
      const std::int64_t N,                                  \
      const TAlpha alpha,                                    \
      const at::Half* X,                                     \
      at::Half* Y) {                                         \
    const int64_t index = static_cast<int64_t>(blockIdx.x) * \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +   \
        static_cast<int64_t>(threadIdx.x);                   \
    if (index < N) {                                         \
      Y[index] = convert::To<TAlpha, at::Half>(              \
          alpha * convert::To<at::Half, TAlpha>(X[index]));  \
    }                                                        \
  }                                                          \
  template <>                                                \
  __global__ void ScaleCUDAKernel<TAlpha, at::Half>(         \
      const std::int64_t N,                                  \
      const TAlpha* alpha,                                   \
      const at::Half* X,                                     \
      at::Half* Y) {                                         \
    __shared__ TAlpha a;                                     \
    if (threadIdx.x == 0) {                                  \
      a = *alpha;                                            \
    }                                                        \
    __syncthreads();                                         \
    const int64_t index = static_cast<int64_t>(blockIdx.x) * \
            static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +   \
        static_cast<int64_t>(threadIdx.x);                   \
    if (index < N) {                                         \
      Y[index] = convert::To<TAlpha, at::Half>(              \
          a * convert::To<at::Half, TAlpha>(X[index]));      \
    }                                                        \
  }
CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(float)
#undef CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL
} // namespace
// Defines Set<T, CUDAContext>: fills Y[0..N) with alpha on the context
// stream. alpha == 0 takes a fast path via hipMemsetAsync (all-zero bytes
// represent zero for every instantiated type); otherwise thrust::fill.
#define CAFFE2_SPECIALIZED_CUDA_SET(T)                                        \
  template <>                                                                 \
  CAFFE2_CUDA_EXPORT void Set<T, CUDAContext>(                                \
      const std::int64_t N, const T alpha, T* Y, CUDAContext* context) {      \
    if (N == 0) {                                                             \
      return;                                                                 \
    }                                                                         \
    if (alpha == T(0)) {                                                      \
      C10_HIP_CHECK(hipMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream())); \
    } else {                                                                  \
      thrust::fill(                                                           \
          thrust::hip::par.on(context->cuda_stream()), Y, Y + N, alpha);      \
    }                                                                         \
  }
CAFFE2_SPECIALIZED_CUDA_SET(bool)
CAFFE2_SPECIALIZED_CUDA_SET(char)
CAFFE2_SPECIALIZED_CUDA_SET(std::int8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int16_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint16_t)
CAFFE2_SPECIALIZED_CUDA_SET(float)
CAFFE2_SPECIALIZED_CUDA_SET(double)
CAFFE2_SPECIALIZED_CUDA_SET(at::Half)
CAFFE2_SPECIALIZED_CUDA_SET(at::BFloat16)
#undef CAFFE2_SPECIALIZED_CUDA_SET
// Defines Func<T, CUDAContext>(N, X, Y): element-wise Y[i] = DeviceFunc(X[i])
// via thrust::transform on the context stream (no-op when N <= 0).
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, DeviceFunc) \
  template <>                                                    \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                  \
      const int N, const T* X, T* Y, CUDAContext* context) {     \
    if (N > 0) {                                                 \
      thrust::transform(                                         \
          thrust::hip::par.on(context->cuda_stream()),           \
          X,                                                     \
          X + N,                                                 \
          Y,                                                     \
          [] __device__(const T x) { return DeviceFunc(x); });   \
    }                                                            \
  }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log1p, log1pf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Cube,
    utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Cube,
    utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, CdfNorm, normcdff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, CdfNorm, normcdf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not<bool>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Neg,
    utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Neg,
    utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int32_t,
    Sign,
    utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
    std::int64_t,
    Sign,
    utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
// Defines Powx<T, CUDAContext>(N, A, b, Y): Y[i] = DeviceFunc(A[i], b).
// No N > 0 guard is needed: thrust::transform on an empty range is a no-op.
#define DELEGATE_CUDA_POWX(T, DeviceFunc)                               \
  template <>                                                           \
  CAFFE2_CUDA_EXPORT void Powx<T, CUDAContext>(                         \
      const int N, const T* A, const T b, T* Y, CUDAContext* context) { \
    thrust::transform(                                                  \
        thrust::hip::par.on(context->cuda_stream()),                    \
        A,                                                              \
        A + N,                                                          \
        Y,                                                              \
        [b] __device__(const T x) { return DeviceFunc(x, b); });        \
  }
DELEGATE_CUDA_POWX(float, powf)
#undef DELEGATE_CUDA_POWX
// Defines SinCos<T, CUDAContext>: launches SinCosCUDAKernel with ceil-div
// grid sizing and checks the launch for errors.
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T)                                  \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>(                          \
      const int N, const T* X, T* S, T* C, CUDAContext* context) {         \
    if (N > 0) {                                                           \
      const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS);                      \
      hipLaunchKernelGGL(( SinCosCUDAKernel<T>)                            \
          , dim3(K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
             N, X, S, C);                                                  \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                       \
    }                                                                      \
  }
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
// Defines Scale<T, T, CUDAContext> (host-scalar and device-pointer alpha).
// In-place (Y == X) uses cuBLAS scal with the matching pointer mode;
// out-of-place launches ScaleCUDAKernel.
#define DELEGATE_CUDA_SCALE(T, CuBLASFunc)                                  \
  template <>                                                               \
  CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>(                         \
      const std::int64_t N,                                                 \
      const T alpha,                                                        \
      const T* X,                                                           \
      T* Y,                                                                 \
      CUDAContext* context) {                                               \
    if (N == 0) {                                                           \
      return;                                                               \
    }                                                                       \
    if (Y == X) {                                                           \
      CUBLAS_ENFORCE(hipblasSetPointerMode(                                 \
          context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));            \
      CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, Y, 1)); \
    } else {                                                                \
      const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
      hipLaunchKernelGGL(( ScaleCUDAKernel<T, T>)                           \
          , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
             N, alpha, X, Y);                                               \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                        \
    }                                                                       \
  }                                                                         \
  template <>                                                               \
  CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>(                         \
      const std::int64_t N,                                                 \
      const T* alpha,                                                       \
      const T* X,                                                           \
      T* Y,                                                                 \
      CUDAContext* context) {                                               \
    if (N == 0) {                                                           \
      return;                                                               \
    }                                                                       \
    if (Y == X) {                                                           \
      CUBLAS_ENFORCE(hipblasSetPointerMode(                                 \
          context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));          \
      CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, Y, 1)); \
    } else {                                                                \
      const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
      hipLaunchKernelGGL(( ScaleCUDAKernel<T, T>)                           \
          , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
             N, alpha, X, Y);                                               \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                        \
    }                                                                       \
  }
DELEGATE_CUDA_SCALE(float, hipblasSscal)
DELEGATE_CUDA_SCALE(double, hipblasDscal)
#undef DELEGATE_CUDA_SCALE
#if !defined(USE_ROCM)
// Mixed-precision Scale<TAlpha, TData>: in-place uses hipblasScalEx_v2 with
// explicit alpha/data/execution datatypes; out-of-place uses the kernel.
// CUDA-only path (the ROCm build defines these via the plain kernel macro).
#define DELEGATE_CUDA_SCALE_EX(                                             \
    TAlpha, TData, kAlphaType, kDataType, kExecutionType)                   \
  template <>                                                               \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                \
      const std::int64_t N,                                                 \
      const TAlpha alpha,                                                   \
      const TData* X,                                                       \
      TData* Y,                                                             \
      CUDAContext* context) {                                               \
    if (N == 0) {                                                           \
      return;                                                               \
    }                                                                       \
    if (Y == X) {                                                           \
      CUBLAS_ENFORCE(hipblasSetPointerMode(                                 \
          context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));            \
      CUBLAS_ENFORCE(hipblasScalEx_v2(                                      \
          context->cublas_handle(),                                         \
          N,                                                                \
          &alpha,                                                           \
          kAlphaType,                                                       \
          Y,                                                                \
          kDataType,                                                        \
          1,                                                                \
          kExecutionType));                                                 \
    } else {                                                                \
      const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
      hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>)                  \
          , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
             N, alpha, X, Y);                                               \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                        \
    }                                                                       \
  }                                                                         \
  template <>                                                               \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                \
      const std::int64_t N,                                                 \
      const TAlpha* alpha,                                                  \
      const TData* X,                                                       \
      TData* Y,                                                             \
      CUDAContext* context) {                                               \
    if (N == 0) {                                                           \
      return;                                                               \
    }                                                                       \
    if (Y == X) {                                                           \
      CUBLAS_ENFORCE(hipblasSetPointerMode(                                 \
          context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));          \
      CUBLAS_ENFORCE(hipblasScalEx_v2(                                      \
          context->cublas_handle(),                                         \
          N,                                                                \
          alpha,                                                            \
          kAlphaType,                                                       \
          Y,                                                                \
          kDataType,                                                        \
          1,                                                                \
          kExecutionType));                                                 \
    } else {                                                                \
      const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
      hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>)                  \
          , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), \
             N, alpha, X, Y);                                               \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                        \
    }                                                                       \
  }
DELEGATE_CUDA_SCALE_EX(float, double, HIP_R_32F, HIP_R_64F, HIP_R_64F)
DELEGATE_CUDA_SCALE_EX(float, at::Half, HIP_R_32F, HIP_R_16F, HIP_R_32F)
#undef DELEGATE_CUDA_SCALE_EX
#endif // USE_ROCM
// Generates Scale<TAlpha, TData> specializations that always launch
// ScaleCUDAKernel (no BLAS fast path); used below for the integer types and,
// on ROCm, the mixed float/double and float/half cases.
// NOTE(review): the pointer-alpha overload dereferences *alpha on the host
// before the launch, so it assumes a host-resident alpha for these
// instantiations — confirm against callers.
// Comments are kept outside the macro body because a '//' comment would
// absorb the trailing line-continuation backslash.
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData)                         \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                 \
      const std::int64_t N,                                                  \
      const TAlpha alpha,                                                    \
      const TData* X,                                                        \
      TData* Y,                                                              \
      CUDAContext* context) {                                                \
    if (N > 0) {                                                             \
      const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
      hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>)                   \
          , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),  \
          N, alpha, X, Y);                                                   \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                         \
    }                                                                        \
  }                                                                          \
  template <>                                                                \
  CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>(                 \
      const std::int64_t N,                                                  \
      const TAlpha* alpha,                                                   \
      const TData* X,                                                        \
      TData* Y,                                                              \
      CUDAContext* context) {                                                \
    if (N > 0) {                                                             \
      const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
      hipLaunchKernelGGL(( ScaleCUDAKernel<TAlpha, TData>)                   \
          , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),  \
          N, *alpha, X, Y);                                                  \
      C10_HIP_KERNEL_LAUNCH_CHECK();                                         \
    }                                                                        \
  }
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#if defined(USE_ROCM)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, double)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, at::Half)
#endif // USE_ROCM
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
// Generates elementwise binary specializations C[i] = DeviceFunc(A[i], B[i])
// by delegating to thrust::transform on the context's stream. The N > 0
// guard skips the launch for empty inputs. Comments are kept outside the
// macro body because '//' would absorb the line-continuation backslash.
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Func, DeviceFunc) \
  template <>                                                     \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                   \
      const int N, const T* A, const T* B, T* C, CUDAContext* context) { \
    if (N > 0) {                                                  \
      thrust::transform(                                          \
          thrust::hip::par.on(context->cuda_stream()),            \
          A,                                                      \
          A + N,                                                  \
          B,                                                      \
          C,                                                      \
          DeviceFunc);                                            \
    }                                                             \
  }
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Add,
thrust::plus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Add,
thrust::plus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, thrust::plus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, thrust::plus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Add, utils::HalfAddFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Sub,
thrust::minus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Sub,
thrust::minus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, thrust::minus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, thrust::minus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Sub, utils::HalfSubFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Mul,
thrust::multiplies<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Mul,
thrust::multiplies<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, thrust::multiplies<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, thrust::multiplies<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Mul, utils::HalfMulFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Div,
thrust::divides<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Div,
thrust::divides<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, thrust::divides<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, thrust::divides<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Div, utils::HalfDivFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Min, thrust::minimum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Min, thrust::minimum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Max, thrust::maximum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Max, thrust::maximum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, And, thrust::logical_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Or, thrust::logical_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Xor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseAnd, thrust::bit_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseAnd,
thrust::bit_and<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseAnd,
thrust::bit_and<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseOr, thrust::bit_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseOr,
thrust::bit_or<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseOr,
thrust::bit_or<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseXor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseXor,
thrust::bit_xor<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseXor,
thrust::bit_xor<std::int64_t>())
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
// Generates elementwise comparison specializations C[i] = DeviceComp(A[i],
// B[i]) with a bool output array, via thrust::transform on the context's
// stream; no-op for N == 0. Comments are kept outside the macro body because
// '//' would absorb the line-continuation backslash.
#define DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(T, Func, DeviceComp)        \
  template <>                                                             \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                           \
      const int N, const T* A, const T* B, bool* C, CUDAContext* context) { \
    if (N > 0) {                                                          \
      thrust::transform(                                                  \
          thrust::hip::par.on(context->cuda_stream()),                    \
          A,                                                              \
          A + N,                                                          \
          B,                                                              \
          C,                                                              \
          DeviceComp);                                                    \
    }                                                                     \
  }
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, EQ, thrust::equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
EQ,
thrust::equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
EQ,
thrust::equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, EQ, thrust::equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, EQ, thrust::equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, NE, thrust::not_equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
NE,
thrust::not_equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
NE,
thrust::not_equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, NE, thrust::not_equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
NE,
thrust::not_equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LT, thrust::less<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LT,
thrust::less<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LT,
thrust::less<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LT, thrust::less<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LT, thrust::less<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LE, thrust::less_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LE,
thrust::less_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LE,
thrust::less_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LE, thrust::less_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LE, thrust::less_equal<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GT, thrust::greater<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GT,
thrust::greater<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GT,
thrust::greater<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GT, thrust::greater<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, GT, thrust::greater<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GE, thrust::greater_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GE,
thrust::greater_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GE,
thrust::greater_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GE, thrust::greater_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
GE,
thrust::greater_equal<double>())
#undef DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION
// Generates the two Axpy<T, T> specializations (Y += alpha * X) that delegate
// to the given hipBLAS axpy routine: one taking alpha by value (host pointer
// mode) and one taking alpha as a device pointer (device pointer mode).
// Comments are kept outside the macro body because a '//' comment would
// absorb the trailing line-continuation backslash.
// Fix: the device-pointer overload previously hardcoded hipblasSaxpy instead
// of using the CuBLASFunc macro argument; harmless for the current
// float/hipblasSaxpy instantiation but it would silently call the float
// routine for any future non-float instantiation.
#define DELEGATE_CUDA_AXPY(T, CuBLASFunc)                           \
  template <>                                                       \
  CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>(                  \
      const std::int64_t N,                                         \
      const T alpha,                                                \
      const T* X,                                                   \
      T* Y,                                                         \
      CUDAContext* context) {                                       \
    CUBLAS_ENFORCE(hipblasSetPointerMode(                           \
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));      \
    CUBLAS_ENFORCE(                                                 \
        CuBLASFunc(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); \
  }                                                                 \
  template <>                                                       \
  CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>(                  \
      const std::int64_t N,                                         \
      const T* alpha,                                               \
      const T* X,                                                   \
      T* Y,                                                         \
      CUDAContext* context) {                                       \
    CUBLAS_ENFORCE(hipblasSetPointerMode(                           \
        context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));    \
    CUBLAS_ENFORCE(                                                 \
        CuBLASFunc(context->cublas_handle(), N, alpha, X, 1, Y, 1)); \
  }
DELEGATE_CUDA_AXPY(float, hipblasSaxpy)
#undef DELEGATE_CUDA_AXPY
#if !defined(USE_ROCM)
// Generates mixed-precision Axpy<TAlpha, TData> (Y += alpha * X)
// specializations via hipblasAxpyEx_v2, with the usual pair of overloads:
// alpha by value (host pointer mode) and alpha as a device pointer (device
// pointer mode). NOTE(review): guarded by !defined(USE_ROCM) yet calling
// hipBLAS — hipify artifact, same as DELEGATE_CUDA_SCALE_EX.
// Comments are kept outside the macro body because a '//' comment would
// absorb the trailing line-continuation backslash.
#define DELEGATE_CUDA_AXPY_EX(                            \
    TAlpha, TData, kAlphaType, kDataType, kExecutionType) \
  template <>                                             \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
      const std::int64_t N,                               \
      const TAlpha alpha,                                 \
      const TData* X,                                     \
      TData* Y,                                           \
      CUDAContext* context) {                             \
    CUBLAS_ENFORCE(hipblasSetPointerMode(                 \
        context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST)); \
    CUBLAS_ENFORCE(hipblasAxpyEx_v2(                      \
        context->cublas_handle(),                         \
        N,                                                \
        &alpha,                                           \
        kAlphaType,                                       \
        X,                                                \
        kDataType,                                        \
        1,                                                \
        Y,                                                \
        kDataType,                                        \
        1,                                                \
        kExecutionType));                                 \
  }                                                       \
  template <>                                             \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>( \
      const std::int64_t N,                               \
      const TAlpha* alpha,                                \
      const TData* X,                                     \
      TData* Y,                                           \
      CUDAContext* context) {                             \
    CUBLAS_ENFORCE(hipblasSetPointerMode(                 \
        context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE)); \
    CUBLAS_ENFORCE(hipblasAxpyEx_v2(                      \
        context->cublas_handle(),                         \
        N,                                                \
        alpha,                                            \
        kAlphaType,                                       \
        X,                                                \
        kDataType,                                        \
        1,                                                \
        Y,                                                \
        kDataType,                                        \
        1,                                                \
        kExecutionType));                                 \
  }
DELEGATE_CUDA_AXPY_EX(float, double, HIP_R_32F, HIP_R_64F, HIP_R_64F)
DELEGATE_CUDA_AXPY_EX(float, at::Half, HIP_R_32F, HIP_R_16F, HIP_R_32F)
#undef DELEGATE_CUDA_AXPY_EX
#else // USE_ROCM
// ROCm-side replacement for DELEGATE_CUDA_AXPY_EX: generates mixed-precision
// Axpy<TAlpha, TData> specializations that always launch AxpyCUDAKernel (one
// overload with alpha by value, one with alpha as a device pointer).
// NOTE(review): unlike the Scale delegates there is no N > 0 guard here, so
// N == 0 still performs an (empty) launch — confirm intended.
// Comments are kept outside the macro body because '//' would absorb the
// line-continuation backslash.
#define CAFFE2_SPECIALIZED_CUDA_AXPY(TAlpha, TData)                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>(                \
      const std::int64_t N,                                                \
      const TAlpha alpha,                                                  \
      const TData* X,                                                      \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    hipLaunchKernelGGL(( AxpyCUDAKernel<TAlpha, TData>)                    \
        , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),  \
        N, alpha, X, Y);                                                   \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                         \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>(                \
      const std::int64_t N,                                                \
      const TAlpha* alpha,                                                 \
      const TData* X,                                                      \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    hipLaunchKernelGGL(( AxpyCUDAKernel<TAlpha, TData>)                    \
        , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),  \
        N, alpha, X, Y);                                                   \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                         \
  }
CAFFE2_SPECIALIZED_CUDA_AXPY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPY
#endif // USE_ROCM
// Generates Axpby<TAlpha, TData> specializations (Y = alpha * X + beta * Y),
// always via AxpbyCUDAKernel; one overload takes alpha/beta by value, the
// other takes both as device pointers. No N > 0 guard (same as the Axpy
// kernels above). Comments are kept outside the macro body because '//'
// would absorb the line-continuation backslash.
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TAlpha, TData)                       \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>(               \
      const std::int64_t N,                                                \
      const TAlpha alpha,                                                  \
      const TData* X,                                                      \
      const TAlpha beta,                                                   \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    hipLaunchKernelGGL(( AxpbyCUDAKernel<TAlpha, TData>)                   \
        , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),  \
        N, alpha, X, beta, Y);                                             \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                         \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>(               \
      const std::int64_t N,                                                \
      const TAlpha* alpha,                                                 \
      const TData* X,                                                      \
      const TAlpha* beta,                                                  \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    hipLaunchKernelGGL(( AxpbyCUDAKernel<TAlpha, TData>)                   \
        , dim3(M), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),  \
        N, alpha, X, beta, Y);                                             \
    C10_HIP_KERNEL_LAUNCH_CHECK();                                         \
  }
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
} // namespace math
} // namespace caffe2
| 024ec74edfc5d7dd32e6ebd12062e87efc801c7e.cu | #include "caffe2/utils/math/elementwise.h"
#include <type_traits>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math/half_utils.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace {
// Element-wise kernel: writes sin(X[idx]) to S[idx] and cos(X[idx]) to C[idx]
// for every idx < N. Launched with one thread per element and a block size of
// CAFFE_CUDA_NUM_THREADS (see CAFFE2_SPECIALIZED_CUDA_SINCOS below).
template <typename T>
__global__ void SinCosCUDAKernel(const int N, const T* X, T* S, T* C) {
  const int idx = blockIdx.x * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
  if (idx >= N) {
    return;
  }
#if __CUDA_ARCH__ >= 350 || defined(USE_ROCM)
  // Read-only-cache load where __ldg is available.
  c10::cuda::compat::sincos(__ldg(X + idx), S + idx, C + idx);
#else
  c10::cuda::compat::sincos(X[idx], S + idx, C + idx);
#endif
}
#if defined(USE_ROCM)
// ROCm-only kernel (enclosing #if defined(USE_ROCM)): Y[i] += alpha * X[i]
// with alpha passed by value. One thread per element; the 64-bit index math
// avoids overflow when blockIdx.x * blockDim.x exceeds 2^31.
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
    const std::int64_t N,
    const TAlpha alpha,
    const TData* X,
    TData* Y) {
  const int64_t i = static_cast<int64_t>(blockIdx.x) *
          static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
      static_cast<int64_t>(threadIdx.x);
  if (i >= N) {
    return;
  }
  Y[i] += static_cast<TData>(alpha) * __ldg(X + i);
}
// ROCm-only kernel: Y[index] += (*alpha) * X[index] with alpha read from
// device memory. Thread 0 loads alpha into shared memory once and the
// __syncthreads() barrier broadcasts it to the block before the guarded
// update — the barrier is intentionally placed before the bounds check so
// every thread reaches it.
template <typename TAlpha, typename TData>
__global__ void AxpyCUDAKernel(
    const std::int64_t N,
    const TAlpha* alpha,
    const TData* X,
    TData* Y) {
  __shared__ TData a;
  if (threadIdx.x == 0) {
    a = static_cast<TData>(__ldg(alpha));
  }
  __syncthreads();
  const int64_t index = static_cast<int64_t>(blockIdx.x) *
      static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +
      static_cast<int64_t>(threadIdx.x);
  if (index < N) {
    Y[index] += a * __ldg(X + index);
  }
}
#define DELEGATE_HALF_AXPY_CUDA_KERNEL(TAlpha, FMAFunc) \
template <> \
__global__ void AxpyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
alpha, \
convert::To<at::Half, TAlpha>(X[index]), \
convert::To<at::Half, TAlpha>(Y[index]))); \
} \
} \
template <> \
__global__ void AxpyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
at::Half* Y) { \
__shared__ TAlpha a; \
if (threadIdx.x == 0) { \
a = __ldg(alpha); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
a, \
convert::To<at::Half, TAlpha>(X[index]), \
convert::To<at::Half, TAlpha>(Y[index]))); \
} \
}
DELEGATE_HALF_AXPY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPY_CUDA_KERNEL
#endif // USE_ROCM
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
const TAlpha beta,
TData* Y);
template <typename TAlpha, typename TData>
__global__ void AxpbyCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
const TAlpha* beta,
TData* Y);
#define DELEGATE_AXPBY_CUDA_KERNEL(TAlpha, TData, FMAFunc) \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, TData>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
const TAlpha beta, \
TData* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = FMAFunc( \
static_cast<TData>(alpha), \
X[index], \
static_cast<TData>(beta) * Y[index]); \
} \
} \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, TData>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
const TAlpha* beta, \
TData* Y) { \
__shared__ TData a; \
__shared__ TData b; \
if (threadIdx.x == 0) { \
a = static_cast<TData>(*alpha); \
b = static_cast<TData>(*beta); \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = FMAFunc(a, X[index], b * Y[index]); \
} \
}
DELEGATE_AXPBY_CUDA_KERNEL(float, float, fmaf)
DELEGATE_AXPBY_CUDA_KERNEL(float, double, fma)
#undef DELEGATE_AXPBY_CUDA_KERNEL
#define DELEGATE_HALF_AXPBY_CUDA_KERNEL(TAlpha, FMAFunc) \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
const TAlpha beta, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
alpha, \
convert::To<at::Half, TAlpha>(X[index]), \
beta * convert::To<at::Half, TAlpha>(Y[index]))); \
} \
} \
template <> \
__global__ void AxpbyCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
const TAlpha* beta, \
at::Half* Y) { \
__shared__ TAlpha a; \
__shared__ TAlpha b; \
if (threadIdx.x == 0) { \
a = *alpha; \
b = *beta; \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>(FMAFunc( \
a, \
convert::To<at::Half, TAlpha>(X[index]), \
b * convert::To<at::Half, TAlpha>(Y[index]))); \
} \
}
DELEGATE_HALF_AXPBY_CUDA_KERNEL(float, fmaf)
#undef DELEGATE_HALF_AXPBY_CUDA_KERNEL
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
const std::int64_t N,
const TAlpha alpha,
const TData* X,
TData* Y);
template <typename TAlpha, typename TData>
__global__ void ScaleCUDAKernel(
const std::int64_t N,
const TAlpha* alpha,
const TData* X,
TData* Y);
// Defines the ScaleCUDAKernel specializations (Y[index] = alpha * X[index]):
// the first overload takes alpha by value, the second reads a device-resident
// alpha via a thread-0 shared-memory load followed by __syncthreads() (the
// barrier precedes the bounds check so all threads reach it).
// Comments are kept outside the macro body because '//' would absorb the
// line-continuation backslash.
#define CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(TAlpha, TData)                  \
  template <>                                                                \
  __global__ void ScaleCUDAKernel<TAlpha, TData>(                            \
      const std::int64_t N, const TAlpha alpha, const TData* X, TData* Y) {  \
    const int64_t index = static_cast<int64_t>(blockIdx.x) *                 \
        static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +                       \
        static_cast<int64_t>(threadIdx.x);                                   \
    if (index < N) {                                                         \
      Y[index] = static_cast<TData>(alpha) * X[index];                       \
    }                                                                        \
  }                                                                          \
  template <>                                                                \
  __global__ void ScaleCUDAKernel<TAlpha, TData>(                            \
      const std::int64_t N, const TAlpha* alpha, const TData* X, TData* Y) { \
    __shared__ TData a;                                                      \
    if (threadIdx.x == 0) {                                                  \
      a = static_cast<TData>(*alpha);                                        \
    }                                                                        \
    __syncthreads();                                                         \
    const int64_t index = static_cast<int64_t>(blockIdx.x) *                 \
        static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) +                       \
        static_cast<int64_t>(threadIdx.x);                                   \
    if (index < N) {                                                         \
      Y[index] = a * X[index];                                               \
    }                                                                        \
  }
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, float)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(double, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(float, double)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL(std::int64_t, std::int64_t)
#undef CAFFE2_SPECIALIZED_SCALE_CUDA_KERNEL
#define CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(TAlpha) \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha alpha, \
const at::Half* X, \
at::Half* Y) { \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>( \
alpha * convert::To<at::Half, TAlpha>(X[index])); \
} \
} \
template <> \
__global__ void ScaleCUDAKernel<TAlpha, at::Half>( \
const std::int64_t N, \
const TAlpha* alpha, \
const at::Half* X, \
at::Half* Y) { \
__shared__ TAlpha a; \
if (threadIdx.x == 0) { \
a = *alpha; \
} \
__syncthreads(); \
const int64_t index = static_cast<int64_t>(blockIdx.x) * \
static_cast<int64_t>(CAFFE_CUDA_NUM_THREADS) + \
static_cast<int64_t>(threadIdx.x); \
if (index < N) { \
Y[index] = convert::To<TAlpha, at::Half>( \
a * convert::To<at::Half, TAlpha>(X[index])); \
} \
}
CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL(float)
#undef CAFFE2_SPECIALIZED_HALF_SCALE_CUDA_KERNEL
} // namespace
// Generates Set<T> (fill Y[0..N) with alpha). The zero fill uses
// cudaMemsetAsync — valid because the all-zero byte pattern represents T(0)
// for the instantiated types — while non-zero values use thrust::fill on the
// context's stream. Comments are kept outside the macro body because '//'
// would absorb the line-continuation backslash.
#define CAFFE2_SPECIALIZED_CUDA_SET(T)                                   \
  template <>                                                            \
  CAFFE2_CUDA_EXPORT void Set<T, CUDAContext>(                           \
      const std::int64_t N, const T alpha, T* Y, CUDAContext* context) { \
    if (N == 0) {                                                        \
      return;                                                            \
    }                                                                    \
    if (alpha == T(0)) {                                                 \
      C10_CUDA_CHECK(cudaMemsetAsync(Y, 0, sizeof(T) * N, context->cuda_stream())); \
    } else {                                                             \
      thrust::fill(                                                      \
          thrust::cuda::par.on(context->cuda_stream()), Y, Y + N, alpha); \
    }                                                                    \
  }
CAFFE2_SPECIALIZED_CUDA_SET(bool)
CAFFE2_SPECIALIZED_CUDA_SET(char)
CAFFE2_SPECIALIZED_CUDA_SET(std::int8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int16_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint8_t)
CAFFE2_SPECIALIZED_CUDA_SET(std::uint16_t)
CAFFE2_SPECIALIZED_CUDA_SET(float)
CAFFE2_SPECIALIZED_CUDA_SET(double)
CAFFE2_SPECIALIZED_CUDA_SET(at::Half)
CAFFE2_SPECIALIZED_CUDA_SET(at::BFloat16)
#undef CAFFE2_SPECIALIZED_CUDA_SET
// Generates elementwise unary specializations Y[i] = DeviceFunc(X[i]) via
// thrust::transform with a device lambda on the context's stream; no-op for
// N == 0. Comments are kept outside the macro body because '//' would absorb
// the line-continuation backslash.
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Func, DeviceFunc) \
  template <>                                                    \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                  \
      const int N, const T* X, T* Y, CUDAContext* context) {     \
    if (N > 0) {                                                 \
      thrust::transform(                                         \
          thrust::cuda::par.on(context->cuda_stream()),          \
          X,                                                     \
          X + N,                                                 \
          Y,                                                     \
          [] __device__(const T x) { return DeviceFunc(x); });   \
    }                                                            \
  }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log1p, log1pf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Asin, asinf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Acos, acosf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tan, tanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Atan, atanf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sinh, sinhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cosh, coshf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Tanh, tanhf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Inv, utils::Inv<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Inv, utils::Inv<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, utils::Square<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Rsqrt, rsqrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Cube,
utils::Cube<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Cube,
utils::Cube<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cube, utils::Cube<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Cube, utils::Cube<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cbrt, cbrtf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Erf, erff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Erf, erf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, CdfNorm, normcdff)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, CdfNorm, normcdf)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(bool, Not, utils::Not<bool>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Neg,
utils::Negate<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Neg,
utils::Negate<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Neg, utils::Negate<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Neg, utils::Negate<double>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int32_t,
Sign,
utils::Sign<std::int32_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(
std::int64_t,
Sign,
utils::Sign<std::int64_t>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sign, utils::Sign<float>)
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(double, Sign, utils::Sign<double>)
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
// Generates Powx<T> (Y[i] = DeviceFunc(A[i], b), elementwise power with a
// scalar exponent captured by the device lambda). Unlike the other delegates
// there is no explicit N > 0 guard; thrust::transform handles the empty
// range [A, A) safely. Comments are kept outside the macro body because '//'
// would absorb the line-continuation backslash.
#define DELEGATE_CUDA_POWX(T, DeviceFunc)                               \
  template <>                                                           \
  CAFFE2_CUDA_EXPORT void Powx<T, CUDAContext>(                         \
      const int N, const T* A, const T b, T* Y, CUDAContext* context) { \
    thrust::transform(                                                  \
        thrust::cuda::par.on(context->cuda_stream()),                   \
        A,                                                              \
        A + N,                                                          \
        Y,                                                              \
        [b] __device__(const T x) { return DeviceFunc(x, b); });        \
  }
DELEGATE_CUDA_POWX(float, powf)
#undef DELEGATE_CUDA_POWX
// Generates SinCos<T> host wrappers: one SinCosCUDAKernel thread per element,
// K = ceil(N / CAFFE_CUDA_NUM_THREADS) blocks, followed by a kernel launch
// check. Comments are kept outside the macro body because '//' would absorb
// the line-continuation backslash.
#define CAFFE2_SPECIALIZED_CUDA_SINCOS(T)                                  \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void SinCos<T, CUDAContext>(                          \
      const int N, const T* X, T* S, T* C, CUDAContext* context) {         \
    if (N > 0) {                                                           \
      const int K = DivUp(N, CAFFE_CUDA_NUM_THREADS);                      \
      SinCosCUDAKernel<T>                                                  \
          <<<K, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(      \
              N, X, S, C);                                                 \
      C10_CUDA_KERNEL_LAUNCH_CHECK();                                      \
    }                                                                      \
  }
CAFFE2_SPECIALIZED_CUDA_SINCOS(float)
CAFFE2_SPECIALIZED_CUDA_SINCOS(double)
#undef CAFFE2_SPECIALIZED_CUDA_SINCOS
#define DELEGATE_CUDA_SCALE(T, CuBLASFunc) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>( \
const std::int64_t N, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, &alpha, Y, 1)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<T, T> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<T, T, CUDAContext>( \
const std::int64_t N, \
const T* alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(CuBLASFunc(context->cublas_handle(), N, alpha, Y, 1)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<T, T> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_CUDA_SCALE(float, cublasSscal)
DELEGATE_CUDA_SCALE(double, cublasDscal)
#undef DELEGATE_CUDA_SCALE
#if !defined(USE_ROCM)
#define DELEGATE_CUDA_SCALE_EX( \
TAlpha, TData, kAlphaType, kDataType, kExecutionType) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
CUBLAS_ENFORCE(cublasScalEx( \
context->cublas_handle(), \
N, \
&alpha, \
kAlphaType, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N == 0) { \
return; \
} \
if (Y == X) { \
CUBLAS_ENFORCE(cublasSetPointerMode( \
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
CUBLAS_ENFORCE(cublasScalEx( \
context->cublas_handle(), \
N, \
alpha, \
kAlphaType, \
Y, \
kDataType, \
1, \
kExecutionType)); \
} else { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
DELEGATE_CUDA_SCALE_EX(float, double, CUDA_R_32F, CUDA_R_64F, CUDA_R_64F)
DELEGATE_CUDA_SCALE_EX(float, at::Half, CUDA_R_32F, CUDA_R_16F, CUDA_R_32F)
#undef DELEGATE_CUDA_SCALE_EX
#endif // USE_ROCM
#define CAFFE2_SPECIALIZED_CUDA_SCALE(TAlpha, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N > 0) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
} \
template <> \
CAFFE2_CUDA_EXPORT void Scale<TAlpha, TData, CUDAContext>( \
const std::int64_t N, \
const TAlpha* alpha, \
const TData* X, \
TData* Y, \
CUDAContext* context) { \
if (N > 0) { \
const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
ScaleCUDAKernel<TAlpha, TData> \
<<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( \
N, *alpha, X, Y); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} \
}
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int32_t, std::int32_t)
CAFFE2_SPECIALIZED_CUDA_SCALE(std::int64_t, std::int64_t)
#if defined(USE_ROCM)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, double)
CAFFE2_SPECIALIZED_CUDA_SCALE(float, at::Half)
#endif // USE_ROCM
#undef CAFFE2_SPECIALIZED_CUDA_SCALE
// Generates elementwise binary specializations C[i] = DeviceFunc(A[i], B[i])
// by delegating to thrust::transform on the context's stream; no-op for
// N == 0. Comments are kept outside the macro body because '//' would absorb
// the line-continuation backslash.
#define DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(T, Func, DeviceFunc)        \
  template <>                                                            \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                          \
      const int N, const T* A, const T* B, T* C, CUDAContext* context) { \
    if (N > 0) {                                                         \
      thrust::transform(                                                 \
          thrust::cuda::par.on(context->cuda_stream()),                  \
          A,                                                             \
          A + N,                                                         \
          B,                                                             \
          C,                                                             \
          DeviceFunc);                                                   \
    }                                                                    \
  }
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Add,
thrust::plus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Add,
thrust::plus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Add, thrust::plus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Add, thrust::plus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Add, utils::HalfAddFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Sub,
thrust::minus<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Sub,
thrust::minus<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Sub, thrust::minus<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Sub, thrust::minus<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Sub, utils::HalfSubFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Mul,
thrust::multiplies<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Mul,
thrust::multiplies<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Mul, thrust::multiplies<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Mul, thrust::multiplies<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Mul, utils::HalfMulFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
Div,
thrust::divides<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
Div,
thrust::divides<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Div, thrust::divides<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Div, thrust::divides<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(at::Half, Div, utils::HalfDivFunctor())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Min, thrust::minimum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Min, thrust::minimum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(float, Max, thrust::maximum<float>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(double, Max, thrust::maximum<double>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, And, thrust::logical_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Or, thrust::logical_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, Xor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseAnd, thrust::bit_and<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseAnd,
thrust::bit_and<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseAnd,
thrust::bit_and<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseOr, thrust::bit_or<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseOr,
thrust::bit_or<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseOr,
thrust::bit_or<std::int64_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(bool, BitwiseXor, thrust::bit_xor<bool>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int32_t,
BitwiseXor,
thrust::bit_xor<std::int32_t>())
DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION(
std::int64_t,
BitwiseXor,
thrust::bit_xor<std::int64_t>())
#undef DELEGATE_SIMPLE_CUDA_BINARY_FUNCTION
// Generates elementwise comparison specializations C[i] = DeviceComp(A[i],
// B[i]) writing to a bool output array, via thrust::transform on the
// context's stream; no-op for N == 0. Comments are kept outside the macro
// body because '//' would absorb the line-continuation backslash.
#define DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(T, Func, DeviceComp)          \
  template <>                                                               \
  CAFFE2_CUDA_EXPORT void Func<T, CUDAContext>(                             \
      const int N, const T* A, const T* B, bool* C, CUDAContext* context) { \
    if (N > 0) {                                                            \
      thrust::transform(                                                    \
          thrust::cuda::par.on(context->cuda_stream()),                     \
          A,                                                                \
          A + N,                                                            \
          B,                                                                \
          C,                                                                \
          DeviceComp);                                                      \
    }                                                                       \
  }
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, EQ, thrust::equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
EQ,
thrust::equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
EQ,
thrust::equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, EQ, thrust::equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, EQ, thrust::equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, NE, thrust::not_equal_to<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
NE,
thrust::not_equal_to<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
NE,
thrust::not_equal_to<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, NE, thrust::not_equal_to<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
NE,
thrust::not_equal_to<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LT, thrust::less<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LT,
thrust::less<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LT,
thrust::less<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LT, thrust::less<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LT, thrust::less<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, LE, thrust::less_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
LE,
thrust::less_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
LE,
thrust::less_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, LE, thrust::less_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, LE, thrust::less_equal<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GT, thrust::greater<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GT,
thrust::greater<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GT,
thrust::greater<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GT, thrust::greater<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(double, GT, thrust::greater<double>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(bool, GE, thrust::greater_equal<bool>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int32_t,
GE,
thrust::greater_equal<std::int32_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
std::int64_t,
GE,
thrust::greater_equal<std::int64_t>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(float, GE, thrust::greater_equal<float>())
DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION(
double,
GE,
thrust::greater_equal<double>())
#undef DELEGATE_SIMPLE_CUDA_COMPARE_FUNCTION
// Generates the two Axpy<T, T, CUDAContext> specializations (Y := alpha*X + Y)
// that delegate to the given cuBLAS routine: one taking alpha by value on the
// host (CUBLAS_POINTER_MODE_HOST) and one taking alpha through a device
// pointer (CUBLAS_POINTER_MODE_DEVICE).
//
// Fix: the device-pointer overload previously hard-coded cublasSaxpy instead
// of using the CuBLASFunc macro parameter. That was harmless for the lone
// float instantiation below (where CuBLASFunc == cublasSaxpy) but would
// silently call the wrong routine for any other instantiation.
#define DELEGATE_CUDA_AXPY(T, CuBLASFunc)                             \
  template <>                                                         \
  CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>(                    \
      const std::int64_t N,                                           \
      const T alpha,                                                  \
      const T* X,                                                     \
      T* Y,                                                           \
      CUDAContext* context) {                                         \
    CUBLAS_ENFORCE(cublasSetPointerMode(                              \
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));         \
    CUBLAS_ENFORCE(                                                   \
        CuBLASFunc(context->cublas_handle(), N, &alpha, X, 1, Y, 1)); \
  }                                                                   \
  template <>                                                         \
  CAFFE2_CUDA_EXPORT void Axpy<T, T, CUDAContext>(                    \
      const std::int64_t N,                                           \
      const T* alpha,                                                 \
      const T* X,                                                     \
      T* Y,                                                           \
      CUDAContext* context) {                                         \
    CUBLAS_ENFORCE(cublasSetPointerMode(                              \
        context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));       \
    CUBLAS_ENFORCE(                                                   \
        CuBLASFunc(context->cublas_handle(), N, alpha, X, 1, Y, 1));  \
  }
DELEGATE_CUDA_AXPY(float, cublasSaxpy)
#undef DELEGATE_CUDA_AXPY
// Mixed-precision Axpy (alpha of type TAlpha applied to data of type TData).
// On CUDA this delegates to cublasAxpyEx with explicit alpha/data/execution
// data types; on ROCm it launches the project's AxpyCUDAKernel instead, since
// the Ex API path is not used there.
#if !defined(USE_ROCM)
#define DELEGATE_CUDA_AXPY_EX(                                \
    TAlpha, TData, kAlphaType, kDataType, kExecutionType)     \
  template <>                                                 \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>(   \
      const std::int64_t N,                                   \
      const TAlpha alpha,                                     \
      const TData* X,                                         \
      TData* Y,                                               \
      CUDAContext* context) {                                 \
    CUBLAS_ENFORCE(cublasSetPointerMode(                      \
        context->cublas_handle(), CUBLAS_POINTER_MODE_HOST)); \
    CUBLAS_ENFORCE(cublasAxpyEx(                              \
        context->cublas_handle(),                             \
        N,                                                    \
        &alpha,                                               \
        kAlphaType,                                           \
        X,                                                    \
        kDataType,                                            \
        1,                                                    \
        Y,                                                    \
        kDataType,                                            \
        1,                                                    \
        kExecutionType));                                     \
  }                                                           \
  template <>                                                 \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>(   \
      const std::int64_t N,                                   \
      const TAlpha* alpha,                                    \
      const TData* X,                                         \
      TData* Y,                                               \
      CUDAContext* context) {                                 \
    CUBLAS_ENFORCE(cublasSetPointerMode(                      \
        context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE)); \
    CUBLAS_ENFORCE(cublasAxpyEx(                              \
        context->cublas_handle(),                             \
        N,                                                    \
        alpha,                                                \
        kAlphaType,                                           \
        X,                                                    \
        kDataType,                                            \
        1,                                                    \
        Y,                                                    \
        kDataType,                                            \
        1,                                                    \
        kExecutionType));                                     \
  }
// float alpha with double data executes in double; float alpha with half data
// executes in float.
DELEGATE_CUDA_AXPY_EX(float, double, CUDA_R_32F, CUDA_R_64F, CUDA_R_64F)
DELEGATE_CUDA_AXPY_EX(float, at::Half, CUDA_R_32F, CUDA_R_16F, CUDA_R_32F)
#undef DELEGATE_CUDA_AXPY_EX
#else // USE_ROCM
// ROCm fallback: launch a simple elementwise kernel, one specialization for a
// host-side alpha and one for a device-pointer alpha.
#define CAFFE2_SPECIALIZED_CUDA_AXPY(TAlpha, TData)                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>(                \
      const std::int64_t N,                                                \
      const TAlpha alpha,                                                  \
      const TData* X,                                                      \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    AxpyCUDAKernel<TAlpha, TData>                                          \
        <<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(        \
            N, alpha, X, Y);                                               \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                        \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpy<TAlpha, TData, CUDAContext>(                \
      const std::int64_t N,                                                \
      const TAlpha* alpha,                                                 \
      const TData* X,                                                      \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    AxpyCUDAKernel<TAlpha, TData>                                          \
        <<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(        \
            N, alpha, X, Y);                                               \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                        \
  }
CAFFE2_SPECIALIZED_CUDA_AXPY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPY
#endif // USE_ROCM
// Generates Axpby<TAlpha, TData, CUDAContext> (Y := alpha*X + beta*Y) by
// launching the project's AxpbyCUDAKernel; one specialization takes
// alpha/beta by value (host) and one through device pointers.
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TAlpha, TData)                       \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>(               \
      const std::int64_t N,                                                \
      const TAlpha alpha,                                                  \
      const TData* X,                                                      \
      const TAlpha beta,                                                   \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    AxpbyCUDAKernel<TAlpha, TData>                                         \
        <<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(        \
            N, alpha, X, beta, Y);                                         \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                        \
  }                                                                        \
  template <>                                                              \
  CAFFE2_CUDA_EXPORT void Axpby<TAlpha, TData, CUDAContext>(               \
      const std::int64_t N,                                                \
      const TAlpha* alpha,                                                 \
      const TData* X,                                                      \
      const TAlpha* beta,                                                  \
      TData* Y,                                                            \
      CUDAContext* context) {                                              \
    const std::int64_t M = DivUp<std::int64_t>(N, CAFFE_CUDA_NUM_THREADS); \
    AxpbyCUDAKernel<TAlpha, TData>                                         \
        <<<M, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(        \
            N, alpha, X, beta, Y);                                         \
    C10_CUDA_KERNEL_LAUNCH_CHECK();                                        \
  }
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, double)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
} // namespace math
} // namespace caffe2
|
bf86eb78fe2cbefacf6e773271a3160bc768171a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMode.cu"
#else
// Computes the mode (most frequent value) of the innermost dimension of
// `input` at the coordinates given by `position`, writing the mode value into
// `values` and its index (offset by TH_INDEX_BASE) into `indices` at the
// corresponding coordinates. `input` must be contiguous, and `sortBuffer`
// must have room for one int64_t per element of the innermost dimension.
// Ties are broken by the first sorted occurrence found via max_element/find.
THC_API void THCTensor_(calculateMode)(THCState *state,
                                        THCTensor *values,
                                        THCudaLongTensor *indices,
                                        THCTensor *input,
                                        THCudaLongStorage *sortBuffer,
                                        int dimension,
                                        THLongStorage *position) {
  THAssert(THCTensor_(isContiguous)(state, input));
  // Because the input is contiguous, we want to get a reference to the
  // location of the buffer at the innermost dimension that we are going
  // to calculate the mode for --> we do this by manually doing the stride
  // calculations to get an offset
  real *data = THCTensor_(data)(state, input);
  for (int i = 0; i < THLongStorage_size(position); ++i) {
    data += THLongStorage_data(position)[i] * THCTensor_(stride)(state, input, i);
  }
  // Number of elements in the slice we compute the mode over (the size of the
  // innermost dimension).
  int64_t nElement = THCTensor_(size)(state, input, THCTensor_(_nDimension)(state, input) - 1);
  THCThrustAllocator thrustAlloc(state);
  // Wrap input data, sortBuffer, in Thrust device vectors
  // (note: constructing these device_vectors copies the slice, so the
  // original input is not mutated by the sort below).
  thrust::device_ptr<real> vecPtr = thrust::device_pointer_cast(data);
  thrust::device_vector<real> iter(vecPtr, vecPtr + nElement);
  thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
  thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);
  // Fill sortBuffer with [0, 1, 2, ... nElement - 1]
  thrust::sequence(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    seq.begin(), seq.end());
  // Sort the input data. The original indices of the data are stored in seq
  thrust::sort_by_key(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
    , ThrustHalfLess()
#endif
  );
  // Count # of unique elements via an inner product between adjacent elements.
  // Add 1 if two neighboring element are not equal.
  int unique = 1 + thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
    ThrustHalfNotEqualTo()
#else
    thrust::not_equal_to<real>()
#endif
  );
  // Count frequency of each element: since `iter` is sorted, reduce_by_key
  // yields (distinct value, run length) pairs.
  thrust::device_vector<real> keys(unique);
  thrust::device_vector<int> counts(unique);
  thrust::reduce_by_key(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(),
    thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
    , ThrustHalfEqualTo()
#endif
  );
  // Find index of maximum count
  thrust::device_vector<int>::iterator it = thrust::max_element(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    counts.begin(), counts.end());
  real mode = keys[it - counts.begin()];
  // Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
  thrust::device_vector<real>::iterator positionIter = thrust::find_if(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
  thrust::device_vector<real>::iterator positionIter = thrust::find(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(), mode);
#endif
  THAssert(positionIter != iter.end());
  // Map the sorted position back to the original (pre-sort) index via seq.
  int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()];
  // Place mode, index in output
  ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
  int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
  for (int i = 0; i < THLongStorage_size(position); ++i) {
    int64_t pos = THLongStorage_data(position)[i];
    valuesOffset += THCTensor_(stride)(state, values, i) * pos;
    indicesOffset += THCudaLongTensor_stride(state, indices, i) * pos;
  }
  THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
  THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// Recursively walks every coordinate of the non-mode dimensions, filling
// `position` as it descends; at the innermost dimension it delegates to
// calculateMode for that slice. (This could equivalently be written as an
// iterative loop over all slice coordinates.)
THC_API void THCTensor_(dimApplyMode)(THCState *state,
                               THCTensor *values,
                               THCudaLongTensor *indices,
                               THCTensor *input,
                               THCudaLongStorage *sortBuffer,
                               int dimension,
                               THLongStorage *position,
                               int curDim) {
  // The tensor has been transposed so the mode dimension is innermost.
  const int64_t innermost = THCTensor_(_nDimension)(state, input) - 1;
  if (curDim == innermost) {
    // Base case: `position` fully specifies a slice; compute its mode.
    THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
    return;
  }
  // Recursive case: try every coordinate of the current dimension.
  const int64_t dimSize = THCTensor_(size)(state, input, curDim);
  for (int idx = 0; idx < dimSize; ++idx) {
    THLongStorage_data(position)[curDim] = idx;
    THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
  }
}
// Limits for the fused one-block-per-slice kernel path below.
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
// Computes the mode of `input` along `dimension` into `values`/`indices`,
// optionally keeping the reduced dimension (keepdim). Two implementations:
// a fused computeMode kernel (one block per slice) when the slice fits in a
// block and 32-bit indexing suffices, otherwise a slice-by-slice fallback
// driven by dimApplyMode/calculateMode.
THC_API void THCTensor_(mode)(THCState *state,
                              THCTensor *values,
                              THCudaLongTensor *indices,
                              THCTensor *input,
                              int dimension,
                              int keepdim) {
  THLongStorage *dim;
  THCTensor *transposed, *contiguous, *valuesTransposed;
  THLongStorage *position;
  THCudaLongStorage *sortBuffer;
  THCudaLongTensor *indicesTransposed;
  int64_t ndim, sliceSize, slices;
  THAssert(THCTensor_(checkGPU)(state, 1, values));
  // Verify they are asking for a valid dimension
  ndim = THCTensor_(_nDimension)(state, input);
  THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds");
  sliceSize = THCTensor_(size)(state, input, dimension);
  slices = THCTensor_(nElement)(state, input) / sliceSize;
  // Resize output value, index Tensors to appropriate sizes (i.e. the same as
  // the input Tensor, except at dim=dimension, the size is 1)
  THCTensor_preserveReduceDimSemantics(
      state, values, ndim, dimension, keepdim);
  THCTensor_preserveReduceDimSemantics(
      state, indices, ndim, dimension, keepdim);
  dim = THCTensor_(newSizeOf)(state, input);
  THLongStorage_set(dim, dimension, 1);
  THCTensor_(resize)(state, values, dim, NULL);
  THCudaLongTensor_resize(state, indices, dim, NULL);
  THLongStorage_free(dim);
  // If sliceSize is 1, copy input to values and set indices
  // (the mode of a single element is the element itself).
  if (sliceSize == 1) {
    THCTensor_(copy)(state, values, input);
    THCudaLongTensor_fill(state, indices, TH_INDEX_BASE);
    if (!keepdim) {
      THCTensor_(squeeze1d)(state, values, values, dimension);
      THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
    }
    return;
  }
  // Requirements for fused kernel implementation:
  //
  // 1. sliceSize <= 2 * max threads per block
  // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
  // a kernel launch
  // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
  if (sliceSize <= MAX_BLOCK_SIZE &&
      slices <= MAX_GRID_SIZE &&
      THCTensor_canUse32BitIndexMath(state, input)) {
    // Beginning our optimized implementation. First thing we want to do is to transpose
    // the input Tensor along the sort dimension, and then make it contiguous
    transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);
    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);
    // Set-up TensorInfo structs for passing to kernel
    TensorInfo<real, unsigned int> tiValues = getTensorInfo<real, THCTensor, unsigned int>(state, valuesTransposed);
    TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed);
    // The number of blocks is the number of slices that we need to calculate the mode for. Each block
    // is responsible for computing a single mode
    dim3 grid;
    THC_getGridFromTiles(slices, grid);
    // The blocksize is two elements per thread, rounded up to the nearest power of 2
    int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
    // Macro that calls kernel --> note that we set the block dimensions here, and
    // the amount of shared memory
#define HANDLE_MODE(SIZE) \
  { \
    dim3 blockSize(SIZE / 2); \
\
    int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
    hipLaunchKernelGGL(( computeMode<real, SIZE>) \
      , dim3(grid), dim3(blockSize), memsize, THCState_getCurrentStream(state), \
        THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
  }
    // Tradeoff between compilation time and the number of specializations. Ideally we would have
    // one HANDLE_MODE for each power of 2
    switch(ceilPowerOf2) {
      case 2048:
        HANDLE_MODE(2048)
        break;
      case 1024:
      case 512:
      case 256:
        HANDLE_MODE(1024)
        break;
      case 128:
      case 64:
        HANDLE_MODE(128)
        break;
      case 32:
      case 16:
      case 8:
      case 4:
      case 2:
        HANDLE_MODE(32)
        break;
      case 1:
      default:
        // sliceSize == 1 was handled above, so this is unreachable.
        assert(false);
    }
    THCudaCheck(hipGetLastError());
    THCTensor_(free)(state, transposed);
    THCTensor_(free)(state, contiguous);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
  } else {
    // Beginning our naive implementation: We don't want to mutate the input Tensor, but
    // we need to be able to sort the inputs along the dimension in order to calculate the
    // mode. Additionally, its ideal if the data along the dimension is contiguous. So
    // we transpose the dimension with the innermost dimension and make a new contiguous
    // version that we can use.
    transposed = THCTensor_(newClone)(state, input);
    THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);
    THCTensor_(free)(state, transposed);
    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);
    // Position is a Storage that will store the dimension values we are processing
    position = THLongStorage_newWithSize(ndim - 1);
    // Sort Buffer is a Storage that will be used in the internal sort required to calculate
    // the mode efficiently
    sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);
    // Call mode
    THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);
    THCTensor_(free)(state, contiguous);
    THLongStorage_free(position);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
    THCudaLongStorage_free(state, sortBuffer);
  }
  if (!keepdim) {
    THCTensor_(squeeze1d)(state, values, values, dimension);
    THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
  }
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
| bf86eb78fe2cbefacf6e773271a3160bc768171a.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMode.cu"
#else
// Computes the mode (most frequent value) of the innermost dimension of
// `input` at the coordinates given by `position`, writing the mode value into
// `values` and its index (offset by TH_INDEX_BASE) into `indices` at the
// corresponding coordinates. `input` must be contiguous, and `sortBuffer`
// must have room for one int64_t per element of the innermost dimension.
// Ties are broken by the first sorted occurrence found via max_element/find.
THC_API void THCTensor_(calculateMode)(THCState *state,
                                        THCTensor *values,
                                        THCudaLongTensor *indices,
                                        THCTensor *input,
                                        THCudaLongStorage *sortBuffer,
                                        int dimension,
                                        THLongStorage *position) {
  THAssert(THCTensor_(isContiguous)(state, input));
  // Because the input is contiguous, we want to get a reference to the
  // location of the buffer at the innermost dimension that we are going
  // to calculate the mode for --> we do this by manually doing the stride
  // calculations to get an offset
  real *data = THCTensor_(data)(state, input);
  for (int i = 0; i < THLongStorage_size(position); ++i) {
    data += THLongStorage_data(position)[i] * THCTensor_(stride)(state, input, i);
  }
  // Number of elements in the slice we compute the mode over (the size of the
  // innermost dimension).
  int64_t nElement = THCTensor_(size)(state, input, THCTensor_(_nDimension)(state, input) - 1);
  THCThrustAllocator thrustAlloc(state);
  // Wrap input data, sortBuffer, in Thrust device vectors
  // (note: constructing these device_vectors copies the slice, so the
  // original input is not mutated by the sort below).
  thrust::device_ptr<real> vecPtr = thrust::device_pointer_cast(data);
  thrust::device_vector<real> iter(vecPtr, vecPtr + nElement);
  thrust::device_ptr<int64_t> sbPtr = thrust::device_pointer_cast(THCudaLongStorage_data(state, sortBuffer));
  thrust::device_vector<int64_t> seq(sbPtr, sbPtr + nElement);
  // Fill sortBuffer with [0, 1, 2, ... nElement - 1]
  thrust::sequence(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    seq.begin(), seq.end());
  // Sort the input data. The original indices of the data are stored in seq
  thrust::sort_by_key(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(), seq.begin()
#if defined(THC_REAL_IS_HALF)
    , ThrustHalfLess()
#endif
  );
  // Count # of unique elements via an inner product between adjacent elements.
  // Add 1 if two neighboring element are not equal.
  int unique = 1 + thrust::inner_product(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end() - 1, iter.begin() + 1, 0, thrust::plus<int>(),
#if defined(THC_REAL_IS_HALF)
    ThrustHalfNotEqualTo()
#else
    thrust::not_equal_to<real>()
#endif
  );
  // Count frequency of each element: since `iter` is sorted, reduce_by_key
  // yields (distinct value, run length) pairs.
  thrust::device_vector<real> keys(unique);
  thrust::device_vector<int> counts(unique);
  thrust::reduce_by_key(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(),
    thrust::constant_iterator<int>(1), keys.begin(), counts.begin()
#if defined(THC_REAL_IS_HALF)
    , ThrustHalfEqualTo()
#endif
  );
  // Find index of maximum count
  thrust::device_vector<int>::iterator it = thrust::max_element(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    counts.begin(), counts.end());
  real mode = keys[it - counts.begin()];
  // Find first index within which it occurs
#if defined(THC_REAL_IS_HALF)
  thrust::device_vector<real>::iterator positionIter = thrust::find_if(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(), ThrustHalfEqualToPredicate(mode));
#else
  thrust::device_vector<real>::iterator positionIter = thrust::find(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#else
    thrust::device,
#endif
    iter.begin(), iter.end(), mode);
#endif
  THAssert(positionIter != iter.end());
  // Map the sorted position back to the original (pre-sort) index via seq.
  int64_t index = TH_INDEX_BASE + seq[positionIter - iter.begin()];
  // Place mode, index in output
  ptrdiff_t valuesOffset = THCTensor_(storageOffset)(state, values);
  int64_t indicesOffset = THCudaLongTensor_storageOffset(state, indices);
  for (int i = 0; i < THLongStorage_size(position); ++i) {
    int64_t pos = THLongStorage_data(position)[i];
    valuesOffset += THCTensor_(stride)(state, values, i) * pos;
    indicesOffset += THCudaLongTensor_stride(state, indices, i) * pos;
  }
  THCStorage_(set)(state, THCTensor_(storage)(state, values), valuesOffset, mode);
  THCudaLongStorage_set(state, THCudaLongTensor_storage(state, indices), indicesOffset, index);
}
// Recursively walks every coordinate of the non-mode dimensions, filling
// `position` as it descends; at the innermost dimension it delegates to
// calculateMode for that slice. (This could equivalently be written as an
// iterative loop over all slice coordinates.)
THC_API void THCTensor_(dimApplyMode)(THCState *state,
                               THCTensor *values,
                               THCudaLongTensor *indices,
                               THCTensor *input,
                               THCudaLongStorage *sortBuffer,
                               int dimension,
                               THLongStorage *position,
                               int curDim) {
  // The tensor has been transposed so the mode dimension is innermost.
  const int64_t innermost = THCTensor_(_nDimension)(state, input) - 1;
  if (curDim == innermost) {
    // Base case: `position` fully specifies a slice; compute its mode.
    THCTensor_(calculateMode)(state, values, indices, input, sortBuffer, dimension, position);
    return;
  }
  // Recursive case: try every coordinate of the current dimension.
  const int64_t dimSize = THCTensor_(size)(state, input, curDim);
  for (int idx = 0; idx < dimSize; ++idx) {
    THLongStorage_data(position)[curDim] = idx;
    THCTensor_(dimApplyMode)(state, values, indices, input, sortBuffer, dimension, position, curDim + 1);
  }
}
// Limits for the fused one-block-per-slice kernel path below.
#define MAX_GRID_SIZE 65535
#define MAX_BLOCK_SIZE 1024
// Computes the mode of `input` along `dimension` into `values`/`indices`,
// optionally keeping the reduced dimension (keepdim). Two implementations:
// a fused computeMode kernel (one block per slice) when the slice fits in a
// block and 32-bit indexing suffices, otherwise a slice-by-slice fallback
// driven by dimApplyMode/calculateMode.
THC_API void THCTensor_(mode)(THCState *state,
                              THCTensor *values,
                              THCudaLongTensor *indices,
                              THCTensor *input,
                              int dimension,
                              int keepdim) {
  THLongStorage *dim;
  THCTensor *transposed, *contiguous, *valuesTransposed;
  THLongStorage *position;
  THCudaLongStorage *sortBuffer;
  THCudaLongTensor *indicesTransposed;
  int64_t ndim, sliceSize, slices;
  THAssert(THCTensor_(checkGPU)(state, 1, values));
  // Verify they are asking for a valid dimension
  ndim = THCTensor_(_nDimension)(state, input);
  THArgCheck(dimension >= 0 && dimension < ndim, 4, "Dimension of out bounds");
  sliceSize = THCTensor_(size)(state, input, dimension);
  slices = THCTensor_(nElement)(state, input) / sliceSize;
  // Resize output value, index Tensors to appropriate sizes (i.e. the same as
  // the input Tensor, except at dim=dimension, the size is 1)
  THCTensor_preserveReduceDimSemantics(
      state, values, ndim, dimension, keepdim);
  THCTensor_preserveReduceDimSemantics(
      state, indices, ndim, dimension, keepdim);
  dim = THCTensor_(newSizeOf)(state, input);
  THLongStorage_set(dim, dimension, 1);
  THCTensor_(resize)(state, values, dim, NULL);
  THCudaLongTensor_resize(state, indices, dim, NULL);
  THLongStorage_free(dim);
  // If sliceSize is 1, copy input to values and set indices
  // (the mode of a single element is the element itself).
  if (sliceSize == 1) {
    THCTensor_(copy)(state, values, input);
    THCudaLongTensor_fill(state, indices, TH_INDEX_BASE);
    if (!keepdim) {
      THCTensor_(squeeze1d)(state, values, values, dimension);
      THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
    }
    return;
  }
  // Requirements for fused kernel implementation:
  //
  // 1. sliceSize <= 2 * max threads per block
  // 2. uses one block per slice, so number of slices must be less than the maximum number of blocks for
  // a kernel launch
  // 3. Can use 32-bit index math for indexing (mainly just for implementation conciseness, could be changed)
  if (sliceSize <= MAX_BLOCK_SIZE &&
      slices <= MAX_GRID_SIZE &&
      THCTensor_canUse32BitIndexMath(state, input)) {
    // Beginning our optimized implementation. First thing we want to do is to transpose
    // the input Tensor along the sort dimension, and then make it contiguous
    transposed = THCTensor_(newTranspose)(state, input, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);
    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim-1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim-1);
    // Set-up TensorInfo structs for passing to kernel
    TensorInfo<real, unsigned int> tiValues = getTensorInfo<real, THCTensor, unsigned int>(state, valuesTransposed);
    TensorInfo<int64_t, unsigned int> tiIndices = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indicesTransposed);
    // The number of blocks is the number of slices that we need to calculate the mode for. Each block
    // is responsible for computing a single mode
    dim3 grid;
    THC_getGridFromTiles(slices, grid);
    // The blocksize is two elements per thread, rounded up to the nearest power of 2
    int64_t ceilPowerOf2 = nextHighestPowerOf2(sliceSize);
    // Macro that calls kernel --> note that we set the block dimensions here, and
    // the amount of shared memory
#define HANDLE_MODE(SIZE) \
  { \
    dim3 blockSize(SIZE / 2); \
\
    int memsize = (sizeof(real) * SIZE) + (2 * SIZE * sizeof(unsigned int)); \
    computeMode<real, SIZE> \
      <<<grid, blockSize, memsize, THCState_getCurrentStream(state)>>>( \
        THCTensor_(data)(state, contiguous), tiValues, tiIndices, sliceSize); \
  }
    // Tradeoff between compilation time and the number of specializations. Ideally we would have
    // one HANDLE_MODE for each power of 2
    switch(ceilPowerOf2) {
      case 2048:
        HANDLE_MODE(2048)
        break;
      case 1024:
      case 512:
      case 256:
        HANDLE_MODE(1024)
        break;
      case 128:
      case 64:
        HANDLE_MODE(128)
        break;
      case 32:
      case 16:
      case 8:
      case 4:
      case 2:
        HANDLE_MODE(32)
        break;
      case 1:
      default:
        // sliceSize == 1 was handled above, so this is unreachable.
        assert(false);
    }
    THCudaCheck(cudaGetLastError());
    THCTensor_(free)(state, transposed);
    THCTensor_(free)(state, contiguous);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
  } else {
    // Beginning our naive implementation: We don't want to mutate the input Tensor, but
    // we need to be able to sort the inputs along the dimension in order to calculate the
    // mode. Additionally, its ideal if the data along the dimension is contiguous. So
    // we transpose the dimension with the innermost dimension and make a new contiguous
    // version that we can use.
    transposed = THCTensor_(newClone)(state, input);
    THCTensor_(transpose)(state, transposed, NULL, dimension, ndim - 1);
    contiguous = THCTensor_(newContiguous)(state, transposed);
    THCTensor_(free)(state, transposed);
    // We also need to view the values and indices Tensors as transposed in order to
    // properly determine the offset into the underlying storage in which to place the
    // mode and index for a particular set of dimension values
    valuesTransposed = THCTensor_(newTranspose)(state, values, dimension, ndim - 1);
    indicesTransposed = THCudaLongTensor_newTranspose(state, indices, dimension, ndim - 1);
    // Position is a Storage that will store the dimension values we are processing
    position = THLongStorage_newWithSize(ndim - 1);
    // Sort Buffer is a Storage that will be used in the internal sort required to calculate
    // the mode efficiently
    sortBuffer = THCudaLongStorage_newWithSize(state, sliceSize);
    // Call mode
    THCTensor_(dimApplyMode)(state, valuesTransposed, indicesTransposed, contiguous, sortBuffer, dimension, position, 0);
    THCTensor_(free)(state, contiguous);
    THLongStorage_free(position);
    THCTensor_(free)(state, valuesTransposed);
    THCudaLongTensor_free(state, indicesTransposed);
    THCudaLongStorage_free(state, sortBuffer);
  }
  if (!keepdim) {
    THCTensor_(squeeze1d)(state, values, values, dimension);
    THCudaLongTensor_squeeze1d(state, indices, indices, dimension);
  }
}
#undef MAX_GRID_SIZE
#undef MAX_BLOCK_SIZE
#endif
|
5af0560074896f50ace3e2ee20d40dfbad12e5d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
hipCtx_t hContext = 0;
// Checks a HIP driver-style API call, printing the failing call text, the
// numeric status, and its message before aborting on any error.
// Fix: the hipify output kept the CUDA driver cuGetErrorString(status,
// &errstr) two-argument signature; HIP's hipGetErrorString() takes only the
// error code and returns the message string directly, so the original form
// did not compile under HIP.
#define CUDA_CHECK( fn ) do { \
		hipError_t status = (fn); \
		if ( hipSuccess != status ) { \
			const char* errstr = hipGetErrorString(status); \
			printf("CUDA Driver Failure (line %d of file %s):\n\t%s returned 0x%x (%s)\n", __LINE__, __FILE__, #fn, status, errstr); \
			exit(EXIT_FAILURE); \
		} \
	} while (0)
// Prints the average per-iteration time and achieved GFLOP/s for the fixed
// benchmark workload. `ms` is the total elapsed time for `repeat` iterations.
// NOTE(review): the flop count is hard-coded to the benchmark's fixed problem
// size (128*16*1024*1024 multiply-accumulates, counted as 2 flops each); `N`
// is only echoed in the report, not used in the computation — confirm that is
// intended.
// Fix: the original computed `(128*16*1024)*1024` in 32-bit int, which
// overflows (signed overflow is UB; in practice the product went negative),
// and then truncated the scaled result into `long int`, so the reported
// GFLOP/s figure was wrong. All arithmetic is now done in double.
void gflops(const char* ident, int N, float ms, int repeat)
{
	double msecPerMatrixMul = ms / repeat;
	double flopCount = 128.0 * 16.0 * 1024.0 * 1024.0 * 2.0; // *2 for MUL and ADD
	double gigaFlops = (flopCount * 1e-9) / (msecPerMatrixMul) * 1000.0;
	printf("ms = %f \n", msecPerMatrixMul);
	printf("%s GFLOPS: %.2f (size: %d, iterations: %d)\n", ident, gigaFlops, N, repeat);
}
// Host driver for the pre-built `conv_kernel_128` module kernel: fills a
// random N-sample signal and a 1024-tap filter, computes a CPU reference
// convolution, times the GPU kernel with events, then compares GPU output
// against the reference with a +/-1 absolute tolerance.
// NOTE(review): this hipify-generated main mixes runtime-API calls
// (hipSetDevice / hipGetDeviceProperties) with driver-style calls, and the
// cuMemAlloc/cuMemcpyHtoD/cuMemcpyDtoH calls below were left unconverted —
// confirm they resolve when building for HIP.
int main()
{
	//-----------------sample_data_config---------------------
	int NBLOCK = 1024;
	// N extends the input past P so the last output tap can read
	// X[P - 1 + 1023] without running off the end of the buffer.
	int N = 2048*NBLOCK + 1024*30;//1024032;//1023985; 160768.0*4/1024/1024=0.61328125MB
	int M = 1024;//16;
	int P = 2048*NBLOCK;
	size_t sizeSampleFloat = N * sizeof(float);
	size_t sizeFilterFloat = M * sizeof(float);//16 * 4;
	size_t sizeResultFloat = P * sizeof(float);
	int repeat = 4;
	dim3 threads(128, 1, 1);
	dim3 grid(NBLOCK, 1, 1);
	hipError_t error;
	char deviceName[32];
	int count, ordinal, major, minor;
	hipDevice_t hDevice;
	hipEvent_t hStart, hStop;
	hipDeviceptr_t devH, devX, devY;
	// ------Initialize the Driver API and find a device-----
	CUDA_CHECK(hipInit(0));
	CUDA_CHECK(hipGetDeviceCount(&count));
	for (ordinal = 0; ordinal < count; ordinal++)
	{
		CUDA_CHECK(hipDeviceGet(&hDevice, ordinal));
		CUDA_CHECK(hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hDevice));
		CUDA_CHECK(hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hDevice));
		CUDA_CHECK(hipDeviceGetName(deviceName, sizeof(deviceName), hDevice));
		// NOTE(review): `major >= 5 && minor >= 2` rejects e.g. 6.0/7.0
		// devices whose minor digit is < 2 even though they are newer than
		// compute 5.2 — confirm whether `major > 5 || (major == 5 && minor >= 2)`
		// was intended.
		if (major >= 5 && minor >= 2)
		{
			//printf("Using: Id:%d %s (%d.%d)\n\n", ordinal, deviceName, major, minor);
			break;
		}
	}
	if (ordinal == count)
	{
		printf("No compute 5.0 device found, exiting.\n");
		exit(EXIT_FAILURE);
	}
	//-----------------device_test------------------------
	int device = 0;
	error = hipSetDevice(0);
	if (error != hipSuccess)
	{
		printf("device error");
		exit(EXIT_FAILURE);
	}
	else printf("device: %d \n", device);
	hipDeviceProp_t deviceProp;
	error = hipGetDeviceProperties(&deviceProp, 0);
	if (error != hipSuccess)
	{
		printf("DeviceProperties error");
		exit(EXIT_FAILURE);
	}
	//-----------------------host----------------------------
	// NOTE(review): malloc results are not NULL-checked before use, and the
	// early-exit paths above/below do not free these buffers.
	float* H = (float*)malloc(sizeFilterFloat);
	float* X = (float*)malloc(sizeSampleFloat);
	float* Y = (float*)malloc(sizeResultFloat);
	float* T = (float*)malloc(sizeResultFloat);
	for (int i = 0; i < N ; i++) //
	{
		X[i] = (float)rand()/1000;//(float)1.0;//
		//if(X[i] == (float)16) X[i]=0;
	}
	for (int i = 0; i < M; i++) //
	{
		H[i] = (float)rand()/1000;//(float)i;// (i % 2);//(float)rand();//(float)1.0;//
	}
	for (int i = 0; i < P; i++) //
	{
		Y[i] = (float)0.0;
		T[i] = (float)0.0;
	}
	//conv calculate
	// CPU reference: T[i] accumulates H[1023]*X[i] ... H[0]*X[i+1023],
	// i.e. the filter is applied in reverse tap order over the input window.
	for (int i = 0; i < P; i++)
	{
		int k = i;
		for (int j = 1024; j > 0; j--)
		{
			T[i] += H[j - 1] * X[k];
			k++;
		}
	}
	//-----------------------Dev----------------------------
	CUDA_CHECK(hipCtxCreate(&hContext, 0, hDevice));
	// NOTE(review): hStart/hStop are created again below before the timing
	// loop, leaking this first pair. Also, hipEventCreate normally takes a
	// single argument; the two-argument flags form matches cuEventCreate —
	// confirm this compiles under HIP (hipEventCreateWithFlags is the usual spelling).
	CUDA_CHECK(hipEventCreate(&hStart, hipEventBlockingSync)); // hipEventDefault
	CUDA_CHECK(hipEventCreate(&hStop, hipEventBlockingSync));
	CUDA_CHECK(cuMemAlloc(&devH, sizeFilterFloat));
	CUDA_CHECK(cuMemAlloc(&devX, sizeSampleFloat));
	CUDA_CHECK(cuMemAlloc(&devY, sizeResultFloat));
	CUDA_CHECK(cuMemcpyHtoD(devH, H, sizeFilterFloat));
	CUDA_CHECK(cuMemcpyHtoD(devX, X, sizeSampleFloat));
	//---------------------Kernel----------------------------
	printf("Computing result using CUDA Kernel...\n");
	// Load the cubin
	hipModule_t hModule;
	// NOTE(review): a CUDA .cubin cannot be loaded by a ROCm backend; a HIP
	// code object would be needed when this runs on AMD hardware — verify.
	CUDA_CHECK(hipModuleLoad(&hModule, "conv.cubin"));
	// Load the kernel function
	hipFunction_t hKernel;
	CUDA_CHECK(hipModuleGetFunction(&hKernel, hModule, "conv_kernel_128"));
	void * params[] = {&devH, &devX, &devY};
	float totalTime = 0;
	// Launch the kernel repeat times.. but break it up into pieces so as not to lock things up.
	CUDA_CHECK(hipEventCreate(&hStart, hipEventBlockingSync)); // hipEventDefault
	CUDA_CHECK(hipEventCreate(&hStop, hipEventBlockingSync));
	// This loop body executes exactly once: r == repeat, so `repeat -= r`
	// zeroes the counter on the first pass.
	while (repeat > 0)
	{
		float ms;
		int r = repeat;
		CUDA_CHECK(hipEventRecord(hStart, NULL));
		for (int i = 0; i < repeat; i++)
			CUDA_CHECK(hipModuleLaunchKernel(hKernel, grid.x, 1, 1, threads.x, 1, 1, 0, 0, params, 0));
		CUDA_CHECK(hipEventRecord(hStop, NULL));
		CUDA_CHECK(hipEventSynchronize(hStop));
		CUDA_CHECK(hipEventElapsedTime(&ms, hStart, hStop));
		totalTime += ms;
		//gflops("conv_kernel_128", P, totalTime, repeat);
		repeat -= r;
	}
	//CUDA_CHECK(hipModuleLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
	//CUDA_CHECK(hipModuleLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
	CUDA_CHECK(hipModuleUnload(hModule));
	// Copy result from device to host
	// (this copy is blocking, so it also serializes behind the prior launches)
	CUDA_CHECK(cuMemcpyDtoH(Y, devY, sizeResultFloat));
	//CUDA_CHECK(cuMemcpyDtoH(H, devH, sizeFilterFloat));
	//CUDA_CHECK(cuMemcpyDtoH(X, devX, sizeSampleFloat));
	//for (int i = 0; i<20; i++) {
	//if (Y[i] != 0.0f)
	//	printf("Y[%d] = %f \n", i, Y[i]);
	//}
	//for (int i = 2048*0; i<2048*780; i++) {
	//	if (Y[i] != T[i])//1024.0f)
	//		printf("Y[%d] = %f \n", i, Y[i]);
	//}
	//-----------------------free----------------------------
	// Cleanup and shutdown of cuda
	CUDA_CHECK(hipFree(devH));
	CUDA_CHECK(hipFree(devX));
	CUDA_CHECK(hipFree(devY));
	// Dump the first 1024 outputs, then report any |error| > 1 across all P.
	for (int i = 0; i<1024*1; i++)
		printf("Y[%d] = %f --- and --- T[%d] = %f error = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
	for (int i = 0; i<P; i++)
	{
		if ( (Y[i]-T[i] > 1) || (Y[i]-T[i] < -1) )
			printf("Y[%d] = %f --- but --- T[%d] = %f error = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
	}
	//for (int i = 2048*0; i<2048*1; i++) {
	//if (Y[i] != 1024.0f)
	//	printf("T[%d] = %f \n", i, T[i]);
	//}
	free(H);
	free(X);
	free(Y);
	free(T);
	CUDA_CHECK(hipEventDestroy(hStart));
	CUDA_CHECK(hipEventDestroy(hStop));
	CUDA_CHECK(hipCtxDestroy(hContext));
	hContext = 0;
	hipDeviceReset();
	printf("done\n");
	return EXIT_SUCCESS;
}
| 5af0560074896f50ace3e2ee20d40dfbad12e5d5.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
// Driver-API context handle shared with main(); reset to 0 after destruction.
CUcontext hContext = 0;
// Evaluates a CUDA Driver API call; on failure prints the failing
// expression, source location, numeric status and error string, then
// terminates the process. (Comments cannot go inside the macro body:
// a // comment would swallow the backslash line-continuations.)
#define CUDA_CHECK( fn ) do { \
		CUresult status = (fn); \
		if ( CUDA_SUCCESS != status ) { \
			const char* errstr; \
			cuGetErrorString(status, &errstr); \
			printf("CUDA Driver Failure (line %d of file %s):\n\t%s returned 0x%x (%s)\n", __LINE__, __FILE__, #fn, status, errstr); \
			exit(EXIT_FAILURE); \
		} \
	} while (0)
/**
 * Print the average per-iteration kernel time and the achieved GFLOP/s.
 *
 * ident  - label printed with the result (kernel name).
 * N      - problem size, printed for reference only.
 * ms     - total elapsed time in milliseconds across all iterations.
 * repeat - number of timed iterations ms covers.
 *
 * The work estimate is fixed at 128*16*1024*1024 multiply-add pairs per
 * launch (*2 for MUL and ADD). It is computed entirely in double: the old
 * expression `(128*16*1024)*1024*...` performed the multiply in 32-bit int,
 * overflowing INT_MAX (2^31) — undefined behavior — and then truncated the
 * fractional GFLOP count by storing it in a `long int`.
 */
void gflops(const char* ident, int N, float ms, int repeat)
{
	// Guard the division below; with no timed iterations there is nothing to report.
	if (repeat <= 0 || ms <= 0.0f) {
		printf("%s GFLOPS: n/a (size: %d, iterations: %d)\n", ident, N, repeat);
		return;
	}
	double msecPerMatrixMul = ms / repeat;
	double Gflops = 2.0 * 128.0 * 16.0 * 1024.0 * 1024.0 * 1e-9; //*2 for MUL and ADD
	double gigaFlops = Gflops / msecPerMatrixMul * 1000.0;
	printf("ms = %f \n", msecPerMatrixMul);
	printf("%s GFLOPS: %.2f (size: %d, iterations: %d)\n", ident, gigaFlops, N, repeat);
}
// Host driver for the pre-built `conv_kernel_128` cubin: fills a random
// N-sample signal and a 1024-tap filter, computes a CPU reference
// convolution, times the GPU kernel with driver-API events, then compares
// GPU output against the reference with a +/-1 absolute tolerance.
// NOTE(review): runtime-API calls (cudaSetDevice / cudaGetDeviceProperties)
// are mixed with Driver API usage; implicit context interactions between the
// two should be verified.
int main()
{
	//-----------------sample_data_config---------------------
	int NBLOCK = 1024;
	// N extends the input past P so the last output tap can read
	// X[P - 1 + 1023] without running off the end of the buffer.
	int N = 2048*NBLOCK + 1024*30;//1024032;//1023985; 160768.0*4/1024/1024=0.61328125MB
	int M = 1024;//16;
	int P = 2048*NBLOCK;
	size_t sizeSampleFloat = N * sizeof(float);
	size_t sizeFilterFloat = M * sizeof(float);//16 * 4;
	size_t sizeResultFloat = P * sizeof(float);
	int repeat = 4;
	dim3 threads(128, 1, 1);
	dim3 grid(NBLOCK, 1, 1);
	cudaError_t error;
	char deviceName[32];
	int count, ordinal, major, minor;
	CUdevice hDevice;
	CUevent hStart, hStop;
	CUdeviceptr devH, devX, devY;
	// ------Initialize the Driver API and find a device-----
	CUDA_CHECK(cuInit(0));
	CUDA_CHECK(cuDeviceGetCount(&count));
	for (ordinal = 0; ordinal < count; ordinal++)
	{
		CUDA_CHECK(cuDeviceGet(&hDevice, ordinal));
		CUDA_CHECK(cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, hDevice));
		CUDA_CHECK(cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, hDevice));
		CUDA_CHECK(cuDeviceGetName(deviceName, sizeof(deviceName), hDevice));
		// NOTE(review): `major >= 5 && minor >= 2` rejects e.g. 6.0/7.0
		// devices whose minor digit is < 2 even though they are newer than
		// compute 5.2 — confirm whether `major > 5 || (major == 5 && minor >= 2)`
		// was intended.
		if (major >= 5 && minor >= 2)
		{
			//printf("Using: Id:%d %s (%d.%d)\n\n", ordinal, deviceName, major, minor);
			break;
		}
	}
	if (ordinal == count)
	{
		printf("No compute 5.0 device found, exiting.\n");
		exit(EXIT_FAILURE);
	}
	//-----------------device_test------------------------
	int device = 0;
	error = cudaSetDevice(0);
	if (error != cudaSuccess)
	{
		printf("device error");
		exit(EXIT_FAILURE);
	}
	else printf("device: %d \n", device);
	cudaDeviceProp deviceProp;
	error = cudaGetDeviceProperties(&deviceProp, 0);
	if (error != cudaSuccess)
	{
		printf("DeviceProperties error");
		exit(EXIT_FAILURE);
	}
	//-----------------------host----------------------------
	// NOTE(review): malloc results are not NULL-checked before use, and the
	// early-exit paths above/below do not free these buffers.
	float* H = (float*)malloc(sizeFilterFloat);
	float* X = (float*)malloc(sizeSampleFloat);
	float* Y = (float*)malloc(sizeResultFloat);
	float* T = (float*)malloc(sizeResultFloat);
	for (int i = 0; i < N ; i++) //
	{
		X[i] = (float)rand()/1000;//(float)1.0;//
		//if(X[i] == (float)16) X[i]=0;
	}
	for (int i = 0; i < M; i++) //
	{
		H[i] = (float)rand()/1000;//(float)i;// (i % 2);//(float)rand();//(float)1.0;//
	}
	for (int i = 0; i < P; i++) //
	{
		Y[i] = (float)0.0;
		T[i] = (float)0.0;
	}
	//conv calculate
	// CPU reference: T[i] accumulates H[1023]*X[i] ... H[0]*X[i+1023],
	// i.e. the filter is applied in reverse tap order over the input window.
	for (int i = 0; i < P; i++)
	{
		int k = i;
		for (int j = 1024; j > 0; j--)
		{
			T[i] += H[j - 1] * X[k];
			k++;
		}
	}
	//-----------------------Dev----------------------------
	CUDA_CHECK(cuCtxCreate(&hContext, 0, hDevice));
	// NOTE(review): hStart/hStop are created again below before the timing
	// loop, leaking this first pair of events.
	CUDA_CHECK(cuEventCreate(&hStart, CU_EVENT_BLOCKING_SYNC)); // CU_EVENT_DEFAULT
	CUDA_CHECK(cuEventCreate(&hStop, CU_EVENT_BLOCKING_SYNC));
	CUDA_CHECK(cuMemAlloc(&devH, sizeFilterFloat));
	CUDA_CHECK(cuMemAlloc(&devX, sizeSampleFloat));
	CUDA_CHECK(cuMemAlloc(&devY, sizeResultFloat));
	CUDA_CHECK(cuMemcpyHtoD(devH, H, sizeFilterFloat));
	CUDA_CHECK(cuMemcpyHtoD(devX, X, sizeSampleFloat));
	//---------------------Kernel----------------------------
	printf("Computing result using CUDA Kernel...\n");
	// Load the cubin
	CUmodule hModule;
	CUDA_CHECK(cuModuleLoad(&hModule, "conv.cubin"));
	// Load the kernel function
	CUfunction hKernel;
	CUDA_CHECK(cuModuleGetFunction(&hKernel, hModule, "conv_kernel_128"));
	void * params[] = {&devH, &devX, &devY};
	float totalTime = 0;
	// Launch the kernel repeat times.. but break it up into pieces so as not to lock things up.
	CUDA_CHECK(cuEventCreate(&hStart, CU_EVENT_BLOCKING_SYNC)); // CU_EVENT_DEFAULT
	CUDA_CHECK(cuEventCreate(&hStop, CU_EVENT_BLOCKING_SYNC));
	// This loop body executes exactly once: r == repeat, so `repeat -= r`
	// zeroes the counter on the first pass.
	while (repeat > 0)
	{
		float ms;
		int r = repeat;
		CUDA_CHECK(cuEventRecord(hStart, NULL));
		for (int i = 0; i < repeat; i++)
			CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, 1, 1, threads.x, 1, 1, 0, 0, params, 0));
		CUDA_CHECK(cuEventRecord(hStop, NULL));
		CUDA_CHECK(cuEventSynchronize(hStop));
		CUDA_CHECK(cuEventElapsedTime(&ms, hStart, hStop));
		totalTime += ms;
		//gflops("conv_kernel_128", P, totalTime, repeat);
		repeat -= r;
	}
	//CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
	//CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
	CUDA_CHECK(cuModuleUnload(hModule));
	// Copy result from device to host
	// (cuMemcpyDtoH blocks until the copy completes, which also serializes
	// behind the preceding launches, so no explicit sync is required here)
	CUDA_CHECK(cuMemcpyDtoH(Y, devY, sizeResultFloat));
	//CUDA_CHECK(cuMemcpyDtoH(H, devH, sizeFilterFloat));
	//CUDA_CHECK(cuMemcpyDtoH(X, devX, sizeSampleFloat));
	//for (int i = 0; i<20; i++) {
	//if (Y[i] != 0.0f)
	//	printf("Y[%d] = %f \n", i, Y[i]);
	//}
	//for (int i = 2048*0; i<2048*780; i++) {
	//	if (Y[i] != T[i])//1024.0f)
	//		printf("Y[%d] = %f \n", i, Y[i]);
	//}
	//-----------------------free----------------------------
	// Cleanup and shutdown of cuda
	CUDA_CHECK(cuMemFree(devH));
	CUDA_CHECK(cuMemFree(devX));
	CUDA_CHECK(cuMemFree(devY));
	// Dump the first 1024 outputs, then report any |error| > 1 across all P.
	for (int i = 0; i<1024*1; i++)
		printf("Y[%d] = %f --- and --- T[%d] = %f error = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
	for (int i = 0; i<P; i++)
	{
		if ( (Y[i]-T[i] > 1) || (Y[i]-T[i] < -1) )
			printf("Y[%d] = %f --- but --- T[%d] = %f error = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
	}
	//for (int i = 2048*0; i<2048*1; i++) {
	//if (Y[i] != 1024.0f)
	//	printf("T[%d] = %f \n", i, T[i]);
	//}
	free(H);
	free(X);
	free(Y);
	free(T);
	CUDA_CHECK(cuEventDestroy(hStart));
	CUDA_CHECK(cuEventDestroy(hStop));
	CUDA_CHECK(cuCtxDestroy(hContext));
	hContext = 0;
	cudaDeviceReset();
	printf("done\n");
	return EXIT_SUCCESS;
}
|
b7fe4410fa3c076e063c95f86b442c8160b74487.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/OpMathType.h>
namespace at { namespace native {
// NVRTC kernel-name string used by the jiterator path below.
const char acos_name[] = "acos";
// Elementwise arccosine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::acos directly.
void acos_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto acos_string = jiterator_stringify(
        template <typename T>
        T acos(T a) {
          return std::acos(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acos_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ acos_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, acos_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acos_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::acos(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "acos_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::acos(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char asin_name[] = "asin";
// Elementwise arcsine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::asin directly.
void asin_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto asin_string = jiterator_stringify(
        template <typename T>
        T asin(T a) {
          return std::asin(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asin_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ asin_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, asin_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asin_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::asin(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    // Spelled out as ScalarType::Half / ScalarType::BFloat16 (rather than the
    // kHalf / kBFloat16 aliases used here previously) for consistency with
    // every sibling kernel in this file; the aliases are the same enumerators.
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "asin_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::asin(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char atan_name[] = "atan";
// Elementwise arctangent on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::atan directly.
void atan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto atan_string = jiterator_stringify(
        template <typename T>
        T atan(T a) {
          return std::atan(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ atan_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, atan_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::atan(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "atan_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::atan(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char sin_name[] = "sin";
// Elementwise sine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::sin directly.
void sin_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto sin_string = jiterator_stringify(
        template <typename T>
        T sin(T a) {
          return std::sin(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sin_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ sin_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, sin_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sin_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::sin(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "sin_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::sin(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char cos_name[] = "cos";
// Elementwise cosine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::cos directly.
void cos_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto cos_string = jiterator_stringify(
        template <typename T>
        T cos(T a) {
          return std::cos(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cos_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ cos_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, cos_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cos_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::cos(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "cos_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::cos(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char sinh_name[] = "sinh";
// Elementwise hyperbolic sine on CUDA: complex dtypes (incl. complex-half)
// use the jit-compiled jiterator kernel (or an op-math-upcasting precompiled
// kernel when AT_USE_JITERATOR is off); real floating dtypes (incl.
// half/bfloat16) call ::sinh directly.
void sinh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto sinh_string = jiterator_stringify(
        template <typename T>
        T sinh(T a) {
          return std::sinh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sinh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ sinh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, sinh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sinh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::sinh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "sinh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::sinh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char cosh_name[] = "cosh";
// Elementwise hyperbolic cosine on CUDA: complex dtypes (incl. complex-half)
// use the jit-compiled jiterator kernel (or an op-math-upcasting precompiled
// kernel when AT_USE_JITERATOR is off); real floating dtypes (incl.
// half/bfloat16) call ::cosh directly.
void cosh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto cosh_string = jiterator_stringify(
        template <typename T>
        T cosh(T a) {
          return std::cosh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cosh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ cosh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, cosh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cosh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::cosh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "cosh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::cosh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char tanh_name[] = "tanh";
// Elementwise hyperbolic tangent on CUDA: complex dtypes (incl. complex-half)
// use the jit-compiled jiterator kernel (or an op-math-upcasting precompiled
// kernel when AT_USE_JITERATOR is off); real floating dtypes (incl.
// half/bfloat16) call ::tanh directly.
void tanh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto tanh_string = jiterator_stringify(
        template <typename T>
        T tanh(T a) {
          return std::tanh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tanh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ tanh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, tanh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tanh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::tanh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "tanh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::tanh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char acosh_name[] = "acosh";
// Elementwise inverse hyperbolic cosine on CUDA: complex dtypes (incl.
// complex-half) use the jit-compiled jiterator kernel (or an op-math-
// upcasting precompiled kernel when AT_USE_JITERATOR is off); real floating
// dtypes (incl. half/bfloat16) call ::acosh directly.
void acosh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto acosh_string = jiterator_stringify(
        template <typename T>
        T acosh(T a) {
          return std::acosh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ acosh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, acosh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::acosh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "acosh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::acosh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char asinh_name[] = "asinh";
// Elementwise inverse hyperbolic sine on CUDA: complex dtypes (incl.
// complex-half) use the jit-compiled jiterator kernel (or an op-math-
// upcasting precompiled kernel when AT_USE_JITERATOR is off); real floating
// dtypes (incl. half/bfloat16) call ::asinh directly.
void asinh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto asinh_string = jiterator_stringify(
        template <typename T>
        T asinh(T a) {
          return std::asinh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asinh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ asinh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, asinh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asinh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::asinh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "asinh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::asinh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char atanh_name[] = "atanh";
// Elementwise inverse hyperbolic tangent on CUDA: complex dtypes (incl.
// complex-half) use the jit-compiled jiterator kernel (or an op-math-
// upcasting precompiled kernel when AT_USE_JITERATOR is off); real floating
// dtypes (incl. half/bfloat16) call ::atanh directly.
void atanh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto atanh_string = jiterator_stringify(
        template <typename T>
        T atanh(T a) {
          return std::atanh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atanh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ atanh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, atanh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atanh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::atanh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "atanh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::atanh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char tan_name[] = "tan";
// Elementwise tangent on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::tan directly.
void tan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto tan_string = jiterator_stringify(
        template <typename T>
        T tan(T a) {
          return std::tan(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tan_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ tan_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, tan_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tan_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::tan(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "tan_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::tan(a);
          });
        });
  }
}
// Register each kernel with ATen's dispatch stubs so the corresponding
// at:: unary operators route to these implementations on CUDA tensors.
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
}} // namespace at::native
| b7fe4410fa3c076e063c95f86b442c8160b74487.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/OpMathType.h>
namespace at { namespace native {
// NVRTC kernel-name string used by the jiterator path below.
const char acos_name[] = "acos";
// Elementwise arccosine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::acos directly.
void acos_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto acos_string = jiterator_stringify(
        template <typename T>
        T acos(T a) {
          return std::acos(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acos_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ acos_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, acos_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acos_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::acos(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "acos_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::acos(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char asin_name[] = "asin";
// Elementwise arcsine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::asin directly.
void asin_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto asin_string = jiterator_stringify(
        template <typename T>
        T asin(T a) {
          return std::asin(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asin_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ asin_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, asin_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asin_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::asin(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    // Spelled out as ScalarType::Half / ScalarType::BFloat16 (rather than the
    // kHalf / kBFloat16 aliases used here previously) for consistency with
    // every sibling kernel in this file; the aliases are the same enumerators.
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "asin_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::asin(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char atan_name[] = "atan";
// Elementwise arctangent on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::atan directly.
void atan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto atan_string = jiterator_stringify(
        template <typename T>
        T atan(T a) {
          return std::atan(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ atan_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, atan_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::atan(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "atan_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::atan(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char sin_name[] = "sin";
// Elementwise sine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::sin directly.
void sin_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto sin_string = jiterator_stringify(
        template <typename T>
        T sin(T a) {
          return std::sin(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sin_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ sin_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, sin_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sin_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::sin(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "sin_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::sin(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char cos_name[] = "cos";
// Elementwise cosine on CUDA: complex dtypes (incl. complex-half) use the
// jit-compiled jiterator kernel (or an op-math-upcasting precompiled kernel
// when AT_USE_JITERATOR is off); real floating dtypes (incl. half/bfloat16)
// call ::cos directly.
void cos_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto cos_string = jiterator_stringify(
        template <typename T>
        T cos(T a) {
          return std::cos(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cos_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ cos_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, cos_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cos_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::cos(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "cos_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::cos(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char sinh_name[] = "sinh";
// Elementwise hyperbolic sine on CUDA: complex dtypes (incl. complex-half)
// use the jit-compiled jiterator kernel (or an op-math-upcasting precompiled
// kernel when AT_USE_JITERATOR is off); real floating dtypes (incl.
// half/bfloat16) call ::sinh directly.
void sinh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto sinh_string = jiterator_stringify(
        template <typename T>
        T sinh(T a) {
          return std::sinh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sinh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ sinh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, sinh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "sinh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::sinh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "sinh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::sinh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char cosh_name[] = "cosh";
// Elementwise hyperbolic cosine on CUDA: complex dtypes (incl. complex-half)
// use the jit-compiled jiterator kernel (or an op-math-upcasting precompiled
// kernel when AT_USE_JITERATOR is off); real floating dtypes (incl.
// half/bfloat16) call ::cosh directly.
void cosh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto cosh_string = jiterator_stringify(
        template <typename T>
        T cosh(T a) {
          return std::cosh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cosh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ cosh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, cosh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "cosh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::cosh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "cosh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::cosh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char tanh_name[] = "tanh";
// Elementwise hyperbolic tangent on CUDA: complex dtypes (incl. complex-half)
// use the jit-compiled jiterator kernel (or an op-math-upcasting precompiled
// kernel when AT_USE_JITERATOR is off); real floating dtypes (incl.
// half/bfloat16) call ::tanh directly.
void tanh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto tanh_string = jiterator_stringify(
        template <typename T>
        T tanh(T a) {
          return std::tanh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tanh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ tanh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, tanh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tanh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::tanh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "tanh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::tanh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char acosh_name[] = "acosh";
// Elementwise inverse hyperbolic cosine on CUDA: complex dtypes (incl.
// complex-half) use the jit-compiled jiterator kernel (or an op-math-
// upcasting precompiled kernel when AT_USE_JITERATOR is off); real floating
// dtypes (incl. half/bfloat16) call ::acosh directly.
void acosh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto acosh_string = jiterator_stringify(
        template <typename T>
        T acosh(T a) {
          return std::acosh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ acosh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, acosh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::acosh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "acosh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::acosh(a);
          });
        });
  }
}
// NVRTC kernel-name string used by the jiterator path below.
const char asinh_name[] = "asinh";
// Elementwise inverse hyperbolic sine on CUDA: complex dtypes (incl.
// complex-half) use the jit-compiled jiterator kernel (or an op-math-
// upcasting precompiled kernel when AT_USE_JITERATOR is off); real floating
// dtypes (incl. half/bfloat16) call ::asinh directly.
void asinh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
    static const auto asinh_string = jiterator_stringify(
        template <typename T>
        T asinh(T a) {
          return std::asinh(a);
        }
    );
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asinh_name", [&]() {
      jitted_gpu_kernel<
          /*name=*/ asinh_name,
          /*return_dtype=*/ scalar_t,
          /*common_dtype=*/ scalar_t,
          /*arity=*/ 1>(iter, asinh_string);
    });
#else
    AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "asinh_name", [&]() {
      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
        using opmath_t = at::opmath_type<scalar_t>;
        return ::asinh(static_cast<opmath_t>(a));
      });
    });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
        ScalarType::Half, ScalarType::BFloat16,
        common_dtype, "asinh_cuda",
        [&]() {
          gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
            return ::asinh(a);
          });
        });
  }
}
// Kernel name used by the jiterator (NVRTC) path below.
const char atanh_name[] = "atanh";
// Elementwise inverse hyperbolic tangent.  Complex dtypes go through the
// jiterator when enabled, otherwise a precompiled gpu_kernel upcasting to
// opmath; real floating dtypes call ::atanh directly.
void atanh_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if(at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
  static const auto atanh_string = jiterator_stringify(
    template <typename T>
    T atanh(T a) {
        return std::atanh(a);
    }
  );
  AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atanh_name", [&]() {
    jitted_gpu_kernel<
        /*name=*/ atanh_name,
        /*return_dtype=*/ scalar_t,
        /*common_dtype=*/ scalar_t,
        /*arity=*/ 1>(iter, atanh_string);
  });
#else
  AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atanh_name", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      // Compute in the higher-precision opmath type.
      using opmath_t = at::opmath_type<scalar_t>;
      return ::atanh(static_cast<opmath_t>(a));
    });
  });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half, ScalarType::BFloat16,
      common_dtype, "atanh_cuda",
      [&]() {
        gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
          return ::atanh(a);
        });
      });
  }
}
// Kernel name used by the jiterator (NVRTC) path below.
const char tan_name[] = "tan";
// Elementwise tangent.  Complex dtypes use the jiterator when enabled,
// otherwise a precompiled gpu_kernel upcasting to opmath; real floating
// dtypes call ::tan directly.
void tan_kernel_cuda(TensorIteratorBase& iter) {
  auto common_dtype = iter.common_dtype();
  if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
  static const auto tan_string = jiterator_stringify(
    template <typename T>
    T tan(T a) {
        return std::tan(a);
    }
  );
  AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tan_name", [&]() {
    jitted_gpu_kernel<
        /*name=*/ tan_name,
        /*return_dtype=*/ scalar_t,
        /*common_dtype=*/ scalar_t,
        /*arity=*/ 1>(iter, tan_string);
  });
#else
  AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "tan_name", [&]() {
    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      // Compute in the higher-precision opmath type.
      using opmath_t = at::opmath_type<scalar_t>;
      return ::tan(static_cast<opmath_t>(a));
    });
  });
#endif
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(
      ScalarType::Half, ScalarType::BFloat16,
      common_dtype, "tan_cuda",
      [&]() {
        gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
          return ::tan(a);
        });
      });
  }
}
// Register each unary-op dispatch stub with its CUDA implementation above.
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
}} // namespace at::native
|
1da425b23abb3aee9e7659fc3d2415945cb2b3cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Demos\GPU_RT_Demo.h"
#include <hiprand/hiprand_kernel.h> // cuRAND
#include "ppm/ppm.hpp"
using namespace RA;
// credit Roger Allen for the original
// credit: https://developer.nvidia.com/blog/accelerated-ray-tracing-cuda/
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Error handler behind the checkCudaErrors macro: on any non-success HIP
// result, prints the numeric error code plus the stringified call site
// (func) and its file:line, resets the device, and aborts with exit(99).
void check_cuda(hipError_t result, char const* const func, const char* const file, int const line)
{
    if (result)
    {
        std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
            file << ":" << line << " '" << func << "' \n";
        // Make sure we call CUDA Device Reset before exiting
        hipDeviceReset();
        exit(99);
    }
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
//
// Traces `r` through the scene, accumulating material attenuation per
// bounce; rays that escape return the attenuated sky gradient, absorbed
// rays and rays exceeding 50 bounces return black.
__device__ vec3 color(const ray& r, hitable** world, hiprandState_t* local_rand_state) {
    ray cur_ray = r;
    vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
    for (int i = 0; i < 50; i++) {
        hit_record rec;
        // 0.001 lower bound avoids self-intersection ("shadow acne").
        if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
            ray scattered;
            vec3 attenuation;
            if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
                cur_attenuation *= attenuation;
                cur_ray = scattered;
            }
            else {
                // Ray absorbed by the material.
                return vec3(0.0, 0.0, 0.0);
            }
        }
        else {
            // Miss: blend white->blue by the ray's vertical direction (sky).
            vec3 unit_direction = unit_vector(cur_ray.direction());
            float t = 0.5f * (unit_direction.y() + 1.0f);
            vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
            return cur_attenuation * c;
        }
    }
    return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
// Seeds the single RNG state used by world construction.  Launched <<<1,1>>>;
// the thread-0 guard makes it safe under any launch configuration.
__global__ void rand_init(hiprandState_t* rand_state) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        hiprand_init(1984, 0, 0, rand_state);
    }
}
// Initializes one RNG state per pixel for the render kernel.
// Expects a 2D grid/block layout covering a max_x by max_y image.
__global__ void render_init(int max_x, int max_y, hiprandState_t* rand_state)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;
    // Original: Each thread gets same seed, a different sequence number, no offset
    // hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
    // BUGFIX, see Issue#2: Each thread gets different seed, same sequence for
    // performance improvement of about 2x!
    hiprand_init(1984 + pixel_index, 0, 0, &rand_state[pixel_index]);
}
// Per-pixel render kernel: averages `ns` jittered camera-ray samples,
// gamma-corrects with sqrt (gamma 2.0), and writes the result into fb.
// Expects a 2D grid/block layout covering a max_x by max_y image and a
// per-pixel rand_state previously filled by render_init.
__global__ void render(vec3* fb, int max_x, int max_y, int ns, RA::camera** cam, hitable** world, hiprandState_t* rand_state)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;
    // Work on a local copy of the RNG state and write it back once at the end.
    hiprandState_t local_rand_state = rand_state[pixel_index];
    vec3 col(0, 0, 0);
    for (int s = 0; s < ns; s++) {
        // Jitter the sample position within the pixel for antialiasing.
        float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
        float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
        ray r = (*cam)->get_ray(u, v, &local_rand_state);
        col += color(r, world, &local_rand_state);
    }
    rand_state[pixel_index] = local_rand_state;
    col /= float(ns);
    // Gamma 2.0 correction.
    col[0] = sqrt(col[0]);
    col[1] = sqrt(col[1]);
    col[2] = sqrt(col[2]);
    fb[pixel_index] = col;
}
#define RND (hiprand_uniform(&local_rand_state))
// Builds the book's randomized scene (a 22x22 grid of small spheres, one
// ground sphere, three large spheres) plus the camera, entirely on the
// device so the virtual dispatch tables live in device memory.
// Launched <<<1,1>>>; the thread-0 guard makes it single-threaded.
__global__ void create_world(hitable** d_list, hitable** d_world, RA::camera** d_camera, int nx, int ny, hiprandState_t* rand_state) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        hiprandState_t local_rand_state = *rand_state;
        d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
            new lambertian(vec3(0.5, 0.5, 0.5)));
        int i = 1;
        for (int a = -11; a < 11; a++) {
            for (int b = -11; b < 11; b++) {
                // choose_mat picks diffuse (<0.8), metal (<0.95) or glass.
                float choose_mat = RND;
                vec3 center(a + RND, 0.2, b + RND);
                if (choose_mat < 0.8f) {
                    d_list[i++] = new sphere(center, 0.2,
                        new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
                }
                else if (choose_mat < 0.95f) {
                    d_list[i++] = new sphere(center, 0.2,
                        new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND));
                }
                else {
                    d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
                }
            }
        }
        d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
        d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
        d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
        *rand_state = local_rand_state;
        *d_world = new hitable_list(d_list, 22 * 22 + 1 + 3);
        vec3 lookfrom(13, 2, 3);
        vec3 lookat(0, 0, 0);
        // NOTE(review): the trailing "(lookfrom - lookat).length();" is a
        // discarded expression statement with no effect; dist_to_focus is
        // simply 10.0.  Left as-is to preserve behavior.
        float dist_to_focus = 10.0; (lookfrom - lookat).length();
        float aperture = 0.1;
        *d_camera = new RA::camera(lookfrom,
            lookat,
            vec3(0, 1, 0),
            30.0,
            float(nx) / float(ny),
            aperture,
            dist_to_focus);
    }
}
// Frees every device-side object created by create_world: each sphere's
// material (accessed via the sphere cast), each hitable, then the world
// list and the camera.  Launched <<<1,1>>> so the loop runs serially.
__global__ void free_world(hitable** d_list, hitable** d_world, RA::camera** d_camera)
{
    for (int i = 0; i < 22 * 22 + 1 + 3; i++)
    {
        delete ((sphere*)d_list[i])->mat_ptr;
        delete d_list[i];
    }
    delete* d_world;
    delete* d_camera;
}
// Runs the whole demo in the constructor: allocates device buffers,
// builds the scene on the GPU, renders nx x ny with ns samples/pixel,
// reads the framebuffer back (managed memory), writes "gpu_test.ppm",
// and tears everything down.
GPU_RT_Demo::GPU_RT_Demo()
{
    int nx = 1200;  // image width
    int ny = 800;   // image height
    int ns = 100;   // samples per pixel
    int tx = 24;    // threads per block, x
    int ty = 24;    // threads per block, y
    std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
    std::cerr << "in " << tx << "x" << ty << " blocks.\n";
    int num_pixels = nx * ny;
    size_t fb_size = num_pixels * sizeof(vec3);
    // allocate FB (managed memory so the host loop below can read it directly)
    vec3* fb;
    checkCudaErrors(hipMallocManaged((void**)&fb, fb_size));
    // allocate random state
    hiprandState_t* d_rand_state;
    checkCudaErrors(hipMalloc((void**)&d_rand_state, num_pixels * sizeof(hiprandState_t)));
    hiprandState_t* d_rand_state2;
    checkCudaErrors(hipMalloc((void**)&d_rand_state2, 1 * sizeof(hiprandState_t)));
    // we need that 2nd random state to be initialized for the world creation
    hipLaunchKernelGGL(( rand_init), dim3(1), dim3(1), 0, 0, d_rand_state2);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    // make our world of hitables & the camera
    hitable** d_list;
    int num_hitables = 22 * 22 + 1 + 3;
    checkCudaErrors(hipMalloc((void**)&d_list, num_hitables * sizeof(hitable*)));
    hitable** d_world;
    checkCudaErrors(hipMalloc((void**)&d_world, sizeof(hitable*)));
    RA::camera** d_camera;
    checkCudaErrors(hipMalloc((void**)&d_camera, sizeof(RA::camera*)));
    hipLaunchKernelGGL(( create_world), dim3(1), dim3(1), 0, 0, d_list, d_world, d_camera, nx, ny, d_rand_state2);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    clock_t start, stop;
    start = clock();
    // Render our buffer; +1 block per axis covers sizes not divisible by tx/ty
    dim3 blocks(nx / tx + 1, ny / ty + 1);
    dim3 threads(tx, ty);
    hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_rand_state);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, ns, d_camera, d_world, d_rand_state);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    ppm img;
    img.w = nx;
    img.h = ny;
    img.magic = "P3";
    img.max = 255;
    img.capacity = img.w * img.h * img.nchannels;
    std::vector<unsigned char> data;
    // Output FB as Image (rows written bottom-up so the image is upright)
    //std::cout << "P3\n" << nx << " " << ny << "\n255\n";
    for (int j = ny - 1; j >= 0; j--)
    {
        for (int i = 0; i < nx; i++)
        {
            size_t pixel_index = j * nx + i;
            int ir = int(255.99 * fb[pixel_index].r());
            int ig = int(255.99 * fb[pixel_index].g());
            int ib = int(255.99 * fb[pixel_index].b());
            data.push_back(static_cast<int>(ir));
            data.push_back(static_cast<int>(ig));
            data.push_back(static_cast<int>(ib));
            //std::cout << ir << " " << ig << " " << ib << "\n";
        }
    }
    // NOTE(review): the timer includes the host-side readback loop above,
    // not just the GPU render.
    stop = clock();
    double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
    std::cerr << "took " << timer_seconds << " seconds.\n";
    img.write("gpu_test.ppm", data);
    // clean up
    checkCudaErrors(hipDeviceSynchronize());
    hipLaunchKernelGGL(( free_world), dim3(1), dim3(1), 0, 0, d_list, d_world, d_camera);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipFree(d_camera));
    checkCudaErrors(hipFree(d_world));
    checkCudaErrors(hipFree(d_list));
    checkCudaErrors(hipFree(d_rand_state));
    checkCudaErrors(hipFree(d_rand_state2));
    checkCudaErrors(hipFree(fb));
    hipDeviceReset();
}
// Nothing to release: the constructor frees all device resources itself.
GPU_RT_Demo::~GPU_RT_Demo()
{
}
| 1da425b23abb3aee9e7659fc3d2415945cb2b3cb.cu | #include "Demos\GPU_RT_Demo.h"
#include <curand_kernel.h> // cuRAND
#include "ppm/ppm.hpp"
using namespace RA;
// credit Roger Allen for the original
// credit: https://developer.nvidia.com/blog/accelerated-ray-tracing-cuda/
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Error handler behind the checkCudaErrors macro: on any non-success CUDA
// result, prints the numeric error code plus the stringified call site
// (func) and its file:line, resets the device, and aborts with exit(99).
void check_cuda(cudaError_t result, char const* const func, const char* const file, int const line)
{
    if (result)
    {
        std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
            file << ":" << line << " '" << func << "' \n";
        // Make sure we call CUDA Device Reset before exiting
        cudaDeviceReset();
        exit(99);
    }
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
//
// Traces `r` through the scene, accumulating material attenuation per
// bounce; rays that escape return the attenuated sky gradient, absorbed
// rays and rays exceeding 50 bounces return black.
__device__ vec3 color(const ray& r, hitable** world, curandState* local_rand_state) {
    ray cur_ray = r;
    vec3 cur_attenuation = vec3(1.0, 1.0, 1.0);
    for (int i = 0; i < 50; i++) {
        hit_record rec;
        // 0.001 lower bound avoids self-intersection ("shadow acne").
        if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
            ray scattered;
            vec3 attenuation;
            if (rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
                cur_attenuation *= attenuation;
                cur_ray = scattered;
            }
            else {
                // Ray absorbed by the material.
                return vec3(0.0, 0.0, 0.0);
            }
        }
        else {
            // Miss: blend white->blue by the ray's vertical direction (sky).
            vec3 unit_direction = unit_vector(cur_ray.direction());
            float t = 0.5f * (unit_direction.y() + 1.0f);
            vec3 c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
            return cur_attenuation * c;
        }
    }
    return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
// Seeds the single RNG state used by world construction.  Launched <<<1,1>>>;
// the thread-0 guard makes it safe under any launch configuration.
__global__ void rand_init(curandState* rand_state) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        curand_init(1984, 0, 0, rand_state);
    }
}
// Initializes one RNG state per pixel for the render kernel.
// Expects a 2D grid/block layout covering a max_x by max_y image.
__global__ void render_init(int max_x, int max_y, curandState* rand_state)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;
    // Original: Each thread gets same seed, a different sequence number, no offset
    // curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
    // BUGFIX, see Issue#2: Each thread gets different seed, same sequence for
    // performance improvement of about 2x!
    curand_init(1984 + pixel_index, 0, 0, &rand_state[pixel_index]);
}
// Per-pixel render kernel: averages `ns` jittered camera-ray samples,
// gamma-corrects with sqrt (gamma 2.0), and writes the result into fb.
// Expects a 2D grid/block layout covering a max_x by max_y image and a
// per-pixel rand_state previously filled by render_init.
__global__ void render(vec3* fb, int max_x, int max_y, int ns, RA::camera** cam, hitable** world, curandState* rand_state)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;
    // Work on a local copy of the RNG state and write it back once at the end.
    curandState local_rand_state = rand_state[pixel_index];
    vec3 col(0, 0, 0);
    for (int s = 0; s < ns; s++) {
        // Jitter the sample position within the pixel for antialiasing.
        float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
        float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
        ray r = (*cam)->get_ray(u, v, &local_rand_state);
        col += color(r, world, &local_rand_state);
    }
    rand_state[pixel_index] = local_rand_state;
    col /= float(ns);
    // Gamma 2.0 correction.
    col[0] = sqrt(col[0]);
    col[1] = sqrt(col[1]);
    col[2] = sqrt(col[2]);
    fb[pixel_index] = col;
}
#define RND (curand_uniform(&local_rand_state))
// Builds the book's randomized scene (a 22x22 grid of small spheres, one
// ground sphere, three large spheres) plus the camera, entirely on the
// device so the virtual dispatch tables live in device memory.
// Launched <<<1,1>>>; the thread-0 guard makes it single-threaded.
__global__ void create_world(hitable** d_list, hitable** d_world, RA::camera** d_camera, int nx, int ny, curandState* rand_state) {
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        curandState local_rand_state = *rand_state;
        d_list[0] = new sphere(vec3(0, -1000.0, -1), 1000,
            new lambertian(vec3(0.5, 0.5, 0.5)));
        int i = 1;
        for (int a = -11; a < 11; a++) {
            for (int b = -11; b < 11; b++) {
                // choose_mat picks diffuse (<0.8), metal (<0.95) or glass.
                float choose_mat = RND;
                vec3 center(a + RND, 0.2, b + RND);
                if (choose_mat < 0.8f) {
                    d_list[i++] = new sphere(center, 0.2,
                        new lambertian(vec3(RND * RND, RND * RND, RND * RND)));
                }
                else if (choose_mat < 0.95f) {
                    d_list[i++] = new sphere(center, 0.2,
                        new metal(vec3(0.5f * (1.0f + RND), 0.5f * (1.0f + RND), 0.5f * (1.0f + RND)), 0.5f * RND));
                }
                else {
                    d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
                }
            }
        }
        d_list[i++] = new sphere(vec3(0, 1, 0), 1.0, new dielectric(1.5));
        d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
        d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
        *rand_state = local_rand_state;
        *d_world = new hitable_list(d_list, 22 * 22 + 1 + 3);
        vec3 lookfrom(13, 2, 3);
        vec3 lookat(0, 0, 0);
        // NOTE(review): the trailing "(lookfrom - lookat).length();" is a
        // discarded expression statement with no effect; dist_to_focus is
        // simply 10.0.  Left as-is to preserve behavior.
        float dist_to_focus = 10.0; (lookfrom - lookat).length();
        float aperture = 0.1;
        *d_camera = new RA::camera(lookfrom,
            lookat,
            vec3(0, 1, 0),
            30.0,
            float(nx) / float(ny),
            aperture,
            dist_to_focus);
    }
}
// Frees every device-side object created by create_world: each sphere's
// material (accessed via the sphere cast), each hitable, then the world
// list and the camera.  Launched <<<1,1>>> so the loop runs serially.
__global__ void free_world(hitable** d_list, hitable** d_world, RA::camera** d_camera)
{
    for (int i = 0; i < 22 * 22 + 1 + 3; i++)
    {
        delete ((sphere*)d_list[i])->mat_ptr;
        delete d_list[i];
    }
    delete* d_world;
    delete* d_camera;
}
// Runs the whole demo in the constructor: allocates device buffers,
// builds the scene on the GPU, renders nx x ny with ns samples/pixel,
// reads the framebuffer back (managed memory), writes "gpu_test.ppm",
// and tears everything down.
GPU_RT_Demo::GPU_RT_Demo()
{
    int nx = 1200;  // image width
    int ny = 800;   // image height
    int ns = 100;   // samples per pixel
    int tx = 24;    // threads per block, x
    int ty = 24;    // threads per block, y
    std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
    std::cerr << "in " << tx << "x" << ty << " blocks.\n";
    int num_pixels = nx * ny;
    size_t fb_size = num_pixels * sizeof(vec3);
    // allocate FB (managed memory so the host loop below can read it directly)
    vec3* fb;
    checkCudaErrors(cudaMallocManaged((void**)&fb, fb_size));
    // allocate random state
    curandState* d_rand_state;
    checkCudaErrors(cudaMalloc((void**)&d_rand_state, num_pixels * sizeof(curandState)));
    curandState* d_rand_state2;
    checkCudaErrors(cudaMalloc((void**)&d_rand_state2, 1 * sizeof(curandState)));
    // we need that 2nd random state to be initialized for the world creation
    rand_init<<<1, 1>>>(d_rand_state2);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    // make our world of hitables & the camera
    hitable** d_list;
    int num_hitables = 22 * 22 + 1 + 3;
    checkCudaErrors(cudaMalloc((void**)&d_list, num_hitables * sizeof(hitable*)));
    hitable** d_world;
    checkCudaErrors(cudaMalloc((void**)&d_world, sizeof(hitable*)));
    RA::camera** d_camera;
    checkCudaErrors(cudaMalloc((void**)&d_camera, sizeof(RA::camera*)));
    create_world<<<1, 1>>>(d_list, d_world, d_camera, nx, ny, d_rand_state2);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    clock_t start, stop;
    start = clock();
    // Render our buffer; +1 block per axis covers sizes not divisible by tx/ty
    dim3 blocks(nx / tx + 1, ny / ty + 1);
    dim3 threads(tx, ty);
    render_init<<<blocks, threads>>>(nx, ny, d_rand_state);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    render<<<blocks, threads>>>(fb, nx, ny, ns, d_camera, d_world, d_rand_state);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    ppm img;
    img.w = nx;
    img.h = ny;
    img.magic = "P3";
    img.max = 255;
    img.capacity = img.w * img.h * img.nchannels;
    std::vector<unsigned char> data;
    // Output FB as Image (rows written bottom-up so the image is upright)
    //std::cout << "P3\n" << nx << " " << ny << "\n255\n";
    for (int j = ny - 1; j >= 0; j--)
    {
        for (int i = 0; i < nx; i++)
        {
            size_t pixel_index = j * nx + i;
            int ir = int(255.99 * fb[pixel_index].r());
            int ig = int(255.99 * fb[pixel_index].g());
            int ib = int(255.99 * fb[pixel_index].b());
            data.push_back(static_cast<int>(ir));
            data.push_back(static_cast<int>(ig));
            data.push_back(static_cast<int>(ib));
            //std::cout << ir << " " << ig << " " << ib << "\n";
        }
    }
    // NOTE(review): the timer includes the host-side readback loop above,
    // not just the GPU render.
    stop = clock();
    double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
    std::cerr << "took " << timer_seconds << " seconds.\n";
    img.write("gpu_test.ppm", data);
    // clean up
    checkCudaErrors(cudaDeviceSynchronize());
    free_world<<<1, 1>>>(d_list, d_world, d_camera);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaFree(d_camera));
    checkCudaErrors(cudaFree(d_world));
    checkCudaErrors(cudaFree(d_list));
    checkCudaErrors(cudaFree(d_rand_state));
    checkCudaErrors(cudaFree(d_rand_state2));
    checkCudaErrors(cudaFree(fb));
    cudaDeviceReset();
}
// Nothing to release: the constructor frees all device resources itself.
GPU_RT_Demo::~GPU_RT_Demo()
{
}
|
5aa685db8637024966b95c0e617550b502c7f2f2.hip | // !!! This is a file automatically generated by hipify!!!
/*
Provides an interface to the CUFFT package.
Testing examples can be found in ~src/mat/examples/tests
*/
#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
EXTERN_C_BEGIN
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
EXTERN_C_END
typedef struct {
PetscInt ndim;
PetscInt *dim;
hipfftHandle p_forward, p_backward;
hipfftComplex *devArray;
} Mat_CUFFT;
#undef __FUNCT__
#define __FUNCT__ "MatMult_SeqCUFFT"
/*
  MatMult_SeqCUFFT - Applies the forward complex FFT: y = F x.
  The forward plan is created lazily on first use and cached on the Mat.
  NOTE(review): PetscScalar is copied as hipfftComplex, which presumably
  requires a complex single-precision PETSc build -- confirm.  The
  hipMemcpy return codes are not checked.  dim[ndim] caches the total
  vector length m (set in MatCreateSeqCUFFT).
*/
PetscErrorCode MatMult_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  hipfftComplex  *devArray = cufft->devArray;
  PetscInt       ndim = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  hipfftResult   result;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_forward) {
    hipfftResult result;
    /* create a plan, then execute it */
    switch (ndim) {
    case 1:
      result = hipfftPlan1d(&cufft->p_forward, dim[0], HIPFFT_C2C, 1);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 2:
      result = hipfftPlan2d(&cufft->p_forward, dim[0], dim[1], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 3:
      result = hipfftPlan3d(&cufft->p_forward, dim[0], dim[1], dim[2], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory */
  hipMemcpy(devArray, x_array, sizeof(hipfftComplex)*dim[ndim], hipMemcpyHostToDevice);
  /* execute transform */
  result = hipfftExecC2C(cufft->p_forward, devArray, devArray, HIPFFT_FORWARD);CHKERRQ(result != HIPFFT_SUCCESS);
  /* transfer from GPU memory */
  hipMemcpy(y_array, devArray, sizeof(hipfftComplex)*dim[ndim], hipMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMultTranspose_SeqCUFFT"
/*
  MatMultTranspose_SeqCUFFT - Applies the backward (inverse, unnormalized)
  complex FFT: y = F^H x.  The backward plan is created lazily on first use
  and cached on the Mat.

  Bug fix: the original code executed cufft->p_forward here, i.e. the wrong
  plan -- and a zero/uninitialized handle if MatMult() had never been
  called.  The lazily created backward plan (cufft->p_backward) is now the
  one that is executed.
*/
PetscErrorCode MatMultTranspose_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  hipfftComplex  *devArray = cufft->devArray;
  PetscInt       ndim = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  hipfftResult   result;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_backward) {
    /* create the backward plan once; dim[] holds the transform extents */
    switch (ndim) {
    case 1:
      result = hipfftPlan1d(&cufft->p_backward, dim[0], HIPFFT_C2C, 1);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 2:
      result = hipfftPlan2d(&cufft->p_backward, dim[0], dim[1], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    case 3:
      result = hipfftPlan3d(&cufft->p_backward, dim[0], dim[1], dim[2], HIPFFT_C2C);CHKERRQ(result != HIPFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory; dim[ndim] caches the total vector length m */
  hipMemcpy(devArray, x_array, sizeof(hipfftComplex)*dim[ndim], hipMemcpyHostToDevice);
  /* execute the backward transform with the backward plan (was p_forward) */
  result = hipfftExecC2C(cufft->p_backward, devArray, devArray, HIPFFT_BACKWARD);CHKERRQ(result != HIPFFT_SUCCESS);
  /* transfer from GPU memory */
  hipMemcpy(y_array, devArray, sizeof(hipfftComplex)*dim[ndim], hipMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqCUFFT"
/*
  MatDestroy_SeqCUFFT - Releases everything MatCreateSeqCUFFT allocated:
  the dim array, any lazily created FFT plans, the device scratch buffer,
  and the Mat_CUFFT context itself; finally clears the type name.
*/
PetscErrorCode MatDestroy_SeqCUFFT(Mat A)
{
  Mat_CUFFT      *cufft = (Mat_CUFFT*) A->data;
  hipfftResult   result;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = PetscFree(cufft->dim);CHKERRQ(ierr);
  if (cufft->p_forward) {result = hipfftDestroy(cufft->p_forward);CHKERRQ(result != HIPFFT_SUCCESS);}
  if (cufft->p_backward) {result = hipfftDestroy(cufft->p_backward);CHKERRQ(result != HIPFFT_SUCCESS);}
  hipFree(cufft->devArray);
  ierr = PetscFree(A->data);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatCreateSeqCUFFT"
/*@
  MatCreateSeqCUFFT - Creates a matrix object that provides sequential FFT via the external package CUFFT
  Collective on MPI_Comm
  Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. ndim - the ndim-dimensional transform
- dim - array of size ndim, dim[i] contains the vector length in the i-dimension
  Output Parameter:
. A - the matrix
  Options Database Keys:
. -mat_cufft_plannerflags - set CUFFT planner flags
  Level: intermediate
@*/
PetscErrorCode MatCreateSeqCUFFT(MPI_Comm comm, PetscInt ndim, const PetscInt dim[], Mat *A)
{
  Mat_CUFFT      *cufft;
  PetscInt       m, d;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  /* NOTE(review): the guard rejects ndim < 0 but the message says
     "must be > 0"; ndim == 0 is accepted (m stays 1) and only fails
     later in the plan-creation switch. */
  if (ndim < 0) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "ndim %d must be > 0", ndim);
  ierr = MatCreate(comm, A);CHKERRQ(ierr);
  m    = 1;
  for (d = 0; d < ndim; ++d) {
    if (dim[d] < 0) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_USER, "dim[%d]=%d must be > 0", d, dim[d]);
    m *= dim[d];
  }
  ierr = MatSetSizes(*A, m, m, m, m);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)*A, MATSEQCUFFT);CHKERRQ(ierr);
  ierr = PetscNewLog(*A,&cufft);CHKERRQ(ierr);
  (*A)->data = (void*) cufft;
  /* dim has ndim+1 slots: dim[0..ndim-1] are the extents, dim[ndim] = m */
  ierr = PetscMalloc1(ndim+1, &cufft->dim);CHKERRQ(ierr);
  ierr = PetscMemcpy(cufft->dim, dim, ndim*sizeof(PetscInt));CHKERRQ(ierr);
  cufft->ndim       = ndim;
  cufft->p_forward  = 0;
  cufft->p_backward = 0;
  cufft->dim[ndim]  = m;
  /* GPU memory allocation */
  hipMalloc((void**) &cufft->devArray, sizeof(hipfftComplex)*m);
  (*A)->ops->mult          = MatMult_SeqCUFFT;
  (*A)->ops->multtranspose = MatMultTranspose_SeqCUFFT;
  (*A)->assembled          = PETSC_TRUE;
  (*A)->ops->destroy       = MatDestroy_SeqCUFFT;
  /* get runtime options */
  ierr = PetscOptionsBegin(comm, ((PetscObject)(*A))->prefix, "CUFFT Options", "Mat");CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
| 5aa685db8637024966b95c0e617550b502c7f2f2.cu |
/*
Provides an interface to the CUFFT package.
Testing examples can be found in ~src/mat/examples/tests
*/
#include <petsc/private/matimpl.h> /*I "petscmat.h" I*/
EXTERN_C_BEGIN
#include <cuda.h>
#include <cuda_runtime.h>
#include <cufft.h>
EXTERN_C_END
typedef struct {
PetscInt ndim;
PetscInt *dim;
cufftHandle p_forward, p_backward;
cufftComplex *devArray;
} Mat_CUFFT;
#undef __FUNCT__
#define __FUNCT__ "MatMult_SeqCUFFT"
/*
  MatMult_SeqCUFFT - Applies the forward complex FFT: y = F x.
  The forward plan is created lazily on first use and cached on the Mat.
  NOTE(review): PetscScalar is copied as cufftComplex, which presumably
  requires a complex single-precision PETSc build -- confirm.  The
  cudaMemcpy return codes are not checked.  dim[ndim] caches the total
  vector length m (set in MatCreateSeqCUFFT).
*/
PetscErrorCode MatMult_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  cufftComplex   *devArray = cufft->devArray;
  PetscInt       ndim = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  cufftResult    result;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_forward) {
    cufftResult result;
    /* create a plan, then execute it */
    switch (ndim) {
    case 1:
      result = cufftPlan1d(&cufft->p_forward, dim[0], CUFFT_C2C, 1);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 2:
      result = cufftPlan2d(&cufft->p_forward, dim[0], dim[1], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 3:
      result = cufftPlan3d(&cufft->p_forward, dim[0], dim[1], dim[2], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory */
  cudaMemcpy(devArray, x_array, sizeof(cufftComplex)*dim[ndim], cudaMemcpyHostToDevice);
  /* execute transform */
  result = cufftExecC2C(cufft->p_forward, devArray, devArray, CUFFT_FORWARD);CHKERRQ(result != CUFFT_SUCCESS);
  /* transfer from GPU memory */
  cudaMemcpy(y_array, devArray, sizeof(cufftComplex)*dim[ndim], cudaMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMultTranspose_SeqCUFFT"
/*
  MatMultTranspose_SeqCUFFT - Applies the inverse (unnormalized backward)
  complex FFT: y = F^H x.  The backward plan is created lazily on first use
  and cached on the Mat.

  Bug fix: the original code executed cufft->p_forward here, i.e. the wrong
  plan -- and a zero/uninitialized handle if MatMult() had never been
  called.  The lazily created backward plan (cufft->p_backward) is now the
  one that is executed.
*/
PetscErrorCode MatMultTranspose_SeqCUFFT(Mat A, Vec x, Vec y)
{
  Mat_CUFFT      *cufft    = (Mat_CUFFT*) A->data;
  cufftComplex   *devArray = cufft->devArray;
  PetscInt       ndim = cufft->ndim, *dim = cufft->dim;
  PetscScalar    *x_array, *y_array;
  cufftResult    result;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = VecGetArray(x, &x_array);CHKERRQ(ierr);
  ierr = VecGetArray(y, &y_array);CHKERRQ(ierr);
  if (!cufft->p_backward) {
    /* create the backward plan once; dim[] holds the transform extents */
    switch (ndim) {
    case 1:
      result = cufftPlan1d(&cufft->p_backward, dim[0], CUFFT_C2C, 1);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 2:
      result = cufftPlan2d(&cufft->p_backward, dim[0], dim[1], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    case 3:
      result = cufftPlan3d(&cufft->p_backward, dim[0], dim[1], dim[2], CUFFT_C2C);CHKERRQ(result != CUFFT_SUCCESS);
      break;
    default:
      SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "Cannot create plan for %d-dimensional transform", ndim);
    }
  }
  /* transfer to GPU memory; dim[ndim] caches the total vector length m */
  cudaMemcpy(devArray, x_array, sizeof(cufftComplex)*dim[ndim], cudaMemcpyHostToDevice);
  /* execute the inverse transform with the backward plan (was p_forward) */
  result = cufftExecC2C(cufft->p_backward, devArray, devArray, CUFFT_INVERSE);CHKERRQ(result != CUFFT_SUCCESS);
  /* transfer from GPU memory */
  cudaMemcpy(y_array, devArray, sizeof(cufftComplex)*dim[ndim], cudaMemcpyDeviceToHost);
  ierr = VecRestoreArray(y, &y_array);CHKERRQ(ierr);
  ierr = VecRestoreArray(x, &x_array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqCUFFT"
/*
  MatDestroy_SeqCUFFT - Releases everything MatCreateSeqCUFFT allocated:
  the dim array, any lazily created FFT plans, the device scratch buffer,
  and the Mat_CUFFT context itself; finally clears the type name.
*/
PetscErrorCode MatDestroy_SeqCUFFT(Mat A)
{
  Mat_CUFFT      *cufft = (Mat_CUFFT*) A->data;
  cufftResult    result;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  ierr = PetscFree(cufft->dim);CHKERRQ(ierr);
  if (cufft->p_forward) {result = cufftDestroy(cufft->p_forward);CHKERRQ(result != CUFFT_SUCCESS);}
  if (cufft->p_backward) {result = cufftDestroy(cufft->p_backward);CHKERRQ(result != CUFFT_SUCCESS);}
  cudaFree(cufft->devArray);
  ierr = PetscFree(A->data);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatCreateSeqCUFFT"
/*@
  MatCreateSeqCUFFT - Creates a matrix object that provides sequential FFT via the external package CUFFT
  Collective on MPI_Comm
  Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. ndim - the ndim-dimensional transform
- dim - array of size ndim, dim[i] contains the vector length in the i-dimension
  Output Parameter:
. A - the matrix
  Options Database Keys:
. -mat_cufft_plannerflags - set CUFFT planner flags
  Level: intermediate
@*/
PetscErrorCode MatCreateSeqCUFFT(MPI_Comm comm, PetscInt ndim, const PetscInt dim[], Mat *A)
{
  Mat_CUFFT      *cufft;
  PetscInt       m, d;
  PetscErrorCode ierr;
  PetscFunctionBegin;
  /* NOTE(review): the guard rejects ndim < 0 but the message says
     "must be > 0"; ndim == 0 is accepted (m stays 1) and only fails
     later in the plan-creation switch. */
  if (ndim < 0) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_USER, "ndim %d must be > 0", ndim);
  ierr = MatCreate(comm, A);CHKERRQ(ierr);
  m    = 1;
  for (d = 0; d < ndim; ++d) {
    if (dim[d] < 0) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_USER, "dim[%d]=%d must be > 0", d, dim[d]);
    m *= dim[d];
  }
  ierr = MatSetSizes(*A, m, m, m, m);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)*A, MATSEQCUFFT);CHKERRQ(ierr);
  ierr = PetscNewLog(*A,&cufft);CHKERRQ(ierr);
  (*A)->data = (void*) cufft;
  /* dim has ndim+1 slots: dim[0..ndim-1] are the extents, dim[ndim] = m */
  ierr = PetscMalloc1(ndim+1, &cufft->dim);CHKERRQ(ierr);
  ierr = PetscMemcpy(cufft->dim, dim, ndim*sizeof(PetscInt));CHKERRQ(ierr);
  cufft->ndim       = ndim;
  cufft->p_forward  = 0;
  cufft->p_backward = 0;
  cufft->dim[ndim]  = m;
  /* GPU memory allocation */
  cudaMalloc((void**) &cufft->devArray, sizeof(cufftComplex)*m);
  (*A)->ops->mult          = MatMult_SeqCUFFT;
  (*A)->ops->multtranspose = MatMultTranspose_SeqCUFFT;
  (*A)->assembled          = PETSC_TRUE;
  (*A)->ops->destroy       = MatDestroy_SeqCUFFT;
  /* get runtime options */
  ierr = PetscOptionsBegin(comm, ((PetscObject)(*A))->prefix, "CUFFT Options", "Mat");CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
|
fbae7ce0c5005a6fbffc560229b9f4e37a6fdbef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>
// Elementwise c[i] = a[i] + b[i].
// NOTE(review): there is no bounds guard (if (gid < n)), so the launch
// configuration must cover exactly the array length -- the host code
// relies on the block size dividing the element count evenly.
__global__ void adderboi(int * a, int * b, int * c)
{
    int gid=threadIdx.x + blockIdx.x * blockDim.x;
    c[gid] = a[gid] + b[gid];
}
// Host-side verification: returns 1 when c[i] == a[i] + b[i] for every one
// of the `shape` elements, 0 as soon as any entry disagrees.
int cpu_adder(int * a, int * b, int * c, int shape)
{
    int all_match = 1;
    for (int idx = 0; idx < shape && all_match; ++idx)
    {
        all_match = (a[idx] + b[idx] == c[idx]);
    }
    return all_match;
}
// Adds two random vectors of 2^22 ints on the GPU with a user-chosen block
// size and verifies the result on the CPU.
// NOTE(review): grid size is shape/b (floor) and the kernel has no bounds
// guard, so b must evenly divide 2^22 (i.e. be a power of two <= 1024) or
// the tail elements are skipped and the CPU check reports a mismatch.
int main()
{
    int shape=1<<22;
    int size = shape*sizeof(int);
    int b;
    printf("Enter block size : ");
    scanf("%d",&b);
    dim3 grid(shape/b);
    // Host buffers.
    int * h_arr1;
    int * h_arr2;
    int * h_arr3;
    h_arr1=(int *)malloc(size);
    h_arr2=(int *)malloc(size);
    h_arr3=(int *)malloc(size);
    // Fill inputs with small random values (0..15); zero the output.
    for(int i=0; i< shape; i++)
    {
        h_arr1[i]=(int)(rand() & 0x0f);
        h_arr2[i]=(int)(rand() & 0x0f);
        h_arr3[i]=0;
    }
    // Device buffers.
    int * d_arr1;
    int * d_arr2;
    int * d_arr3;
    hipMalloc((int**)&d_arr1, size);
    hipMalloc((int**)&d_arr2, size);
    hipMalloc((int**)&d_arr3, size);
    hipMemcpy(d_arr1, h_arr1, size, hipMemcpyHostToDevice);
    hipMemcpy(d_arr2, h_arr2, size, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( adderboi) , dim3(grid), dim3(b), 0, 0, d_arr1, d_arr2, d_arr3);
    hipDeviceSynchronize();
    hipMemcpy(h_arr3, d_arr3, size, hipMemcpyDeviceToHost);
    printf(cpu_adder(h_arr1, h_arr2, h_arr3, shape)?"CPU and GPU values match\n":"CPU and GPU values donot match\n");
    /*for(int i=0;i<shape;i++)
    {
        printf("%d\t+\t%d\t= %d\n", h_arr1[i], h_arr2[i], h_arr3[i]);
    }*/
    hipFree(d_arr1);
    hipFree(d_arr2);
    hipFree(d_arr3);
    free(h_arr1);
    free(h_arr2);
    free(h_arr3);
    hipDeviceReset();
}
#include "cuda.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// No bounds check is performed, so the launch must cover the array exactly
// (gridDim.x * blockDim.x == element count).
__global__ void adderboi(int * a, int * b, int * c)
{
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	c[idx] = b[idx] + a[idx];
}
// Host-side verifier: checks that c[i] == a[i] + b[i] for every index in
// [0, shape).  Returns 1 when all elements match, 0 at the first mismatch.
int cpu_adder(int * a, int * b, int * c, int shape)
{
	int i = 0;
	while (i < shape)
	{
		if (a[i] + b[i] != c[i])
		{
			return 0;
		}
		++i;
	}
	return 1;
}
/*
 * Host driver: fills two random int vectors, adds them on the GPU with
 * `adderboi`, and compares the result against a CPU check.
 *
 * Fixes over the original:
 *  - scanf's result and the entered block size are validated: b == 0 caused
 *    a division by zero in shape/b, and a b that does not divide shape
 *    silently left the array tail unprocessed (the kernel has no bounds
 *    check);
 *  - kernel-launch errors are surfaced via cudaGetLastError();
 *  - host allocations are NULL-checked before use.
 */
int main()
{
	const int shape = 1 << 22;             // number of elements (4,194,304)
	const int size = shape * sizeof(int);  // buffer size in bytes
	int b;                                 // threads per block, user-entered
	printf("Enter block size : ");
	if (scanf("%d", &b) != 1 || b <= 0 || b > 1024 || shape % b != 0) {
		fprintf(stderr, "block size must be a divisor of %d in [1,1024]\n", shape);
		return 1;
	}
	dim3 grid(shape / b);  // exact coverage: grid.x * b == shape
	int *h_arr1 = (int *)malloc(size);
	int *h_arr2 = (int *)malloc(size);
	int *h_arr3 = (int *)malloc(size);
	if (!h_arr1 || !h_arr2 || !h_arr3) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	// Random 4-bit operands; result buffer zeroed.
	for (int i = 0; i < shape; i++) {
		h_arr1[i] = (int)(rand() & 0x0f);
		h_arr2[i] = (int)(rand() & 0x0f);
		h_arr3[i] = 0;
	}
	int *d_arr1;
	int *d_arr2;
	int *d_arr3;
	cudaMalloc((int**)&d_arr1, size);
	cudaMalloc((int**)&d_arr2, size);
	cudaMalloc((int**)&d_arr3, size);
	cudaMemcpy(d_arr1, h_arr1, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_arr2, h_arr2, size, cudaMemcpyHostToDevice);
	adderboi <<<grid, b>>> (d_arr1, d_arr2, d_arr3);
	cudaError_t err = cudaGetLastError();  // catches bad launch configuration
	if (err != cudaSuccess) {
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
	}
	cudaDeviceSynchronize();
	cudaMemcpy(h_arr3, d_arr3, size, cudaMemcpyDeviceToHost);
	printf(cpu_adder(h_arr1, h_arr2, h_arr3, shape)?"CPU and GPU values match\n":"CPU and GPU values donot match\n");
	cudaFree(d_arr1);
	cudaFree(d_arr2);
	cudaFree(d_arr3);
	free(h_arr1);
	free(h_arr2);
	free(h_arr3);
	cudaDeviceReset();
	return 0;
}
298be282c3a0141ec79edefb076cfde6d75de514.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, project
#include <cutil.h>
//#define NUM 10
// includes, kernels
#include <NN_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
void NeuralNetwork();
unsigned g_verbose;
unsigned NUM;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 2) {
NUM = atoi(argv[1]);
for (i=2; i < argc;i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v': g_verbose = 1;
break;
default: commandline_error=1;
}
}
else commandline_error=1;
}
} else commandline_error=1;
if (commandline_error || !NUM) {
printf("Usage: ./NN <NUM> [-v]\n");
printf("where NUM is the number of images to process in parallel (up to 10000 for the t10k-images-idx3-ubyte database file) and -v is used to display approximately what each image looks like.\n");
return 1;
}
NeuralNetwork();
//CUT_EXIT(argc, argv);
}
/*
 * Intended to allocate the device-side buffers for the five network layers.
 * NOTE(review): every pointer parameter is received BY VALUE, so the
 * hipMalloc results are stored into local copies and lost on return -- the
 * caller's pointers are left unchanged and every allocation leaks.  The
 * caller (NeuralNetwork) currently works around this by allocating inline
 * instead of calling this helper.  Taking float** parameters would fix it,
 * but that changes the signature.
 */
void InitGPUMem(float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU,float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,float *Layer3_Weights_GPU,float *Layer4_Neurons_GPU,float *Layer4_Weights_GPU,float *Layer5_Neurons_GPU)
{
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer1_Neurons_GPU, sizeof(float)*29*29*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer1_Weights_GPU, sizeof(float)*156));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer2_Neurons_GPU, sizeof(float)*13*13*6*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer2_Weights_GPU, sizeof(float)*7800));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer3_Neurons_GPU, sizeof(float)*1250*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer3_Weights_GPU, sizeof(float)*125100));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer4_Neurons_GPU, sizeof(float)*100*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer4_Weights_GPU, sizeof(float)*1010));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer5_Neurons_GPU, sizeof(float)*10*NUM));
}
/*
 * Loads the four pretrained weight files from data/ into the caller's host
 * buffers (sizes fixed by the network architecture: 156 / 7800 / 125100 /
 * 1010 floats).  Exits the process if any file is missing.
 * NOTE(review): fread return values are unchecked -- a truncated weight
 * file leaves part of a buffer uninitialized without any diagnostic.  The
 * final existence check reads the (already fclose'd) FILE pointers only for
 * NULL-ness, which is valid since fclose does not alter the pointer values.
 */
void InitHostMem(float *Layer1_Weights_CPU,float *Layer2_Weights_CPU,float *Layer3_Weights_CPU,float *Layer4_Weights_CPU)
{
	// initial layer 1 weight
	FILE * pFile1 = fopen ("data/lw1.wei","rb");
	if (pFile1 != NULL)
	{
		for(int i=0;i<156;++i){
			fread(&(Layer1_Weights_CPU[i]),sizeof(float),1,pFile1);
			//printf("Layer1_Weights_CPU[%d]=%f\n", i, Layer1_Weights_CPU[i]);
		}
		fclose (pFile1);
	}
	// initial layer 2 weight
	FILE * pFile2 = fopen ("data/lw2.wei","rb");
	if (pFile2 != NULL)
	{
		fread(Layer2_Weights_CPU,sizeof(float),7800,pFile2);
		fclose (pFile2);
	}
	// initial layer 3 weight
	FILE * pFile3 = fopen ("data/lw3.wei","rb");
	if (pFile3 != NULL)
	{
		fread(Layer3_Weights_CPU,sizeof(float),125100,pFile3);
		fclose (pFile3);
	}
	// initial layer 4 weight
	FILE * pFile4 = fopen ("data/lw4.wei","rb");
	if (pFile4 != NULL)
	{
		fread(Layer4_Weights_CPU,sizeof(float),1010,pFile4);
		fclose (pFile4);
	}
	if (!(pFile1 && pFile2 && pFile3 && pFile4))
	{
		printf("FAIL! INPUT WEIGHTS NOT FOUND!\n");
		exit(1);
	}
}
/* Returns the 32-bit integer with its four bytes reversed (endianness
   swap).  Used to decode the big-endian header fields of the MNIST idx
   file on a little-endian host. */
int swapEndianInt( int bEnum )
{
	int result;
	char *dst = (char*) &result;
	char *src = (char*) &bEnum;
	dst[0] = src[3];
	dst[1] = src[2];
	dst[2] = src[1];
	dst[3] = src[0];
	return result;
}
/*
 * Reads the first NUM images of the MNIST t10k test set into layer1.
 * Each 28x28 image is copied into the top-left of a zero-padded 29x29
 * tile, and pixels are mapped to floats as 1.0 - value/256 (so ink -> ~0,
 * background -> ~1).  Exits the process if the database file is missing.
 * NOTE(review): the file is opened with mode "rt" although it is binary --
 * identical to "rb" on POSIX, but would corrupt reads on Windows; confirm
 * the intended platform.  `foo` and `image` are never freed and fread
 * return values are unchecked.
 */
void readIn(float *layer1)
{
	FILE *fp;
	unsigned int *foo;
	unsigned int i,j;
	foo = (unsigned int *) calloc(sizeof(unsigned int),1);
	//unsigned char image[29*29*NUM];
	unsigned char* image = (unsigned char*) malloc(29*29*NUM * sizeof(char));
	for (i=0;i<(29*29*NUM);i++) image[i]=0;
	fp=fopen("data/t10k-images-idx3-ubyte","rt");
	//fp=fopen("in.neu","rb");
	if(fp)
	{
		// Four big-endian 32-bit header fields (magic, count, rows, cols).
		fread(foo,sizeof(int),1,fp);
		printf("magic number = %d\n", swapEndianInt(foo[0]));
		fread(foo,sizeof(int),1,fp);
		printf("number of items = %d\n", swapEndianInt(foo[0]));
		fread(foo,sizeof(int),1,fp);
		printf("number of rows = %d\n", swapEndianInt(foo[0]));
		fread(foo,sizeof(int),1,fp);
		printf("number of rows = %d\n", swapEndianInt(foo[0]));
		// Copy each 28-byte row into a 29-wide tile (pad row/col stay 0).
		for (j=0;j<NUM;j++) {
			for (i=0;i<28;i++)
				fread((image+i*29+j*29*29),sizeof(char),28,fp);
		}
		//fread(layer1,sizeof(float),29*29,fp);
		fclose(fp);
		// Invert intensity into [0,1]-ish floats.
		for (i=0;i<(29*29*NUM);i++)
			layer1[i] = (1.0 - (float) image[i]/256);
	}
	else
	{
		printf("FAIL! data/t10k-images-idx3-ubyte NOT FOUND!\n");
		exit(1);
	}
}
/*
 * Writes the first 10 doubles of `final` (one image's 10 class scores) to
 * out.res in binary form.  Silently does nothing if the file cannot be
 * opened.
 * NOTE(review): only 10 values are written even though the caller passes a
 * buffer of 10*NUM scores -- results for images beyond the first are
 * dropped; confirm whether that is intended.
 */
void output(double *final)
{
	int i;
	FILE *fp=0;
	fp=fopen("out.res","wb");
	if(fp)
	{
		//for(i=0;i<10;i++) {
		//	printf("output[%d] = %e\n", i, final[i]);
		//}
		fwrite(final,sizeof(double),10,fp);
		fclose(fp);
	}
}
/*
 * End-to-end inference driver: selects a CUDA-capable device, loads NUM
 * MNIST test images and the pretrained weights, runs the four network
 * layers on the GPU for all images in parallel, prints the arg-max digit
 * for each image, and writes the first image's raw scores to out.res.
 * NOTE(review): no buffer (host or device) is ever freed -- the function
 * ends with exit(0), so the leak is bounded by process lifetime.
 */
void NeuralNetwork()
{
	int x,y;
	// initialise card and timer
	int deviceCount;
	CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount));
	if (deviceCount == 0) {
		fprintf(stderr, "There is no device.\n");
		exit(EXIT_FAILURE);
	}
	// Pick the first device whose compute capability major version is >= 1.
	int dev;
	for (dev = 0; dev < deviceCount; ++dev) {
		hipDeviceProp_t deviceProp;
		CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev));
		if (deviceProp.major >= 1)
			break;
	}
	if (dev == deviceCount) {
		fprintf(stderr, "There is no device supporting CUDA.\n");
		exit(EXIT_FAILURE);
	}
	else
		CUDA_SAFE_CALL(hipSetDevice(dev));
	// Input layer: NUM padded 29x29 images, filled by readIn() below.
	//float Layer1_Neurons_CPU[29*29*NUM];
	float *Layer1_Neurons_CPU = (float*) malloc (29*29*NUM * sizeof(float));
	/* (a hard-coded 29x29 sample digit that used to initialise this buffer
	   inline was commented out here; the input always comes from readIn) */
	readIn(Layer1_Neurons_CPU);
	if (g_verbose) {
		// ASCII-art dump of every input image ('0' marks dark pixels).
		for(y=0;y< 29*NUM;y++) {
			if(!(y%29)) printf("\n");
			for (x=0;x<29;x++) {
				if (Layer1_Neurons_CPU[y*29+x]<0.5) {
					printf("0");
				}
				else printf(" ");
				//printf("%d", (Layer1_Neurons_CPU[y*29+x]>0.5));
			}
			printf("\n");
		}
	}
	// Host weight buffers (sizes fixed by the architecture) and the device
	// pointers for weights and per-layer activations.
	float *Layer1_Neurons_GPU;
	float Layer1_Weights_CPU[156];
	float *Layer1_Weights_GPU;
	float Layer2_Weights_CPU[7800];
	float *Layer2_Weights_GPU;
	float *Layer2_Neurons_GPU;
	float Layer3_Weights_CPU[125100];
	float *Layer3_Weights_GPU;
	float *Layer3_Neurons_GPU;
	float Layer4_Weights_CPU[1010];
	float *Layer4_Weights_GPU;
	float *Layer4_Neurons_GPU;
	//float Layer5_Neurons_CPU[10*NUM];//={0,0,0,0,0,0,0,0,0,0};
	float *Layer5_Neurons_CPU = (float*) malloc(10*NUM * sizeof(float));
	for (x=0;x<10*NUM;x++) Layer5_Neurons_CPU[x]=0;
	float *Layer5_Neurons_GPU;
	double *outputLayer;
	//unsigned int timer = 0;
	//float totaltime = 0.0f;
	//init input here
	InitHostMem(Layer1_Weights_CPU,Layer2_Weights_CPU,Layer3_Weights_CPU,Layer4_Weights_CPU);
	//allocate momory on Device
	//InitGPUMem(Layer1_Neurons_GPU,Layer1_Weights_GPU,Layer2_Neurons_GPU,Layer2_Weights_GPU,Layer3_Neurons_GPU,Layer3_Weights_GPU,Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer1_Neurons_GPU, sizeof(float)*29*29*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer1_Weights_GPU, sizeof(float)*156));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer2_Neurons_GPU, sizeof(float)*13*13*6*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer2_Weights_GPU, sizeof(float)*7800));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer3_Neurons_GPU, sizeof(float)*1250*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer3_Weights_GPU, sizeof(float)*125100));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer4_Neurons_GPU, sizeof(float)*100*NUM));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer4_Weights_GPU, sizeof(float)*1010));
	CUDA_SAFE_CALL(hipMalloc((void**) &Layer5_Neurons_GPU, sizeof(float)*10*NUM));
	outputLayer = (double*)malloc(sizeof(double)*10*NUM);
	//init 29x29 handwritting array
	// already done in "initial"
	//copy from CPU to GPU
	CUDA_SAFE_CALL(hipMemcpy(Layer1_Neurons_GPU,Layer1_Neurons_CPU, sizeof(float)*29*29*NUM, hipMemcpyHostToDevice));
	CUDA_SAFE_CALL(hipMemcpy(Layer1_Weights_GPU,Layer1_Weights_CPU, sizeof(float)*156, hipMemcpyHostToDevice));
	CUDA_SAFE_CALL(hipMemcpy(Layer2_Weights_GPU,Layer2_Weights_CPU, sizeof(float)*7800, hipMemcpyHostToDevice));
	CUDA_SAFE_CALL(hipMemcpy(Layer3_Weights_GPU,Layer3_Weights_CPU, sizeof(float)*125100, hipMemcpyHostToDevice));
	CUDA_SAFE_CALL(hipMemcpy(Layer4_Weights_GPU,Layer4_Weights_CPU, sizeof(float)*1010, hipMemcpyHostToDevice));
	CUDA_SAFE_CALL(hipMemcpy(Layer5_Neurons_GPU,Layer5_Neurons_CPU, sizeof(float)*10*NUM, hipMemcpyHostToDevice));
	// CUT_SAFE_CALL(cutCreateTimer(&timer));
	// CUT_SAFE_CALL(cutStartTimer(timer));
	printf("NUM=%d\n", NUM);
	// One grid row per image (blockIdx.y); the four layers run back to back
	// on the default stream, so each launch sees the previous layer's output.
	dim3 Layer1_Block(6,NUM,1);
	dim3 Layer1_Thread(13,13);
	hipLaunchKernelGGL(( executeFirstLayer), dim3(Layer1_Block),dim3(Layer1_Thread), 0, 0, Layer1_Neurons_GPU,Layer1_Weights_GPU,Layer2_Neurons_GPU);
	dim3 Layer2_Block(50,NUM,1);
	dim3 Layer2_Thread(5,5);
	hipLaunchKernelGGL(( executeSecondLayer), dim3(Layer2_Block),dim3(Layer2_Thread), 0, 0, Layer2_Neurons_GPU, Layer2_Weights_GPU,Layer3_Neurons_GPU);
	dim3 Layer3_Block(100,NUM,1);
	dim3 Layer3_Thread(1,1);
	hipLaunchKernelGGL(( executeThirdLayer), dim3(Layer3_Block),dim3(Layer3_Thread), 0, 0, Layer3_Neurons_GPU, Layer3_Weights_GPU,Layer4_Neurons_GPU);
	dim3 Layer4_Block(10,NUM,1);
	dim3 Layer4_Thread(1,1);
	hipLaunchKernelGGL(( executeFourthLayer), dim3(Layer4_Block),dim3(Layer4_Thread), 0, 0, Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
	CUT_CHECK_ERROR("Kernel execution failed");
	// CUT_SAFE_CALL(cutStopTimer(timer));
	// totaltime = cutGetTimerValue(timer);
	//copy from GPU to CPU
	CUDA_SAFE_CALL(hipMemcpy(Layer5_Neurons_CPU,Layer5_Neurons_GPU, sizeof(float)*10*NUM, hipMemcpyDeviceToHost));
	// stop and destroy timer
	//printf("Processing time: %f (ms) \n", totaltime);
	// CUT_SAFE_CALL(cutDeleteTimer(timer));
	// Running arg-max over each group of 10 class scores; y holds the best
	// class index of the current image, printed at the next group boundary.
	// NOTE(review): x is an int, so double scores are truncated when stored
	// for comparison -- verify this cannot change the arg-max for close
	// scores.
	for(int a=0;a<10*NUM;a++)
	{
		//printf("output[%d]=%f\n", a, Layer5_Neurons_CPU[a]);
		outputLayer[a] = (double)Layer5_Neurons_CPU[a];
		if (!(a%10)) {
			if (a) printf("%d ", y);
			x=outputLayer[a];
			y=0;
		}
		if (outputLayer[a]>x) {
			x=outputLayer[a];
			y=a%10;
		}
	}
	printf("%d\n", y);
	output(outputLayer);
	/* (debug helpers that copied each layer's activations back to the host
	   and dumped them to layer_{1..4}.neu files were commented out here) */
	exit(0);
}
| 298be282c3a0141ec79edefb076cfde6d75de514.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
// includes, project
#include <cutil.h>
//#define NUM 10
// includes, kernels
#include <NN_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
void NeuralNetwork();
unsigned g_verbose;
unsigned NUM;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char** argv)
{
int i, commandline_error;
commandline_error = 0;
g_verbose = 0;
if (argc >= 2) {
NUM = atoi(argv[1]);
for (i=2; i < argc;i++) {
if (argv[i][0] == '-') {
switch (argv[i][1]) {
case 'v': g_verbose = 1;
break;
default: commandline_error=1;
}
}
else commandline_error=1;
}
} else commandline_error=1;
if (commandline_error || !NUM) {
printf("Usage: ./NN <NUM> [-v]\n");
printf("where NUM is the number of images to process in parallel (up to 10000 for the t10k-images-idx3-ubyte database file) and -v is used to display approximately what each image looks like.\n");
return 1;
}
NeuralNetwork();
//CUT_EXIT(argc, argv);
}
/*
 * Intended to allocate the device-side buffers for the five network layers.
 * NOTE(review): every pointer parameter is received BY VALUE, so the
 * cudaMalloc results are stored into local copies and lost on return -- the
 * caller's pointers are left unchanged and every allocation leaks.  The
 * caller (NeuralNetwork) currently works around this by allocating inline
 * instead of calling this helper.  Taking float** parameters would fix it,
 * but that changes the signature.
 */
void InitGPUMem(float *Layer1_Neurons_GPU,float *Layer1_Weights_GPU,float *Layer2_Neurons_GPU,float *Layer2_Weights_GPU,float *Layer3_Neurons_GPU,float *Layer3_Weights_GPU,float *Layer4_Neurons_GPU,float *Layer4_Weights_GPU,float *Layer5_Neurons_GPU)
{
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer1_Neurons_GPU, sizeof(float)*29*29*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer1_Weights_GPU, sizeof(float)*156));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer2_Neurons_GPU, sizeof(float)*13*13*6*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer2_Weights_GPU, sizeof(float)*7800));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer3_Neurons_GPU, sizeof(float)*1250*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer3_Weights_GPU, sizeof(float)*125100));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer4_Neurons_GPU, sizeof(float)*100*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer4_Weights_GPU, sizeof(float)*1010));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer5_Neurons_GPU, sizeof(float)*10*NUM));
}
/*
 * Loads the four pretrained weight files from data/ into the caller's host
 * buffers (sizes fixed by the network architecture: 156 / 7800 / 125100 /
 * 1010 floats).  Exits the process if any file is missing.
 * NOTE(review): fread return values are unchecked -- a truncated weight
 * file leaves part of a buffer uninitialized without any diagnostic.  The
 * final existence check reads the (already fclose'd) FILE pointers only for
 * NULL-ness, which is valid since fclose does not alter the pointer values.
 */
void InitHostMem(float *Layer1_Weights_CPU,float *Layer2_Weights_CPU,float *Layer3_Weights_CPU,float *Layer4_Weights_CPU)
{
	// initial layer 1 weight
	FILE * pFile1 = fopen ("data/lw1.wei","rb");
	if (pFile1 != NULL)
	{
		for(int i=0;i<156;++i){
			fread(&(Layer1_Weights_CPU[i]),sizeof(float),1,pFile1);
			//printf("Layer1_Weights_CPU[%d]=%f\n", i, Layer1_Weights_CPU[i]);
		}
		fclose (pFile1);
	}
	// initial layer 2 weight
	FILE * pFile2 = fopen ("data/lw2.wei","rb");
	if (pFile2 != NULL)
	{
		fread(Layer2_Weights_CPU,sizeof(float),7800,pFile2);
		fclose (pFile2);
	}
	// initial layer 3 weight
	FILE * pFile3 = fopen ("data/lw3.wei","rb");
	if (pFile3 != NULL)
	{
		fread(Layer3_Weights_CPU,sizeof(float),125100,pFile3);
		fclose (pFile3);
	}
	// initial layer 4 weight
	FILE * pFile4 = fopen ("data/lw4.wei","rb");
	if (pFile4 != NULL)
	{
		fread(Layer4_Weights_CPU,sizeof(float),1010,pFile4);
		fclose (pFile4);
	}
	if (!(pFile1 && pFile2 && pFile3 && pFile4))
	{
		printf("FAIL! INPUT WEIGHTS NOT FOUND!\n");
		exit(1);
	}
}
/* Returns the 32-bit integer with its four bytes reversed (endianness
   swap).  Used to decode the big-endian header fields of the MNIST idx
   file on a little-endian host. */
int swapEndianInt( int bEnum )
{
	int result;
	char *dst = (char*) &result;
	char *src = (char*) &bEnum;
	dst[0] = src[3];
	dst[1] = src[2];
	dst[2] = src[1];
	dst[3] = src[0];
	return result;
}
/*
 * Reads the first NUM images of the MNIST t10k test set into layer1.
 * Each 28x28 image is copied into the top-left of a zero-padded 29x29
 * tile, and pixels are mapped to floats as 1.0 - value/256 (so ink -> ~0,
 * background -> ~1).  Exits the process if the database file is missing.
 * NOTE(review): the file is opened with mode "rt" although it is binary --
 * identical to "rb" on POSIX, but would corrupt reads on Windows; confirm
 * the intended platform.  `foo` and `image` are never freed and fread
 * return values are unchecked.
 */
void readIn(float *layer1)
{
	FILE *fp;
	unsigned int *foo;
	unsigned int i,j;
	foo = (unsigned int *) calloc(sizeof(unsigned int),1);
	//unsigned char image[29*29*NUM];
	unsigned char* image = (unsigned char*) malloc(29*29*NUM * sizeof(char));
	for (i=0;i<(29*29*NUM);i++) image[i]=0;
	fp=fopen("data/t10k-images-idx3-ubyte","rt");
	//fp=fopen("in.neu","rb");
	if(fp)
	{
		// Four big-endian 32-bit header fields (magic, count, rows, cols).
		fread(foo,sizeof(int),1,fp);
		printf("magic number = %d\n", swapEndianInt(foo[0]));
		fread(foo,sizeof(int),1,fp);
		printf("number of items = %d\n", swapEndianInt(foo[0]));
		fread(foo,sizeof(int),1,fp);
		printf("number of rows = %d\n", swapEndianInt(foo[0]));
		fread(foo,sizeof(int),1,fp);
		printf("number of rows = %d\n", swapEndianInt(foo[0]));
		// Copy each 28-byte row into a 29-wide tile (pad row/col stay 0).
		for (j=0;j<NUM;j++) {
			for (i=0;i<28;i++)
				fread((image+i*29+j*29*29),sizeof(char),28,fp);
		}
		//fread(layer1,sizeof(float),29*29,fp);
		fclose(fp);
		// Invert intensity into [0,1]-ish floats.
		for (i=0;i<(29*29*NUM);i++)
			layer1[i] = (1.0 - (float) image[i]/256);
	}
	else
	{
		printf("FAIL! data/t10k-images-idx3-ubyte NOT FOUND!\n");
		exit(1);
	}
}
/*
 * Writes the first 10 doubles of `final` (one image's 10 class scores) to
 * out.res in binary form.  Silently does nothing if the file cannot be
 * opened.
 * NOTE(review): only 10 values are written even though the caller passes a
 * buffer of 10*NUM scores -- results for images beyond the first are
 * dropped; confirm whether that is intended.
 */
void output(double *final)
{
	int i;
	FILE *fp=0;
	fp=fopen("out.res","wb");
	if(fp)
	{
		//for(i=0;i<10;i++) {
		//	printf("output[%d] = %e\n", i, final[i]);
		//}
		fwrite(final,sizeof(double),10,fp);
		fclose(fp);
	}
}
/*
 * End-to-end inference driver: selects a CUDA-capable device, loads NUM
 * MNIST test images and the pretrained weights, runs the four network
 * layers on the GPU for all images in parallel, prints the arg-max digit
 * for each image, and writes the first image's raw scores to out.res.
 * NOTE(review): no buffer (host or device) is ever freed -- the function
 * ends with exit(0), so the leak is bounded by process lifetime.
 */
void NeuralNetwork()
{
	int x,y;
	// initialise card and timer
	int deviceCount;
	CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
	if (deviceCount == 0) {
		fprintf(stderr, "There is no device.\n");
		exit(EXIT_FAILURE);
	}
	// Pick the first device whose compute capability major version is >= 1.
	int dev;
	for (dev = 0; dev < deviceCount; ++dev) {
		cudaDeviceProp deviceProp;
		CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev));
		if (deviceProp.major >= 1)
			break;
	}
	if (dev == deviceCount) {
		fprintf(stderr, "There is no device supporting CUDA.\n");
		exit(EXIT_FAILURE);
	}
	else
		CUDA_SAFE_CALL(cudaSetDevice(dev));
	// Input layer: NUM padded 29x29 images, filled by readIn() below.
	//float Layer1_Neurons_CPU[29*29*NUM];
	float *Layer1_Neurons_CPU = (float*) malloc (29*29*NUM * sizeof(float));
	/* (a hard-coded 29x29 sample digit that used to initialise this buffer
	   inline was commented out here; the input always comes from readIn) */
	readIn(Layer1_Neurons_CPU);
	if (g_verbose) {
		// ASCII-art dump of every input image ('0' marks dark pixels).
		for(y=0;y< 29*NUM;y++) {
			if(!(y%29)) printf("\n");
			for (x=0;x<29;x++) {
				if (Layer1_Neurons_CPU[y*29+x]<0.5) {
					printf("0");
				}
				else printf(" ");
				//printf("%d", (Layer1_Neurons_CPU[y*29+x]>0.5));
			}
			printf("\n");
		}
	}
	// Host weight buffers (sizes fixed by the architecture) and the device
	// pointers for weights and per-layer activations.
	float *Layer1_Neurons_GPU;
	float Layer1_Weights_CPU[156];
	float *Layer1_Weights_GPU;
	float Layer2_Weights_CPU[7800];
	float *Layer2_Weights_GPU;
	float *Layer2_Neurons_GPU;
	float Layer3_Weights_CPU[125100];
	float *Layer3_Weights_GPU;
	float *Layer3_Neurons_GPU;
	float Layer4_Weights_CPU[1010];
	float *Layer4_Weights_GPU;
	float *Layer4_Neurons_GPU;
	//float Layer5_Neurons_CPU[10*NUM];//={0,0,0,0,0,0,0,0,0,0};
	float *Layer5_Neurons_CPU = (float*) malloc(10*NUM * sizeof(float));
	for (x=0;x<10*NUM;x++) Layer5_Neurons_CPU[x]=0;
	float *Layer5_Neurons_GPU;
	double *outputLayer;
	//unsigned int timer = 0;
	//float totaltime = 0.0f;
	//init input here
	InitHostMem(Layer1_Weights_CPU,Layer2_Weights_CPU,Layer3_Weights_CPU,Layer4_Weights_CPU);
	//allocate momory on Device
	//InitGPUMem(Layer1_Neurons_GPU,Layer1_Weights_GPU,Layer2_Neurons_GPU,Layer2_Weights_GPU,Layer3_Neurons_GPU,Layer3_Weights_GPU,Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer1_Neurons_GPU, sizeof(float)*29*29*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer1_Weights_GPU, sizeof(float)*156));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer2_Neurons_GPU, sizeof(float)*13*13*6*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer2_Weights_GPU, sizeof(float)*7800));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer3_Neurons_GPU, sizeof(float)*1250*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer3_Weights_GPU, sizeof(float)*125100));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer4_Neurons_GPU, sizeof(float)*100*NUM));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer4_Weights_GPU, sizeof(float)*1010));
	CUDA_SAFE_CALL(cudaMalloc((void**) &Layer5_Neurons_GPU, sizeof(float)*10*NUM));
	outputLayer = (double*)malloc(sizeof(double)*10*NUM);
	//init 29x29 handwritting array
	// already done in "initial"
	//copy from CPU to GPU
	CUDA_SAFE_CALL(cudaMemcpy(Layer1_Neurons_GPU,Layer1_Neurons_CPU, sizeof(float)*29*29*NUM, cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(Layer1_Weights_GPU,Layer1_Weights_CPU, sizeof(float)*156, cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(Layer2_Weights_GPU,Layer2_Weights_CPU, sizeof(float)*7800, cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(Layer3_Weights_GPU,Layer3_Weights_CPU, sizeof(float)*125100, cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(Layer4_Weights_GPU,Layer4_Weights_CPU, sizeof(float)*1010, cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(Layer5_Neurons_GPU,Layer5_Neurons_CPU, sizeof(float)*10*NUM, cudaMemcpyHostToDevice));
	// CUT_SAFE_CALL(cutCreateTimer(&timer));
	// CUT_SAFE_CALL(cutStartTimer(timer));
	printf("NUM=%d\n", NUM);
	// One grid row per image (blockIdx.y); the four layers run back to back
	// on the default stream, so each launch sees the previous layer's output.
	dim3 Layer1_Block(6,NUM,1);
	dim3 Layer1_Thread(13,13);
	executeFirstLayer<<<Layer1_Block,Layer1_Thread>>>(Layer1_Neurons_GPU,Layer1_Weights_GPU,Layer2_Neurons_GPU);
	dim3 Layer2_Block(50,NUM,1);
	dim3 Layer2_Thread(5,5);
	executeSecondLayer<<<Layer2_Block,Layer2_Thread>>>(Layer2_Neurons_GPU, Layer2_Weights_GPU,Layer3_Neurons_GPU);
	dim3 Layer3_Block(100,NUM,1);
	dim3 Layer3_Thread(1,1);
	executeThirdLayer<<<Layer3_Block,Layer3_Thread>>>(Layer3_Neurons_GPU, Layer3_Weights_GPU,Layer4_Neurons_GPU);
	dim3 Layer4_Block(10,NUM,1);
	dim3 Layer4_Thread(1,1);
	executeFourthLayer<<<Layer4_Block,Layer4_Thread>>>(Layer4_Neurons_GPU,Layer4_Weights_GPU,Layer5_Neurons_GPU);
	CUT_CHECK_ERROR("Kernel execution failed");
	// CUT_SAFE_CALL(cutStopTimer(timer));
	// totaltime = cutGetTimerValue(timer);
	//copy from GPU to CPU
	CUDA_SAFE_CALL(cudaMemcpy(Layer5_Neurons_CPU,Layer5_Neurons_GPU, sizeof(float)*10*NUM, cudaMemcpyDeviceToHost));
	// stop and destroy timer
	//printf("Processing time: %f (ms) \n", totaltime);
	// CUT_SAFE_CALL(cutDeleteTimer(timer));
	// Running arg-max over each group of 10 class scores; y holds the best
	// class index of the current image, printed at the next group boundary.
	// NOTE(review): x is an int, so double scores are truncated when stored
	// for comparison -- verify this cannot change the arg-max for close
	// scores.
	for(int a=0;a<10*NUM;a++)
	{
		//printf("output[%d]=%f\n", a, Layer5_Neurons_CPU[a]);
		outputLayer[a] = (double)Layer5_Neurons_CPU[a];
		if (!(a%10)) {
			if (a) printf("%d ", y);
			x=outputLayer[a];
			y=0;
		}
		if (outputLayer[a]>x) {
			x=outputLayer[a];
			y=a%10;
		}
	}
	printf("%d\n", y);
	output(outputLayer);
	/* (debug helpers that copied each layer's activations back to the host
	   and dumped them to layer_{1..4}.neu files were commented out here) */
	exit(0);
}
|
1b48a6c7b004fed1b3671ab9f73a30b256c629c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
 * Increments every a[tid] by 1 and accumulates the sum of the updated
 * elements into b[0].  Intended for a single-block launch (only
 * threadIdx.x is used for indexing).
 *
 * Fix over the original: `b[0] += a[tid]` was an unsynchronized
 * read-modify-write executed by every thread, so b[0] ended up holding an
 * arbitrary subset of the contributions.  b[0] is now zeroed by a single
 * thread, the block synchronizes, and the accumulation uses atomicAdd so
 * the result is the actual sum.
 */
__global__ void sum_test(float *a, float *b) {
	int tid = threadIdx.x;
	if (tid == 0) {
		b[0] = 0;
	}
	__syncthreads();
	atomicAdd(&a[tid], 1.0f);       // a[tid] is only written by this thread
	atomicAdd(&b[0], a[tid]);       // shared accumulator -> must be atomic
}
/* One thread per input element: reads the bin id a[idx] and atomically
   increments the matching histogram counter.  Assumes the launch covers
   the input exactly (there is no bounds check). */
__global__ void hist_compute(int *a, int *hist) {
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	const int bin = a[idx];
	atomicAdd(hist + bin, 1);
}
/*
 * Builds a histogram of 5120 values in [0, 10) on both the CPU and the GPU
 * and prints both sets of counters for comparison.
 *
 * Fixes over the original:
 *  - histGpu is zeroed with hipMemset before the kernel accumulates into it
 *    (it was used uninitialized, so the GPU counts started from garbage);
 *  - the device buffers and the heap-allocated CPU histogram are released.
 */
int main(int argc, char* argv[]) {
	const int pixel_num = 5120;   // number of input values
	int a[pixel_num];
	const int length = 10;        // number of histogram bins
	for (int i = 0; i < pixel_num; i++) {
		a[i] = i * (i + 1) % length;   // deterministic bin ids in [0, length)
	}
	// CPU reference histogram (value-initialized to zero by `()`).
	int *hist = new int[length]();
	for (int i = 0; i < pixel_num; i++) {
		hist[a[i]] += 1;
	}
	for (int i = 0; i < length; i++) {
		printf("hist[%d]=%d\n", i, hist[i]);
	}
	int *aGpu, *histGpu;
	int hist2[length];
	hipMalloc((void**)&aGpu, pixel_num * sizeof(int));
	hipMalloc((void**)&histGpu, length * sizeof(int));
	// The kernel only adds, so the bins must start at zero.
	hipMemset(histGpu, 0, length * sizeof(int));
	hipMemcpy(aGpu, a, pixel_num * sizeof(int), hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( hist_compute), dim3(pixel_num / 512), dim3(512), 0, 0, aGpu, histGpu);
	// The blocking D2H copy also synchronizes with the kernel.
	hipMemcpy(hist2, histGpu, length * sizeof(int), hipMemcpyDeviceToHost);
	for (int i = 0; i < length; i++) {
		printf("hist[%d]=%d\n", i, hist2[i]);
	}
	hipFree(aGpu);
	hipFree(histGpu);
	delete[] hist;
	return 0;
}
| 1b48a6c7b004fed1b3671ab9f73a30b256c629c5.cu | #include <stdio.h>
/*
 * Increments every a[tid] by 1 and accumulates the sum of the updated
 * elements into b[0].  Intended for a single-block launch (only
 * threadIdx.x is used for indexing).
 *
 * Fix over the original: `b[0] += a[tid]` was an unsynchronized
 * read-modify-write executed by every thread, so b[0] ended up holding an
 * arbitrary subset of the contributions.  b[0] is now zeroed by a single
 * thread, the block synchronizes, and the accumulation uses atomicAdd so
 * the result is the actual sum.
 */
__global__ void sum_test(float *a, float *b) {
	int tid = threadIdx.x;
	if (tid == 0) {
		b[0] = 0;
	}
	__syncthreads();
	atomicAdd(&a[tid], 1.0f);       // a[tid] is only written by this thread
	atomicAdd(&b[0], a[tid]);       // shared accumulator -> must be atomic
}
/* One thread per input element: reads the bin id a[idx] and atomically
   increments the matching histogram counter.  Assumes the launch covers
   the input exactly (there is no bounds check). */
__global__ void hist_compute(int *a, int *hist) {
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	const int bin = a[idx];
	atomicAdd(hist + bin, 1);
}
/*
 * Builds a histogram of 5120 values in [0, 10) on both the CPU and the GPU
 * and prints both sets of counters for comparison.
 *
 * Fixes over the original:
 *  - histGpu is zeroed with cudaMemset before the kernel accumulates into
 *    it (it was used uninitialized, so the GPU counts started from garbage);
 *  - the device buffers and the heap-allocated CPU histogram are released.
 */
int main(int argc, char* argv[]) {
	const int pixel_num = 5120;   // number of input values
	int a[pixel_num];
	const int length = 10;        // number of histogram bins
	for (int i = 0; i < pixel_num; i++) {
		a[i] = i * (i + 1) % length;   // deterministic bin ids in [0, length)
	}
	// CPU reference histogram (value-initialized to zero by `()`).
	int *hist = new int[length]();
	for (int i = 0; i < pixel_num; i++) {
		hist[a[i]] += 1;
	}
	for (int i = 0; i < length; i++) {
		printf("hist[%d]=%d\n", i, hist[i]);
	}
	int *aGpu, *histGpu;
	int hist2[length];
	cudaMalloc((void**)&aGpu, pixel_num * sizeof(int));
	cudaMalloc((void**)&histGpu, length * sizeof(int));
	// The kernel only adds, so the bins must start at zero.
	cudaMemset(histGpu, 0, length * sizeof(int));
	cudaMemcpy(aGpu, a, pixel_num * sizeof(int), cudaMemcpyHostToDevice);
	hist_compute<<<pixel_num / 512, 512>>>(aGpu, histGpu);
	// The blocking D2H copy also synchronizes with the kernel.
	cudaMemcpy(hist2, histGpu, length * sizeof(int), cudaMemcpyDeviceToHost);
	for (int i = 0; i < length; i++) {
		printf("hist[%d]=%d\n", i, hist2[i]);
	}
	cudaFree(aGpu);
	cudaFree(histGpu);
	delete[] hist;
	return 0;
}
|
9a5b7b73dc78576ff87d886737303841feb291d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
extern __device__ int GetThdID();
__device__ int* g_pingpong_mailbox;
/*
 * Each thread spin-waits on its own slot of g_pingpong_mailbox (the
 * volatile pointer forces the load to be re-issued every iteration) until
 * an external writer (pingpong_signal or the host) stores a nonzero value.
 * It then reads a shared timestamp stored just past the per-thread slots
 * (at index blockDim.x * gridDim.x), echoes it back into its own mailbox,
 * and reports it.
 * NOTE(review): the timestamp is read through a NON-volatile pointer and
 * there is no fence between observing the flag and reading the timestamp --
 * this presumably relies on the writer publishing the timestamp before the
 * flag; confirm the required memory ordering (__threadfence_system on the
 * writer side / acquire on the reader).
 */
__global__ void pingpong() {
	const int tid = GetThdID();
	volatile int* mailbox = g_pingpong_mailbox + tid;
	// Wait on host wake
	bool done = false;
	int curr_timestamp;
	while (!done) {
		if (*mailbox != 0) {
			done = true;
			curr_timestamp = *(g_pingpong_mailbox + blockDim.x * gridDim.x);
			*mailbox = curr_timestamp;
		}
	}
	printf("[CUDA Kernel] Thread %d awakened at timestamp %d\n", tid, curr_timestamp);
}
// Wakes the `pingpong` thread with index x by writing a nonzero value into
// its mailbox slot.
// NOTE(review): the store goes through a non-volatile pointer while the
// reader spins on a volatile one -- confirm no explicit __threadfence() is
// needed for the store to become visible to the waiting kernel.
__global__ void pingpong_signal(int x) {
	int* mailbox = g_pingpong_mailbox + x;
	*mailbox = 1;
} | 9a5b7b73dc78576ff87d886737303841feb291d6.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <cuda.h>
#include "device_launch_parameters.h"
extern __device__ int GetThdID();
__device__ int* g_pingpong_mailbox;
/*
 * Each thread spin-waits on its own slot of g_pingpong_mailbox (the
 * volatile pointer forces the load to be re-issued every iteration) until
 * an external writer (pingpong_signal or the host) stores a nonzero value.
 * It then reads a shared timestamp stored just past the per-thread slots
 * (at index blockDim.x * gridDim.x), echoes it back into its own mailbox,
 * and reports it.
 * NOTE(review): the timestamp is read through a NON-volatile pointer and
 * there is no fence between observing the flag and reading the timestamp --
 * this presumably relies on the writer publishing the timestamp before the
 * flag; confirm the required memory ordering (__threadfence_system on the
 * writer side / acquire on the reader).
 */
__global__ void pingpong() {
	const int tid = GetThdID();
	volatile int* mailbox = g_pingpong_mailbox + tid;
	// Wait on host wake
	bool done = false;
	int curr_timestamp;
	while (!done) {
		if (*mailbox != 0) {
			done = true;
			curr_timestamp = *(g_pingpong_mailbox + blockDim.x * gridDim.x);
			*mailbox = curr_timestamp;
		}
	}
	printf("[CUDA Kernel] Thread %d awakened at timestamp %d\n", tid, curr_timestamp);
}
}
// Wakes the `pingpong` thread with index x by writing a nonzero value into
// its mailbox slot.
// NOTE(review): the store goes through a non-volatile pointer while the
// reader spins on a volatile one -- confirm no explicit __threadfence() is
// needed for the store to become visible to the waiting kernel.
__global__ void pingpong_signal(int x) {
	int* mailbox = g_pingpong_mailbox + x;
	*mailbox = 1;
} |
e64df9ba2a3b990345670e96567cf77a86a50e86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<time.h>
/*
 * Gathers point trajectories by index.
 * inp is indexed as (b, n, t, 3), idx as (b, m), out as (b, m, t, 3)
 * (layout inferred from the index arithmetic).  Grid-stride over batches,
 * block-stride over the m sampled points; each selected trajectory's
 * t*3 contiguous floats are copied in one inner loop.
 */
__global__ void gathertrajctoryKernel(int b,int n,int m,int t,const float * __restrict__ inp,const int * __restrict__ idx, float * __restrict__ out){
	for (int batch = blockIdx.x; batch < b; batch += gridDim.x) {
		for (int pt = threadIdx.x; pt < m; pt += blockDim.x) {
			const int src = idx[batch * m + pt];
			const float *src_traj = inp + (batch * n + src) * t * 3;
			float *dst_traj = out + (batch * m + pt) * t * 3;
			for (int f = 0; f < t * 3; f++) {
				dst_traj[f] = src_traj[f];
			}
		}
	}
}
/*
 * Host-side launcher for the trajectory gather: a fixed 32-block x
 * 512-thread configuration; the kernel's grid/block-stride loops cover any
 * (b, m).
 */
void gathertrajctoryLauncher(int b,int n,int m,int t,const float * inp,const int *idx, float *out){
	const dim3 grid(32);
	const dim3 block(512);
	hipLaunchKernelGGL(( gathertrajctoryKernel), grid, block, 0, 0, b,n,m,t,inp,idx,out);
}
/*
 * Backward pass of the trajectory gather: scatters the output gradients
 * out_g (b, m, t, 3) back to the source-point gradients inp_g (b, n, t, 3)
 * through the same index map idx (b, m).  atomicAdd is required because
 * the same source point may be selected by several sample slots.
 */
__global__ void gathertrajectorygradKernel(int b,int n,int m,int t,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
	for (int batch = blockIdx.x; batch < b; batch += gridDim.x) {
		for (int pt = threadIdx.x; pt < m; pt += blockDim.x) {
			const int src = idx[batch * m + pt];
			for (int f = 0; f < t; f++) {
				const int o = ((batch * m + pt) * t + f) * 3;
				const int g = ((batch * n + src) * t + f) * 3;
				for (int c = 0; c < 3; c++) {
					atomicAdd(inp_g + g + c, out_g[o + c]);
				}
			}
		}
	}
}
void gathertrajectorygradLauncher(int b,int n,int m,int t,const float * out_g,const int * idx,float * inp_g){
//clock_t start,finish;
//double totaltime;
//start=clock();
hipLaunchKernelGGL(( gathertrajectorygradKernel), dim3(32),dim3(128), 0, 0, b,n,m,t,out_g,idx,inp_g);
//finish=clock();
//totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
//printf("gathertrajectorygradKernel:%f \n",totaltime);
}
__global__ void farthestpointsamplingtrajectoryKernel(int b,int n,int m,int t,const float * __restrict__ trajectory,float * __restrict__ temp,int * __restrict__ sample_idx){
const int BlockSize = 512;
__shared__ float max_dists[BlockSize];
__shared__ int dists_idx[BlockSize];
const int BufferSize=2880;
__shared__ float buf[BufferSize*3];
const int framesize = 64;
__shared__ float framebufx[framesize];
__shared__ float framebufy[framesize];
__shared__ float framebufz[framesize];
for(int i=blockIdx.x;i<b;i+=gridDim.x){ //batch init
int last = 0;
if (threadIdx.x==0)
sample_idx[i*m+0]=last;
for(int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for(int j=threadIdx.x;j<min(BufferSize,n*t)*3;j+=blockDim.x){
buf[j]=trajectory[i*n*t*3+j];
}
__syncthreads();
for(int j=0;j<m;j++){ //each sample step
float t_max_dists = -1;
int t_dist_idx = 0;
for(int k=0;k<min(t,framesize);k++){
int tmp_idx = i*n*t*3 + last*t*3 + k*3;
framebufx[k] = trajectory[tmp_idx + 0];
framebufy[k] = trajectory[tmp_idx + 1];
framebufz[k] = trajectory[tmp_idx + 2];
}
for(int k=threadIdx.x;k<n;k+=blockDim.x){ //compute dis
float td=temp[blockIdx.x*n+k];
float td_new = 0;
float tx1=0,ty1=0,tz1=0,tx2=0,ty2=0,tz2=0;
for(int u=0;u<t;u++){
if(u<framesize){
int tmp_idx = u;
tx1=framebufx[tmp_idx];
ty1=framebufy[tmp_idx];
tz1=framebufz[tmp_idx];
}else{
int tmp_idx = i*n*t*3 + last*t*3 + u*3;
tx1=trajectory[tmp_idx+0];
ty1=trajectory[tmp_idx+1];
tz1=trajectory[tmp_idx+2];
}
if(k*t+u<BufferSize){
int tmp_idx = (k*t+u)*3;
tx2=buf[tmp_idx+0];
ty2=buf[tmp_idx+1];
tz2=buf[tmp_idx+2];
}else{
int tmp_idx = i*n*t*3 + k*t*3 + u*3;
tx2=trajectory[tmp_idx+0];
ty2=trajectory[tmp_idx+1];
tz2=trajectory[tmp_idx+2];
}
td_new += max(((tx2-tx1)*(tx2-tx1)+(ty2-ty1)*(ty2-ty1)+(tz2-tz1)*(tz2-tz1)),1e-20f);
}
td_new/=t;
float d2=min(td,td_new);
if(d2!=td)
temp[blockIdx.x*n+k]=d2;
if(d2>t_max_dists){
t_max_dists=d2;
t_dist_idx=k;
}
}
max_dists[threadIdx.x]=t_max_dists;
dists_idx[threadIdx.x]=t_dist_idx;
for (int u=0;(1<<u)<blockDim.x;u++){ //reduce min
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (max_dists[i1]<max_dists[i2]){
max_dists[i1]=max_dists[i2];
dists_idx[i1]=dists_idx[i2];
}
}
}
__syncthreads();
last=dists_idx[0];
if (threadIdx.x==0)
sample_idx[i*m+j]=last;
}
}
}
//require 32*n working space
void farthestpointsamplingtrajectoryLauncher(int b,int n,int m,int t,const float * inp,float * temp,int *out){
//clock_t start,finish;
//double totaltime;
//start=clock();
hipLaunchKernelGGL(( farthestpointsamplingtrajectoryKernel), dim3(32),dim3(512), 0, 0, b,n,m,t,inp,temp,out);
//finish=clock();
//totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
//printf("farthestpointsamplingtrajectoryKernel:%f \n",totaltime);
}
| e64df9ba2a3b990345670e96567cf77a86a50e86.cu | #include<stdio.h>
#include<time.h>
__global__ void gathertrajctoryKernel(int b,int n,int m,int t,const float * __restrict__ inp,const int * __restrict__ idx, float * __restrict__ out){
for(int i = blockIdx.x;i<b;i+=gridDim.x){
for(int j = threadIdx.x;j<m; j+=blockDim.x){
int tmp = idx[i*m+j];
for(int k = 0;k<t;k++){
int tmp_idx1 = ((i*m+j)*t+k);
int tmp_idx2 = ((i*n+tmp)*t+k);
out[tmp_idx1*3+0]=inp[tmp_idx2*3+0];
out[tmp_idx1*3+1]=inp[tmp_idx2*3+1];
out[tmp_idx1*3+2]=inp[tmp_idx2*3+2];
}
}
}
}
void gathertrajctoryLauncher(int b,int n,int m,int t,const float * inp,const int *idx, float *out){
//clock_t start,finish;
//double totaltime;
//start=clock();
gathertrajctoryKernel<<<32,512>>>(b,n,m,t,inp,idx,out);
//finish=clock();
//totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
//printf("gathertrajctoryKernel:%f b:%d n:%d m:%d t:%d \n",totaltime,b,n,m,t);
}
__global__ void gathertrajectorygradKernel(int b,int n,int m,int t,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
for(int i = blockIdx.x;i<b;i+=gridDim.x){
for(int j = threadIdx.x;j<m; j+=blockDim.x){
int tmp = idx[i*m+j];
for(int k = 0;k<t;k++){
int tmp_idx1 = ((i*m+j)*t+k);
int tmp_idx2 = ((i*n+tmp)*t+k);
atomicAdd(&inp_g[tmp_idx2*3+0],out_g[tmp_idx1*3+0]);
atomicAdd(&inp_g[tmp_idx2*3+1],out_g[tmp_idx1*3+1]);
atomicAdd(&inp_g[tmp_idx2*3+2],out_g[tmp_idx1*3+2]);
}
}
}
}
void gathertrajectorygradLauncher(int b,int n,int m,int t,const float * out_g,const int * idx,float * inp_g){
//clock_t start,finish;
//double totaltime;
//start=clock();
gathertrajectorygradKernel<<<32,128>>>(b,n,m,t,out_g,idx,inp_g);
//finish=clock();
//totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
//printf("gathertrajectorygradKernel:%f \n",totaltime);
}
__global__ void farthestpointsamplingtrajectoryKernel(int b,int n,int m,int t,const float * __restrict__ trajectory,float * __restrict__ temp,int * __restrict__ sample_idx){
const int BlockSize = 512;
__shared__ float max_dists[BlockSize];
__shared__ int dists_idx[BlockSize];
const int BufferSize=2880;
__shared__ float buf[BufferSize*3];
const int framesize = 64;
__shared__ float framebufx[framesize];
__shared__ float framebufy[framesize];
__shared__ float framebufz[framesize];
for(int i=blockIdx.x;i<b;i+=gridDim.x){ //batch init
int last = 0;
if (threadIdx.x==0)
sample_idx[i*m+0]=last;
for(int j=threadIdx.x;j<n;j+=blockDim.x){
temp[blockIdx.x*n+j]=1e38;
}
for(int j=threadIdx.x;j<min(BufferSize,n*t)*3;j+=blockDim.x){
buf[j]=trajectory[i*n*t*3+j];
}
__syncthreads();
for(int j=0;j<m;j++){ //each sample step
float t_max_dists = -1;
int t_dist_idx = 0;
for(int k=0;k<min(t,framesize);k++){
int tmp_idx = i*n*t*3 + last*t*3 + k*3;
framebufx[k] = trajectory[tmp_idx + 0];
framebufy[k] = trajectory[tmp_idx + 1];
framebufz[k] = trajectory[tmp_idx + 2];
}
for(int k=threadIdx.x;k<n;k+=blockDim.x){ //compute dis
float td=temp[blockIdx.x*n+k];
float td_new = 0;
float tx1=0,ty1=0,tz1=0,tx2=0,ty2=0,tz2=0;
for(int u=0;u<t;u++){
if(u<framesize){
int tmp_idx = u;
tx1=framebufx[tmp_idx];
ty1=framebufy[tmp_idx];
tz1=framebufz[tmp_idx];
}else{
int tmp_idx = i*n*t*3 + last*t*3 + u*3;
tx1=trajectory[tmp_idx+0];
ty1=trajectory[tmp_idx+1];
tz1=trajectory[tmp_idx+2];
}
if(k*t+u<BufferSize){
int tmp_idx = (k*t+u)*3;
tx2=buf[tmp_idx+0];
ty2=buf[tmp_idx+1];
tz2=buf[tmp_idx+2];
}else{
int tmp_idx = i*n*t*3 + k*t*3 + u*3;
tx2=trajectory[tmp_idx+0];
ty2=trajectory[tmp_idx+1];
tz2=trajectory[tmp_idx+2];
}
td_new += max(((tx2-tx1)*(tx2-tx1)+(ty2-ty1)*(ty2-ty1)+(tz2-tz1)*(tz2-tz1)),1e-20f);
}
td_new/=t;
float d2=min(td,td_new);
if(d2!=td)
temp[blockIdx.x*n+k]=d2;
if(d2>t_max_dists){
t_max_dists=d2;
t_dist_idx=k;
}
}
max_dists[threadIdx.x]=t_max_dists;
dists_idx[threadIdx.x]=t_dist_idx;
for (int u=0;(1<<u)<blockDim.x;u++){ //reduce min
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (max_dists[i1]<max_dists[i2]){
max_dists[i1]=max_dists[i2];
dists_idx[i1]=dists_idx[i2];
}
}
}
__syncthreads();
last=dists_idx[0];
if (threadIdx.x==0)
sample_idx[i*m+j]=last;
}
}
}
//require 32*n working space
void farthestpointsamplingtrajectoryLauncher(int b,int n,int m,int t,const float * inp,float * temp,int *out){
//clock_t start,finish;
//double totaltime;
//start=clock();
farthestpointsamplingtrajectoryKernel<<<32,512>>>(b,n,m,t,inp,temp,out);
//finish=clock();
//totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
//printf("farthestpointsamplingtrajectoryKernel:%f \n",totaltime);
}
|
6f53129b9d05edce5ab3ab147cb6d0a83abcf5db.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
namespace {
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
return 1.0 / (1.0 + exp(-z));
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) {
const auto s = sigmoid(z);
return (1.0 - s) * s;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_tanh(scalar_t z) {
const auto t = tanh(z);
return 1 - (t * t);
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) {
return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0));
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
const auto e = exp(z);
const auto d_relu = z < 0.0 ? 0.0 : 1.0;
return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0);
}
template <typename scalar_t>
__global__ void lltm_cuda_forward_kernel(
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < gates.size(2)){
input_gate[n][c] = sigmoid(gates[n][0][c]);
output_gate[n][c] = sigmoid(gates[n][1][c]);
candidate_cell[n][c] = elu(gates[n][2][c]);
new_cell[n][c] =
old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
}
}
template <typename scalar_t>
__global__ void lltm_cuda_backward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < d_gates.size(2)){
const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
const auto d_new_cell =
d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
d_old_cell[n][c] = d_new_cell;
const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
d_gates[n][0][c] =
d_input_gate * d_sigmoid(gate_weights[n][0][c]);
d_gates[n][1][c] =
d_output_gate * d_sigmoid(gate_weights[n][1][c]);
d_gates[n][2][c] =
d_candidate_cell * d_elu(gate_weights[n][2][c]);
}
}
} // namespace
std::vector<torch::Tensor> lltm_cuda_forward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor bias,
torch::Tensor old_h,
torch::Tensor old_cell) {
auto X = torch::cat({old_h, input}, /*dim=*/1);
auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
const auto batch_size = old_cell.size(0);
const auto state_size = old_cell.size(1);
auto gates = gate_weights.reshape({batch_size, 3, state_size});
auto new_h = torch::zeros_like(old_cell);
auto new_cell = torch::zeros_like(old_cell);
auto input_gate = torch::zeros_like(old_cell);
auto output_gate = torch::zeros_like(old_cell);
auto candidate_cell = torch::zeros_like(old_cell);
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( lltm_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
}));
return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
}
std::vector<torch::Tensor> lltm_cuda_backward(
torch::Tensor grad_h,
torch::Tensor grad_cell,
torch::Tensor new_cell,
torch::Tensor input_gate,
torch::Tensor output_gate,
torch::Tensor candidate_cell,
torch::Tensor X,
torch::Tensor gates,
torch::Tensor weights) {
auto d_old_cell = torch::zeros_like(new_cell);
auto d_gates = torch::zeros_like(gates);
const auto batch_size = new_cell.size(0);
const auto state_size = new_cell.size(1);
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] {
hipLaunchKernelGGL(( lltm_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
}));
auto d_gate_weights = d_gates.flatten(1, 2);
auto d_weights = d_gate_weights.t().mm(X);
auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true);
auto d_X = d_gate_weights.mm(weights);
auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
auto d_input = d_X.slice(/*dim=*/1, state_size);
return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates};
} | 6f53129b9d05edce5ab3ab147cb6d0a83abcf5db.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
namespace {
template <typename scalar_t>
__device__ __forceinline__ scalar_t sigmoid(scalar_t z) {
return 1.0 / (1.0 + exp(-z));
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_sigmoid(scalar_t z) {
const auto s = sigmoid(z);
return (1.0 - s) * s;
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_tanh(scalar_t z) {
const auto t = tanh(z);
return 1 - (t * t);
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t elu(scalar_t z, scalar_t alpha = 1.0) {
return fmaxf(0.0, z) + fminf(0.0, alpha * (exp(z) - 1.0));
}
template <typename scalar_t>
__device__ __forceinline__ scalar_t d_elu(scalar_t z, scalar_t alpha = 1.0) {
const auto e = exp(z);
const auto d_relu = z < 0.0 ? 0.0 : 1.0;
return d_relu + (((alpha * (e - 1.0)) < 0.0) ? (alpha * e) : 0.0);
}
template <typename scalar_t>
__global__ void lltm_cuda_forward_kernel(
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> old_cell,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_h,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < gates.size(2)){
input_gate[n][c] = sigmoid(gates[n][0][c]);
output_gate[n][c] = sigmoid(gates[n][1][c]);
candidate_cell[n][c] = elu(gates[n][2][c]);
new_cell[n][c] =
old_cell[n][c] + candidate_cell[n][c] * input_gate[n][c];
new_h[n][c] = tanh(new_cell[n][c]) * output_gate[n][c];
}
}
template <typename scalar_t>
__global__ void lltm_cuda_backward_kernel(
torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> d_old_cell,
torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> d_gates,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_h,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> grad_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> new_cell,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> input_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> output_gate,
const torch::PackedTensorAccessor<scalar_t,2,torch::RestrictPtrTraits,size_t> candidate_cell,
const torch::PackedTensorAccessor<scalar_t,3,torch::RestrictPtrTraits,size_t> gate_weights) {
//batch index
const int n = blockIdx.y;
// column index
const int c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < d_gates.size(2)){
const auto d_output_gate = tanh(new_cell[n][c]) * grad_h[n][c];
const auto d_tanh_new_cell = output_gate[n][c] * grad_h[n][c];
const auto d_new_cell =
d_tanh(new_cell[n][c]) * d_tanh_new_cell + grad_cell[n][c];
d_old_cell[n][c] = d_new_cell;
const auto d_candidate_cell = input_gate[n][c] * d_new_cell;
const auto d_input_gate = candidate_cell[n][c] * d_new_cell;
d_gates[n][0][c] =
d_input_gate * d_sigmoid(gate_weights[n][0][c]);
d_gates[n][1][c] =
d_output_gate * d_sigmoid(gate_weights[n][1][c]);
d_gates[n][2][c] =
d_candidate_cell * d_elu(gate_weights[n][2][c]);
}
}
} // namespace
std::vector<torch::Tensor> lltm_cuda_forward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor bias,
torch::Tensor old_h,
torch::Tensor old_cell) {
auto X = torch::cat({old_h, input}, /*dim=*/1);
auto gate_weights = torch::addmm(bias, X, weights.transpose(0, 1));
const auto batch_size = old_cell.size(0);
const auto state_size = old_cell.size(1);
auto gates = gate_weights.reshape({batch_size, 3, state_size});
auto new_h = torch::zeros_like(old_cell);
auto new_cell = torch::zeros_like(old_cell);
auto input_gate = torch::zeros_like(old_cell);
auto output_gate = torch::zeros_like(old_cell);
auto candidate_cell = torch::zeros_like(old_cell);
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(gates.type(), "lltm_forward_cuda", ([&] {
lltm_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>());
}));
return {new_h, new_cell, input_gate, output_gate, candidate_cell, X, gates};
}
std::vector<torch::Tensor> lltm_cuda_backward(
torch::Tensor grad_h,
torch::Tensor grad_cell,
torch::Tensor new_cell,
torch::Tensor input_gate,
torch::Tensor output_gate,
torch::Tensor candidate_cell,
torch::Tensor X,
torch::Tensor gates,
torch::Tensor weights) {
auto d_old_cell = torch::zeros_like(new_cell);
auto d_gates = torch::zeros_like(gates);
const auto batch_size = new_cell.size(0);
const auto state_size = new_cell.size(1);
const int threads = 1024;
const dim3 blocks((state_size + threads - 1) / threads, batch_size);
AT_DISPATCH_FLOATING_TYPES(X.type(), "lltm_forward_cuda", ([&] {
lltm_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
d_old_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
d_gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>(),
grad_h.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
grad_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
new_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
input_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
output_gate.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
candidate_cell.packed_accessor<scalar_t,2,torch::RestrictPtrTraits,size_t>(),
gates.packed_accessor<scalar_t,3,torch::RestrictPtrTraits,size_t>());
}));
auto d_gate_weights = d_gates.flatten(1, 2);
auto d_weights = d_gate_weights.t().mm(X);
auto d_bias = d_gate_weights.sum(/*dim=*/0, /*keepdim=*/true);
auto d_X = d_gate_weights.mm(weights);
auto d_old_h = d_X.slice(/*dim=*/1, 0, state_size);
auto d_input = d_X.slice(/*dim=*/1, state_size);
return {d_old_h, d_input, d_weights, d_bias, d_old_cell, d_gates};
} |
ae5b8b9c5370453ecbb5623f34caa1f921d3b3c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hashsets.h"
#include "crc.h"
#include <stdint.h>
#include <stdio.h>
//TODO check a bunch of hashes, AND SORT THEM BY SIZE!!! or maybe even build a pseudo binary-tree
int main(){
//Copy hashsets to device
uint8_t *dh1;
uint32_t *dh2;
hipMalloc(&dh1, 256);
hipMalloc(&dh2, 4*256);
hipMemcpy(dh1, hashSet1, 256, hipMemcpyHostToDevice);
hipMemcpy(dh2, hashSet2, 4*256, hipMemcpyHostToDevice);
//Just random hashes, run the program to figure out what they are lol
uint32_t hashes[] = {0x49541d5a, 0x5ad7f6bc, 0x937db7ec, 0xa988cb16, 0xdf4ac7b9};
uint32_t goal = 0xdf4ac7b9;
int maxLen = 7;
for(int len=1;len<=maxLen;len++){
//Checking 32 chars => max = 32^maxLen
uint64_t max=32;
for(int i=0;i<len;i++)
max*=32;
uint64_t *d_result;
hipMalloc(&d_result,sizeof(uint64_t));
//bruteforceCRC32<<<1,1024>>>(d_result,len,max,hashes,sizeof(hashes)/4,dh1,dh2);
hipLaunchKernelGGL(( bruteforceCRC32), dim3(1000),dim3(1024), 0, 0, d_result,len,max,goal,dh1,dh2);
hipDeviceSynchronize();
uint64_t result = 0;
hipMemcpy(&result, d_result, sizeof(uint64_t), hipMemcpyDeviceToHost);
if(result!=0){
char decoded[len];
decode(decoded,len,result);
printf("RESULT: %s -> %x\n",decoded,calculateCRC32(decoded,len));
break;
}
}
hipFree(dh1);
hipFree(dh2);
return 0;
}
| ae5b8b9c5370453ecbb5623f34caa1f921d3b3c8.cu | #include "hashsets.h"
#include "crc.h"
#include <stdint.h>
#include <stdio.h>
//TODO check a bunch of hashes, AND SORT THEM BY SIZE!!! or maybe even build a pseudo binary-tree
int main(){
//Copy hashsets to device
uint8_t *dh1;
uint32_t *dh2;
cudaMalloc(&dh1, 256);
cudaMalloc(&dh2, 4*256);
cudaMemcpy(dh1, hashSet1, 256, cudaMemcpyHostToDevice);
cudaMemcpy(dh2, hashSet2, 4*256, cudaMemcpyHostToDevice);
//Just random hashes, run the program to figure out what they are lol
uint32_t hashes[] = {0x49541d5a, 0x5ad7f6bc, 0x937db7ec, 0xa988cb16, 0xdf4ac7b9};
uint32_t goal = 0xdf4ac7b9;
int maxLen = 7;
for(int len=1;len<=maxLen;len++){
//Checking 32 chars => max = 32^maxLen
uint64_t max=32;
for(int i=0;i<len;i++)
max*=32;
uint64_t *d_result;
cudaMalloc(&d_result,sizeof(uint64_t));
//bruteforceCRC32<<<1,1024>>>(d_result,len,max,hashes,sizeof(hashes)/4,dh1,dh2);
bruteforceCRC32<<<1000,1024>>>(d_result,len,max,goal,dh1,dh2);
cudaDeviceSynchronize();
uint64_t result = 0;
cudaMemcpy(&result, d_result, sizeof(uint64_t), cudaMemcpyDeviceToHost);
if(result!=0){
char decoded[len];
decode(decoded,len,result);
printf("RESULT: %s -> %x\n",decoded,calculateCRC32(decoded,len));
break;
}
}
cudaFree(dh1);
cudaFree(dh2);
return 0;
}
|
fb4890b0144d8a34fdb8cf6e2a0d3242896a346f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <stdio.h>
#define M_PI CUDART_PI_F
__global__
void PreEmphasis(const float PRE_EMPHASIS, const int signalLen,
short* signal, float* emphasised)
{
const int id = blockDim.x * blockIdx.x + threadIdx.x;
if (id > signalLen)
{
return;
}
if (id == 0)
{
emphasised[0] = float(signal[0]);
}
else
{
emphasised[id] = signal[id] - PRE_EMPHASIS * signal[id - 1];
}
}
__global__
void PreProcessing(const float PRE_EMPHASIS, const int frameLength, const int frameStep, const int signalLen,
short* signal, float* frames)
{
// thread and block id
const int id = threadIdx.x;
const int frameNum = blockIdx.x;
// declare shared memory.
__shared__ short signal_s[514];
__shared__ float emphasis_s[512];
// load signal to shared memory.
if (id == 0)
{
if (frameNum == 0)
{
signal_s[0] = 0;
}
else
{
signal_s[0] = signal[frameStep * frameNum - 1];
}
}
if (frameStep * frameNum + id > signalLen)
{
signal_s[id + 1] = 0;
}
else
{
signal_s[id + 1] = signal[frameStep * frameNum + id];
}
__syncthreads();
if (frameStep * frameNum + id >= signalLen)
{
emphasis_s[id] = 0;
}
else
{
// pre-emphasis
emphasis_s[id] = signal_s[id + 1] - PRE_EMPHASIS * signal_s[id];
}
__syncthreads();
// framing
//frames[frameLength * frameNum + id] = emphasis_s[id] *
// (0.54 - 0.46 * cos(2 * M_PI * id / (frameLength - 1)));
frames[frameLength * frameNum + id] = emphasis_s[id] * HammingFilter[id];
}
__device__
float2 CAdd(const float2 a, const float2 b)
{
float2 result;
result.x = a.x + b.x;
result.y = a.y + b.y;
return result;
}
__device__
float2 CSub(const float2 a, const float2 b)
{
float2 result;
result.x = a.x - b.x;
result.y = a.y - b.y;
return result;
}
__device__
float2 CMul(const float2 a, const float2 b)
{
float2 result;
result.x = (a.x * b.x) - (a.y * b.y);
result.y = (a.x * b.y) + (a.y * b.x);
return result;
}
__global__
void PowerFFT(const int bitLength, const int frameLength, const int frameStep, const int signalLen,
float* input, float* output)
{
const int id = threadIdx.x;
const int frameNum = blockIdx.x;
const int N = 1 << bitLength;
int l = 1 << (bitLength - 1);
int m = 1;
int j, k;
float x;
extern __shared__ float2 s[];
__shared__ float2* input_s;
__shared__ float2* output_s;
__shared__ float2* twiddle_s;
input_s = s;
output_s = &input_s[512];
twiddle_s = &output_s[512];
__shared__ float2* tmp_s;
// copy global data to shared memory.
if (frameNum * frameStep + id >= signalLen)
{
input_s[id].x = 0;
}
else
{
input_s[id].x = input[frameNum * frameStep + id] *
(0.54 - 0.46 * cos(2 * M_PI * id / (frameLength - 1)));
}
input_s[id].y = 0;
if (frameNum * frameStep + (id + l) >= signalLen)
{
input_s[id + l].x = 0;
}
else
{
input_s[id + l].x = input[frameNum * frameStep + (id + l)] *
(0.54 - 0.46 * cos(2 * M_PI * (id + l) / (frameLength - 1)));
}
input_s[id + l].y = 0;
// calculate twiddle factor
twiddle_s[0] = make_float2(1.0f, 0.0f);
int lo = id + 1; // location to compute
int log = 0; // integer value of log
while ((lo >>= 1) > 0)
{
++log;
}
lo = id + 1;
int j_t = (lo - (1 << log)) * 2; // j value of twiddle
int l_t = 1 << (log + 1); // l value of twiddle
x = -M_PI * j_t / l_t;
twiddle_s[l_t - 1 + j_t] = make_float2(cos(x), sin(x));
++j_t;
x = -M_PI * j_t / l_t;
twiddle_s[l_t - 1 + j_t] = make_float2(cos(x), sin(x));
__syncthreads();
// fft
for (l = 1 << (bitLength - 1); l >= 1; l >>= 1, m <<= 1)
{
j = id / m;
k = id % m;
output_s[k + 2 * j * m] = CAdd(input_s[k + j * m], input_s[k + j * m + l * m]);
output_s[k + 2 * j * m + m] = CMul(twiddle_s[l - 1 + j], CSub(input_s[k + j * m], input_s[k + j * m + l * m]));
__syncthreads();
tmp_s = input_s;
input_s = output_s;
output_s = tmp_s;
}
output[frameNum * ((N / 2) + 1) + id] = (input_s[id].x * input_s[id].x + input_s[id].y * input_s[id].y) / N;
if (id == 0)
{
int i = N >> 1;
output[frameNum * (i + 1) + i] = (input_s[i].x * input_s[i].x + input_s[i].y * input_s[i].y) / N;
}
}
__global__
void MelFilterBank(const int numFrames, const int fftLen, const int nFilter, const int threadSize,
float* powFrames, float* fbank, float* filterBanks)
{
const int id_y = threadIdx.x;
const int id_x = threadIdx.y;
const int gridIdx_y = blockIdx.x;
const int gridIdx_x = blockIdx.y;
__shared__ float powFrames_s[20][20];
__shared__ float fbank_s[20][20];
float value = 1.19209e-07;
int y = (threadSize * gridIdx_y + id_y);
const int repeat = (int)(fftLen / threadSize) + 1;
for (int count = 0; count < repeat; ++count)
{
// load powFrames
if (y < numFrames && count * threadSize + id_x < fftLen)
{
powFrames_s[id_y][id_x] = powFrames[y * fftLen + count * threadSize + id_x];
}
else
{
powFrames_s[id_y][id_x] = 0;
}
// load filterBanks
if (count * threadSize + id_y < fftLen)
{
fbank_s[id_y][id_x] = fbank[(count * threadSize + id_y) * nFilter + gridIdx_x * threadSize + id_x];
}
else
{
fbank_s[id_y][id_x] = 0;
}
__syncthreads();
for (int k = 0; k < threadSize; ++k)
{
value += powFrames_s[id_y][k] * fbank_s[k][id_x];
}
__syncthreads();
}
if (y < numFrames)
{
filterBanks[y * nFilter + gridIdx_x * threadSize + id_x] = 20 * log10(value);
}
}
__global__
void MelFilterBank_Sparse(const int numFrames, const int fftLen, const int nFilter,
float* powFrames, float* filterBanks,
float* fbanks_val, int* fbanks_col, int* fbanks_row)
{
const int id = threadIdx.x;
const int blockId = blockIdx.x;
int startNum = fbanks_col[id];
int endNum = fbanks_col[id + 1];
float value = 1.19209e-07;
for (int k = startNum; k < endNum; ++k)
{
value += fbanks_val[k] * powFrames[blockId * fftLen + fbanks_row[k]];
}
filterBanks[blockId * nFilter + id] = 20 * log10(value);
}
__global__
void DCT(const int numFrames, const int nFilter, const int numCeps, const float cepLifter,
float* filterBanks, float* mfcc)
{
const int id = threadIdx.x;
const int blockId = blockIdx.x;
__shared__ float filterBanks_s[40];
filterBanks_s[id] = filterBanks[nFilter * blockId + id];
__syncthreads();
if (id > 0 && id < numCeps + 1)
{
float value = 0;
for (int k = 0; k < nFilter; ++k)
{
value += filterBanks_s[k] * cos(M_PI * id * (2 * k + 1) / (2 * nFilter));
}
float lift = 1 + (cepLifter / 2) * sin(M_PI * (id - 1) / cepLifter);
mfcc[blockId * numCeps + id - 1] = 2 * value / sqrt(2.0 * nFilter) * lift;
}
}
__global__
void MeanNorm(const int numFrames, const int numCeps,
float* mfcc)
{
const int id = threadIdx.x;
const int blockId = blockIdx.x;
const int blockSize = blockDim.x;
const int halfSize = numFrames / 2;
extern __shared__ float x[];
__shared__ float* scratch;
scratch = x;
if (id < halfSize)
{
scratch[id] = mfcc[id * numCeps + blockId] + mfcc[(id + halfSize) * numCeps + blockId];
}
else
{
scratch[id] = 0;
}
if (numFrames % 2 == 1)
{
if (id == 0)
{
scratch[id] += mfcc[(numFrames - 1) * numCeps + blockId];
}
}
__syncthreads();
for (unsigned int stride = blockSize >> 1; id < stride; stride >>= 1)
{
scratch[id] += scratch[id + stride];
__syncthreads();
}
float mean = scratch[0] / float(numFrames);
if (id < halfSize)
{
mfcc[id * numCeps + blockId] /= mean;
mfcc[(id + halfSize) * numCeps + blockId] /= mean;
if (id == 0)
{
mfcc[(numFrames - 1) * numCeps + blockId] /= mean;
}
}
}
// Mean normalization using a global-memory workspace (meanNorm) instead of
// shared memory: subtracts the per-coefficient mean from every MFCC frame.
// Launch: one block per cepstral coefficient, blockDim.x threads.
// NOTE(review): threads with id >= halfSize write meanNorm rows up to
// blockSize-1, so meanNorm must have at least blockDim.x rows -- confirm
// against the allocation in the caller.
__global__
void MeanNorm_global(const int numFrames, const int numCeps,
	float* mfcc, float* meanNorm)
{
	const int id = threadIdx.x;
	const int blockId = blockIdx.x;
	const int blockSize = blockDim.x;
	const int halfSize = numFrames / 2;
	// First folding step into the global workspace.
	if (id < halfSize)
	{
		meanNorm[id * numCeps + blockId] = mfcc[id * numCeps + blockId] + mfcc[(id + halfSize) * numCeps + blockId];
	}
	else
	{
		meanNorm[id * numCeps + blockId] = 0;
	}
	// Odd frame count: fold the last frame into row 0.
	if (numFrames % 2 == 1)
	{
		if (id == 0)
		{
			meanNorm[blockId] += mfcc[(numFrames - 1) * numCeps + blockId];
		}
	}
	__syncthreads();
	// Tree reduction. BUG FIX: the original looped with `id < stride`, so
	// half the block skipped the in-loop __syncthreads() -- a divergent
	// barrier. All lanes iterate; only id < stride lanes accumulate. The
	// barrier on the last iteration also publishes meanNorm[blockId]
	// (read below) to the whole block.
	for (int stride = blockSize >> 1; stride > 0; stride >>= 1)
	{
		if (id < stride)
		{
			meanNorm[id * numCeps + blockId] += meanNorm[(id + stride) * numCeps + blockId];
		}
		__syncthreads();
	}
	// Epsilon keeps the mean well-defined even for an all-zero column.
	float mean = meanNorm[blockId] / float(numFrames) + 1.19209e-07;
	if (id < numFrames)
	{
		mfcc[id * numCeps + blockId] -= mean;
	}
}
| fb4890b0144d8a34fdb8cf6e2a0d3242896a346f.cu | #include "kernel.h"
#include <stdio.h>
#define M_PI CUDART_PI_F
// Pre-emphasis filter over the whole signal: y[n] = x[n] - alpha * x[n-1],
// with y[0] = x[0]. One thread per sample.
__global__
void PreEmphasis(const float PRE_EMPHASIS, const int signalLen,
	short* signal, float* emphasised)
{
	const int id = blockDim.x * blockIdx.x + threadIdx.x;
	// BUG FIX: the original guard was `id > signalLen`, which let the
	// thread with id == signalLen read signal[signalLen] and write
	// emphasised[signalLen] -- one element past the end of both arrays.
	if (id >= signalLen)
	{
		return;
	}
	if (id == 0)
	{
		// first sample has no predecessor
		emphasised[0] = float(signal[0]);
	}
	else
	{
		emphasised[id] = signal[id] - PRE_EMPHASIS * signal[id - 1];
	}
}
// Pre-emphasis + windowing for one frame of the input signal.
// Launch: one block per frame (blockIdx.x), one thread per in-frame sample;
// the static shared arrays below assume blockDim.x == 512 -- TODO confirm
// against the host launch configuration.
// NOTE(review): HammingFilter is an external symbol (presumably a
// precomputed window table in constant/device memory); verify its length
// covers blockDim.x entries.
__global__
void PreProcessing(const float PRE_EMPHASIS, const int frameLength, const int frameStep, const int signalLen,
	short* signal, float* frames)
{
	// thread and block id
	const int id = threadIdx.x;
	const int frameNum = blockIdx.x;
	// Shared staging buffers: signal_s holds this frame's raw samples plus
	// one leading sample (signal_s[0]) needed by the pre-emphasis difference.
	__shared__ short signal_s[514];
	__shared__ float emphasis_s[512];
	// Lane 0 loads the sample preceding the frame (zero for the first frame).
	if (id == 0)
	{
		if (frameNum == 0)
		{
			signal_s[0] = 0;
		}
		else
		{
			signal_s[0] = signal[frameStep * frameNum - 1];
		}
	}
	// Load this frame's samples, zero-padding past the signal end.
	// NOTE(review): this guard uses `>` while the emphasis guard below uses
	// `>=`, so the lane at exactly signalLen reads signal[signalLen]
	// (out of bounds); the value is never used because emphasis_s zeroes
	// that lane -- flagging rather than changing behavior here.
	if (frameStep * frameNum + id > signalLen)
	{
		signal_s[id + 1] = 0;
	}
	else
	{
		signal_s[id + 1] = signal[frameStep * frameNum + id];
	}
	__syncthreads();
	if (frameStep * frameNum + id >= signalLen)
	{
		emphasis_s[id] = 0;
	}
	else
	{
		// pre-emphasis: y[n] = x[n] - alpha * x[n-1]
		emphasis_s[id] = signal_s[id + 1] - PRE_EMPHASIS * signal_s[id];
	}
	__syncthreads();
	// framing: apply the precomputed Hamming window and store the frame.
	//frames[frameLength * frameNum + id] = emphasis_s[id] *
	//	(0.54 - 0.46 * cos(2 * M_PI * id / (frameLength - 1)));
	frames[frameLength * frameNum + id] = emphasis_s[id] * HammingFilter[id];
}
// Complex addition for float2 values (x = real part, y = imaginary part).
__device__
float2 CAdd(const float2 a, const float2 b)
{
	return make_float2(a.x + b.x, a.y + b.y);
}
// Complex subtraction for float2 values (x = real part, y = imaginary part).
__device__
float2 CSub(const float2 a, const float2 b)
{
	return make_float2(a.x - b.x, a.y - b.y);
}
// Complex multiplication for float2 values:
// (a.x + i*a.y) * (b.x + i*b.y).
__device__
float2 CMul(const float2 a, const float2 b)
{
	const float re = (a.x * b.x) - (a.y * b.y);
	const float im = (a.x * b.y) + (a.y * b.x);
	return make_float2(re, im);
}
// Windowed power spectrum of one frame via an iterative radix-2 FFT with
// ping-ponged shared buffers.
// Launch: one block per frame with N/2 threads, where N = 1 << bitLength.
// Dynamic shared memory layout: input_s (512 float2) | output_s (512 float2)
// | twiddle table -- the hard-coded 512 offsets assume N == 512; TODO
// confirm the host passes a matching shared-memory size.
// Output: (N/2 + 1) power-spectrum bins per frame.
__global__
void PowerFFT(const int bitLength, const int frameLength, const int frameStep, const int signalLen,
	float* input, float* output)
{
	const int id = threadIdx.x;
	const int frameNum = blockIdx.x;
	const int N = 1 << bitLength;
	int l = 1 << (bitLength - 1);
	int m = 1;
	int j, k;
	float x;
	extern __shared__ float2 s[];
	__shared__ float2* input_s;
	__shared__ float2* output_s;
	__shared__ float2* twiddle_s;
	input_s = s;
	output_s = &input_s[512];
	twiddle_s = &output_s[512];
	__shared__ float2* tmp_s;
	// Each thread loads two samples (id and id + N/2), applying the Hamming
	// window inline and zero-padding past the end of the signal.
	if (frameNum * frameStep + id >= signalLen)
	{
		input_s[id].x = 0;
	}
	else
	{
		input_s[id].x = input[frameNum * frameStep + id] *
			(0.54 - 0.46 * cos(2 * M_PI * id / (frameLength - 1)));
	}
	input_s[id].y = 0;
	if (frameNum * frameStep + (id + l) >= signalLen)
	{
		input_s[id + l].x = 0;
	}
	else
	{
		input_s[id + l].x = input[frameNum * frameStep + (id + l)] *
			(0.54 - 0.46 * cos(2 * M_PI * (id + l) / (frameLength - 1)));
	}
	input_s[id + l].y = 0;
	// Build the twiddle-factor table: each thread derives its (j, l) stage
	// coordinates from its linear index and writes two entries
	// exp(-i*pi*j/l) at table slot (l - 1 + j).
	twiddle_s[0] = make_float2(1.0f, 0.0f);
	int lo = id + 1; // location to compute
	int log = 0;     // integer value of log2(lo)
	while ((lo >>= 1) > 0)
	{
		++log;
	}
	lo = id + 1;
	int j_t = (lo - (1 << log)) * 2; // j value of twiddle
	int l_t = 1 << (log + 1);        // l value of twiddle
	x = -M_PI * j_t / l_t;
	twiddle_s[l_t - 1 + j_t] = make_float2(cos(x), sin(x));
	++j_t;
	x = -M_PI * j_t / l_t;
	twiddle_s[l_t - 1 + j_t] = make_float2(cos(x), sin(x));
	__syncthreads();
	// fft: log2(N) butterfly stages, swapping (ping-ponging) the input and
	// output buffers after each stage. Each thread computes one butterfly
	// (a sum and a twiddled difference) per stage.
	for (l = 1 << (bitLength - 1); l >= 1; l >>= 1, m <<= 1)
	{
		j = id / m;
		k = id % m;
		output_s[k + 2 * j * m] = CAdd(input_s[k + j * m], input_s[k + j * m + l * m]);
		output_s[k + 2 * j * m + m] = CMul(twiddle_s[l - 1 + j], CSub(input_s[k + j * m], input_s[k + j * m + l * m]));
		__syncthreads();
		tmp_s = input_s;
		input_s = output_s;
		output_s = tmp_s;
	}
	// Periodogram: |X[k]|^2 / N for bins 0 .. N/2 (spectrum is symmetric);
	// lane 0 additionally writes the Nyquist bin.
	output[frameNum * ((N / 2) + 1) + id] = (input_s[id].x * input_s[id].x + input_s[id].y * input_s[id].y) / N;
	if (id == 0)
	{
		int i = N >> 1;
		output[frameNum * (i + 1) + i] = (input_s[i].x * input_s[i].x + input_s[i].y * input_s[i].y) / N;
	}
}
// Dense tiled matrix multiply with a log compression:
// filterBanks = 20 * log10(powFrames x fbank + eps), where powFrames is
// (numFrames x fftLen) and fbank is (fftLen x nFilter).
// Launch: 2-D thread blocks of threadSize x threadSize; threadSize must be
// <= 20 (bounded by the static 20x20 shared tiles) -- TODO confirm launch.
__global__
void MelFilterBank(const int numFrames, const int fftLen, const int nFilter, const int threadSize,
	float* powFrames, float* fbank, float* filterBanks)
{
	const int id_y = threadIdx.x;
	const int id_x = threadIdx.y;
	const int gridIdx_y = blockIdx.x;
	const int gridIdx_x = blockIdx.y;
	__shared__ float powFrames_s[20][20];
	__shared__ float fbank_s[20][20];
	// Epsilon seed keeps log10 below away from zero.
	float value = 1.19209e-07;
	int y = (threadSize * gridIdx_y + id_y);
	// Walk the shared dimension (fftLen) one tile at a time.
	const int repeat = (int)(fftLen / threadSize) + 1;
	for (int count = 0; count < repeat; ++count)
	{
		// load powFrames tile (zero-padded outside the matrix)
		if (y < numFrames && count * threadSize + id_x < fftLen)
		{
			powFrames_s[id_y][id_x] = powFrames[y * fftLen + count * threadSize + id_x];
		}
		else
		{
			powFrames_s[id_y][id_x] = 0;
		}
		// load filterBanks (fbank) tile
		if (count * threadSize + id_y < fftLen)
		{
			fbank_s[id_y][id_x] = fbank[(count * threadSize + id_y) * nFilter + gridIdx_x * threadSize + id_x];
		}
		else
		{
			fbank_s[id_y][id_x] = 0;
		}
		__syncthreads();
		// partial dot product over this tile
		for (int k = 0; k < threadSize; ++k)
		{
			value += powFrames_s[id_y][k] * fbank_s[k][id_x];
		}
		// barrier before the tiles are overwritten on the next iteration
		__syncthreads();
	}
	// convert accumulated energy to decibels
	if (y < numFrames)
	{
		filterBanks[y * nFilter + gridIdx_x * threadSize + id_x] = 20 * log10(value);
	}
}
// Apply a sparse (compressed-column) mel filter bank to one frame of the
// power spectrum and convert the energies to decibels.
// Launch: one block per frame, one thread per filter (blockDim.x == nFilter).
__global__
void MelFilterBank_Sparse(const int numFrames, const int fftLen, const int nFilter,
	float* powFrames, float* filterBanks,
	float* fbanks_val, int* fbanks_col, int* fbanks_row)
{
	const int filter = threadIdx.x;
	const int frame = blockIdx.x;
	// Non-zeros of this filter occupy [fbanks_col[filter], fbanks_col[filter+1]).
	const int first = fbanks_col[filter];
	const int last = fbanks_col[filter + 1];
	// Epsilon seed keeps log10 below away from zero.
	float energy = 1.19209e-07;
	for (int k = first; k < last; ++k)
	{
		energy += fbanks_val[k] * powFrames[frame * fftLen + fbanks_row[k]];
	}
	filterBanks[frame * nFilter + filter] = 20 * log10(energy);
}
// DCT-II of the log filter-bank energies with cepstral liftering.
// Launch: one block per frame, blockDim.x == nFilter threads; nFilter must
// be <= 40 (bounded by the static shared array) -- TODO confirm launch.
// Threads with id in [1, numCeps] each produce cepstral coefficient id-1;
// the remaining threads only help stage filterBanks into shared memory.
__global__
void DCT(const int numFrames, const int nFilter, const int numCeps, const float cepLifter,
	float* filterBanks, float* mfcc)
{
	const int id = threadIdx.x;
	const int blockId = blockIdx.x;
	__shared__ float filterBanks_s[40];
	filterBanks_s[id] = filterBanks[nFilter * blockId + id];
	__syncthreads();
	if (id > 0 && id < numCeps + 1)
	{
		// DCT-II basis projection over the nFilter energies.
		float value = 0;
		for (int k = 0; k < nFilter; ++k)
		{
			value += filterBanks_s[k] * cos(M_PI * id * (2 * k + 1) / (2 * nFilter));
		}
		// sinusoidal liftering de-emphasizes higher cepstral coefficients
		float lift = 1 + (cepLifter / 2) * sin(M_PI * (id - 1) / cepLifter);
		mfcc[blockId * numCeps + id - 1] = 2 * value / sqrt(2.0 * nFilter) * lift;
	}
}
// Per-coefficient mean normalization of the MFCC matrix (numFrames x numCeps).
// Launch: one block per cepstral coefficient (blockIdx.x in [0, numCeps)),
// blockDim.x threads cooperating on a tree reduction over the frames.
// Requires blockDim.x * sizeof(float) bytes of dynamic shared memory and a
// power-of-two blockDim.x (tree-reduction assumption) -- TODO confirm launch.
__global__
void MeanNorm(const int numFrames, const int numCeps,
	float* mfcc)
{
	const int id = threadIdx.x;
	const int blockId = blockIdx.x;
	const int blockSize = blockDim.x;
	const int halfSize = numFrames / 2;
	extern __shared__ float x[];
	float* scratch = x;
	// First folding step: frame i is paired with frame i + halfSize;
	// non-participating lanes contribute the additive identity.
	if (id < halfSize)
	{
		scratch[id] = mfcc[id * numCeps + blockId] + mfcc[(id + halfSize) * numCeps + blockId];
	}
	else
	{
		scratch[id] = 0;
	}
	// Odd frame count: fold the leftover last frame into lane 0.
	if (numFrames % 2 == 1)
	{
		if (id == 0)
		{
			scratch[id] += mfcc[(numFrames - 1) * numCeps + blockId];
		}
	}
	__syncthreads();
	// Tree reduction. BUG FIX: the original used `id < stride` as the loop
	// condition, so lanes with id >= blockSize/2 never reached the
	// __syncthreads() inside the loop -- a divergent barrier (undefined
	// behavior / data race). Every lane must iterate the loop; only lanes
	// with id < stride accumulate.
	for (unsigned int stride = blockSize >> 1; stride > 0; stride >>= 1)
	{
		if (id < stride)
		{
			scratch[id] += scratch[id + stride];
		}
		__syncthreads();
	}
	// scratch[0] now holds the sum over all frames; the barrier executed on
	// the final loop iteration makes it visible to every lane.
	float mean = scratch[0] / float(numFrames);
	if (id < halfSize)
	{
		mfcc[id * numCeps + blockId] /= mean;
		mfcc[(id + halfSize) * numCeps + blockId] /= mean;
		if (id == 0)
		{
			mfcc[(numFrames - 1) * numCeps + blockId] /= mean;
		}
	}
}
// Mean normalization using a global-memory workspace (meanNorm) instead of
// shared memory: subtracts the per-coefficient mean from every MFCC frame.
// Launch: one block per cepstral coefficient, blockDim.x threads.
// NOTE(review): threads with id >= halfSize write meanNorm rows up to
// blockSize-1, so meanNorm must have at least blockDim.x rows -- confirm
// against the allocation in the caller.
__global__
void MeanNorm_global(const int numFrames, const int numCeps,
	float* mfcc, float* meanNorm)
{
	const int id = threadIdx.x;
	const int blockId = blockIdx.x;
	const int blockSize = blockDim.x;
	const int halfSize = numFrames / 2;
	// First folding step into the global workspace.
	if (id < halfSize)
	{
		meanNorm[id * numCeps + blockId] = mfcc[id * numCeps + blockId] + mfcc[(id + halfSize) * numCeps + blockId];
	}
	else
	{
		meanNorm[id * numCeps + blockId] = 0;
	}
	// Odd frame count: fold the last frame into row 0.
	if (numFrames % 2 == 1)
	{
		if (id == 0)
		{
			meanNorm[blockId] += mfcc[(numFrames - 1) * numCeps + blockId];
		}
	}
	__syncthreads();
	// Tree reduction. BUG FIX: the original looped with `id < stride`, so
	// half the block skipped the in-loop __syncthreads() -- a divergent
	// barrier. All lanes iterate; only id < stride lanes accumulate. The
	// barrier on the last iteration also publishes meanNorm[blockId]
	// (read below) to the whole block.
	for (int stride = blockSize >> 1; stride > 0; stride >>= 1)
	{
		if (id < stride)
		{
			meanNorm[id * numCeps + blockId] += meanNorm[(id + stride) * numCeps + blockId];
		}
		__syncthreads();
	}
	// Epsilon keeps the mean well-defined even for an all-zero column.
	float mean = meanNorm[blockId] / float(numFrames) + 1.19209e-07;
	if (id < numFrames)
	{
		mfcc[id * numCeps + blockId] -= mean;
	}
}
|
d4343a8c14d77cc857ccf21f259127b7e98ef1fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Element-wise sum of two m x blockDim.x integer matrices: c = a + b.
// Launch: a single block of n threads; each thread owns one column and
// iterates down the m rows.
__global__ void mat_add(int*a , int *b,int *c,int m)
{
	const int col = threadIdx.x;
	for (int row = 0; row < m; ++row)
	{
		const int idx = row * blockDim.x + col;
		c[idx] = a[idx] + b[idx];
	}
}
/**
 * Reads two m x n integer matrices from stdin, adds them on the GPU and
 * prints the result. n must not exceed the per-block thread limit because
 * the kernel is launched as a single block of n threads.
 */
int main(int argc, char const *argv[])
{
	int *a, *b, *c, m, n, i, j;
	int *d_a, *d_b, *d_c;
	printf("enter the value of m \n");
	// validate interactive input instead of using garbage on bad reads
	if (scanf("%d", &m) != 1 || m <= 0)
	{
		printf("invalid value of m\n");
		return 1;
	}
	printf("enter the value of n\n");
	if (scanf("%d", &n) != 1 || n <= 0)
	{
		printf("invalid value of n\n");
		return 1;
	}
	int size = sizeof(int) * m * n;
	a = (int*)malloc(m * n * sizeof(int));
	b = (int*)malloc(m * n * sizeof(int));
	c = (int*)malloc(m * n * sizeof(int));
	if (a == NULL || b == NULL || c == NULL)
	{
		printf("host allocation failed\n");
		free(a);
		free(b);
		free(c);
		return 1;
	}
	printf("enter the input1 matrix\n");
	for (i = 0; i < m * n; i++)
		scanf("%d", &a[i]);
	printf("enter the input2 matrix\n");
	for (i = 0; i < m * n; i++)
		scanf("%d", &b[i]);
	hipMalloc((void**)&d_a, size);
	hipMalloc((void**)&d_b, size);
	hipMalloc((void**)&d_c, size);
	hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
	hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( mat_add), dim3(1),dim3(n), 0, 0, d_a,d_b,d_c,m);
	// kernel launches do not report errors directly; query explicitly
	// (e.g. n beyond the per-block thread limit fails here)
	hipError_t err = hipGetLastError();
	if (err != hipSuccess)
	{
		printf("kernel launch failed: %s\n", hipGetErrorString(err));
	}
	hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
	printf("the result vector is :\n");
	for (i = 0; i < m; i++)
	{
		for (j = 0; j < n; j++)
			printf("%d\t", c[i * n + j]);
		printf("\n");
	}
	getchar();
	hipFree(d_a);
	hipFree(d_b);
	hipFree(d_c);
	// BUG FIX: the original leaked the host buffers
	free(a);
	free(b);
	free(c);
	return 0;
}
| d4343a8c14d77cc857ccf21f259127b7e98ef1fe.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Element-wise sum of two m x blockDim.x integer matrices: c = a + b.
// Launch: a single block of n threads; each thread owns one column and
// iterates down the m rows.
__global__ void mat_add(int*a , int *b,int *c,int m)
{
	const int col = threadIdx.x;
	for (int row = 0; row < m; ++row)
	{
		const int idx = row * blockDim.x + col;
		c[idx] = a[idx] + b[idx];
	}
}
/**
 * Reads two m x n integer matrices from stdin, adds them on the GPU and
 * prints the result. n must not exceed the per-block thread limit because
 * the kernel is launched as a single block of n threads.
 */
int main(int argc, char const *argv[])
{
	int *a, *b, *c, m, n, i, j;
	int *d_a, *d_b, *d_c;
	printf("enter the value of m \n");
	// validate interactive input instead of using garbage on bad reads
	if (scanf("%d", &m) != 1 || m <= 0)
	{
		printf("invalid value of m\n");
		return 1;
	}
	printf("enter the value of n\n");
	if (scanf("%d", &n) != 1 || n <= 0)
	{
		printf("invalid value of n\n");
		return 1;
	}
	int size = sizeof(int) * m * n;
	a = (int*)malloc(m * n * sizeof(int));
	b = (int*)malloc(m * n * sizeof(int));
	c = (int*)malloc(m * n * sizeof(int));
	if (a == NULL || b == NULL || c == NULL)
	{
		printf("host allocation failed\n");
		free(a);
		free(b);
		free(c);
		return 1;
	}
	printf("enter the input1 matrix\n");
	for (i = 0; i < m * n; i++)
		scanf("%d", &a[i]);
	printf("enter the input2 matrix\n");
	for (i = 0; i < m * n; i++)
		scanf("%d", &b[i]);
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	cudaMalloc((void**)&d_c, size);
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
	mat_add<<<1,n>>>(d_a,d_b,d_c,m);
	// kernel launches do not report errors directly; query explicitly
	// (e.g. n beyond the per-block thread limit fails here)
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		printf("kernel launch failed: %s\n", cudaGetErrorString(err));
	}
	cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
	printf("the result vector is :\n");
	for (i = 0; i < m; i++)
	{
		for (j = 0; j < n; j++)
			printf("%d\t", c[i * n + j]);
		printf("\n");
	}
	getchar();
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	// BUG FIX: the original leaked the host buffers
	free(a);
	free(b);
	free(c);
	return 0;
}
|
83ae362dfc66236c5d8a7b9adcdb0afb474c94ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "Rippling.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
// Constructs the rippling animation; the image must be square.
Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) :
	Animable_I<uchar4>(grid, w, h, "My_Rippling_Cuda_RGBA_uchar4")
	{
	assert(w == h); // the rippling effect assumes a square image
	// Animation state: t advances by dt on every animationStep().
	this->t = 0; // inherited (protected) from Animable_I
	this->dt = dt;
	}
// No resources owned beyond the base class: nothing to release.
Rippling::~Rippling()
	{
	// intentionally empty
	}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car image pas zoomable
*/
/**
 * Override. Called periodically by the API to fill the pixel buffer.
 * domaineMath is ignored: this image is not zoomable.
 */
void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
	{
	// Launch the imported rippling kernel on the grid/block configuration
	// (dg, db) held by the Animable_I base class.
	hipLaunchKernelGGL(( rippling), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t);
	}
/**
* Override
* Call periodicly by the API
*/
/**
 * Override. Called periodically by the API: advances the animation clock.
 */
void Rippling::animationStep()
	{
	t = t + dt;
	}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 83ae362dfc66236c5d8a7b9adcdb0afb474c94ed.cu | #include <iostream>
#include <assert.h>
#include "Device.h"
#include "Rippling.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructeur *|
\*-------------------------*/
// Constructs the rippling animation; the image must be square.
Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) :
	Animable_I<uchar4>(grid, w, h, "My_Rippling_Cuda_RGBA_uchar4")
	{
	assert(w == h); // the rippling effect assumes a square image
	// Animation state: t advances by dt on every animationStep().
	this->t = 0; // inherited (protected) from Animable_I
	this->dt = dt;
	}
// No resources owned beyond the base class: nothing to release.
Rippling::~Rippling()
	{
	// intentionally empty
	}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
* Note : domaineMath pas use car image pas zoomable
*/
/**
 * Override. Called periodically by the API to fill the pixel buffer.
 * domaineMath is ignored: this image is not zoomable.
 */
void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
	{
	// Launch the imported rippling kernel on the grid/block configuration
	// (dg, db) held by the Animable_I base class.
	rippling<<<dg,db>>>(ptrDevPixels,w,h,t);
	}
/**
* Override
* Call periodicly by the API
*/
/**
 * Override. Called periodically by the API: advances the animation clock.
 */
void Rippling::animationStep()
	{
	t = t + dt;
	}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
076409bed42bd92df371b26cc0da2f59efb675d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dcn_v2_im2col_cuda_double.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
/*inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}*/
const int kMaxGridNum = 65535;
// Ceil-divide N work items into blocks of CUDA_NUM_THREADS threads,
// clamped to the hardware grid-dimension limit (kMaxGridNum = 65535).
inline int GET_BLOCKS(const int N) {
  const int blocks = (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
  return ::min(kMaxGridNum, blocks);
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// Software double-precision atomicAdd built on 64-bit compare-and-swap,
// for devices without the native instruction (the surrounding #if guards
// this out on compute capability >= 6.0, which provides it in hardware).
__device__ double atomicAdd(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Attempt to swap in assumed+val; if another thread changed the
        // value in the meantime, atomicCAS returns the new value and the
        // loop retries with it.
        old = atomicCAS(address_as_ull, assumed,
                __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    // return the value observed before this add, matching atomicAdd semantics
    return __longlong_as_double(old);
}
#endif
// Bilinear sample of bottom_data (row stride data_width) at the fractional
// location (h, w). Corners outside [0, height) x [0, width) contribute zero.
__device__ double dmcn_im2col_bilinear(const double *bottom_data, const int data_width,
                                       const int height, const int width, double h, double w)
{
  const int h0 = floor(h);
  const int w0 = floor(w);
  const int h1 = h0 + 1;
  const int w1 = w0 + 1;
  // fractional distances to the low corner and their complements
  const double dh = h - h0;
  const double dw = w - w0;
  const double rh = 1 - dh, rw = 1 - dw;
  double c00 = 0;
  if (h0 >= 0 && w0 >= 0)
    c00 = bottom_data[h0 * data_width + w0];
  double c01 = 0;
  if (h0 >= 0 && w1 <= width - 1)
    c01 = bottom_data[h0 * data_width + w1];
  double c10 = 0;
  if (h1 <= height - 1 && w0 >= 0)
    c10 = bottom_data[h1 * data_width + w0];
  double c11 = 0;
  if (h1 <= height - 1 && w1 <= width - 1)
    c11 = bottom_data[h1 * data_width + w1];
  // weighted sum of the four neighboring pixels
  return (rh * rw * c00 + rh * dw * c01 + dh * rw * c10 + dh * dw * c11);
}
// Bilinear interpolation weight that the sample point (argmax_h, argmax_w)
// assigns to the integer pixel (h, w); zero when (h, w) is not one of the
// four corners of the sample's footprint or the sample is outside the image.
__device__ double dmcn_get_gradient_weight(double argmax_h, double argmax_w,
                                           const int h, const int w, const int height, const int width)
{
  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
  {
    // sample point entirely outside the image: no gradient flows
    return 0;
  }
  const int h_lo = floor(argmax_h);
  const int w_lo = floor(argmax_w);
  const int h_hi = h_lo + 1;
  const int w_hi = w_lo + 1;
  // (h, w) can match at most one of the four corners; return the matching
  // interpolation coefficient.
  double weight = 0;
  if (h == h_lo && w == w_lo)
    weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
  if (h == h_lo && w == w_hi)
    weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
  if (h == h_hi && w == w_lo)
    weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
  if (h == h_hi && w == w_hi)
    weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
  return weight;
}
// Partial derivative of the bilinearly sampled value with respect to the
// sampling coordinate: bp_dir == 0 differentiates w.r.t. argmax_h (rows),
// bp_dir == 1 w.r.t. argmax_w (columns). Returns 0 when the sample point
// lies entirely outside the image.
__device__ double dmcn_get_coordinate_weight(double argmax_h, double argmax_w,
                                             const int height, const int width, const double *im_data,
                                             const int data_width, const int bp_dir)
{
  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
  {
    //empty
    return 0;
  }
  int argmax_h_low = floor(argmax_h);
  int argmax_w_low = floor(argmax_w);
  int argmax_h_high = argmax_h_low + 1;
  int argmax_w_high = argmax_w_low + 1;
  double weight = 0;
  // Each in-bounds corner contributes its pixel value times the derivative
  // of its bilinear coefficient along the requested direction (+/-1 times
  // the fractional weight along the other axis).
  if (bp_dir == 0)
  {
    if (argmax_h_low >= 0 && argmax_w_low >= 0)
      weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
      weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
      weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
      weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
  }
  else if (bp_dir == 1)
  {
    if (argmax_h_low >= 0 && argmax_w_low >= 0)
      weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
      weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
      weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
      weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
  }
  return weight;
}
// im2col for modulated deformable convolution (DCNv2): expands the input
// into the column buffer. One thread per (input channel, batch, out_y,
// out_x); each thread writes kernel_h*kernel_w column entries, bilinearly
// sampling the input at the learned per-tap offset and scaling by the
// learned modulation mask.
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
                                                       const double *data_im, const double *data_offset, const double *data_mask,
                                                       const int height, const int width, const int kernel_h, const int kernel_w,
                                                       const int pad_h, const int pad_w,
                                                       const int stride_h, const int stride_w,
                                                       const int dilation_h, const int dilation_w,
                                                       const int channel_per_deformable_group,
                                                       const int batch_size, const int num_channels, const int deformable_group,
                                                       const int height_col, const int width_col,
                                                       double *data_col)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // index index of output matrix: decode (w_col, h_col, b_col, c_im)
    const int w_col = index % width_col;
    const int h_col = (index / width_col) % height_col;
    const int b_col = (index / width_col / height_col) % batch_size;
    const int c_im = (index / width_col / height_col) / batch_size;
    // each input channel expands into kernel_h*kernel_w column rows
    const int c_col = c_im * kernel_h * kernel_w;
    // compute deformable group index
    const int deformable_group_index = c_im / channel_per_deformable_group;
    // top-left corner of the (un-offset) receptive field in input coords
    const int h_in = h_col * stride_h - pad_h;
    const int w_in = w_col * stride_w - pad_w;
    double *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
    //const double* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
    const double *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
    // offsets: 2 values (h, w) per kernel tap per output location
    const double *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    // masks: 1 modulation scalar per kernel tap per output location
    const double *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
    for (int i = 0; i < kernel_h; ++i)
    {
      for (int j = 0; j < kernel_w; ++j)
      {
        const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
        const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
        const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
        const double offset_h = data_offset_ptr[data_offset_h_ptr];
        const double offset_w = data_offset_ptr[data_offset_w_ptr];
        const double mask = data_mask_ptr[data_mask_hw_ptr];
        double val = static_cast<double>(0);
        // deformed sampling location for this kernel tap
        const double h_im = h_in + i * dilation_h + offset_h;
        const double w_im = w_in + j * dilation_w + offset_w;
        //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
        if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
        {
          //const double map_h = i * dilation_h + offset_h;
          //const double map_w = j * dilation_w + offset_w;
          //const int cur_height = height - h_in;
          //const int cur_width = width - w_in;
          //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
          val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
        }
        *data_col_ptr = val * mask;
        // advance to the next kernel-tap row of the column buffer
        data_col_ptr += batch_size * height_col * width_col;
        //data_col_ptr += height_col * width_col;
      }
    }
  }
}
// col2im for modulated deformable convolution: scatters gradients from the
// column buffer back to the input gradient (grad_im). Each column element
// distributes its (mask-scaled) gradient over the input pixels that
// contributed to its bilinear sample. The 5x5 (dy, dx in [-2, 2]) scan is a
// conservative superset of the 2x2 bilinear footprint; the |coord - pixel|
// < 1 test plus dmcn_get_gradient_weight zero out non-contributors.
// atomicAdd resolves races between threads targeting the same input pixel.
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
                                                       const double *data_col, const double *data_offset, const double *data_mask,
                                                       const int channels, const int height, const int width,
                                                       const int kernel_h, const int kernel_w,
                                                       const int pad_h, const int pad_w,
                                                       const int stride_h, const int stride_w,
                                                       const int dilation_h, const int dilation_w,
                                                       const int channel_per_deformable_group,
                                                       const int batch_size, const int deformable_group,
                                                       const int height_col, const int width_col,
                                                       double *grad_im)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // decode (kernel tap j,i ; channel c) from the column-buffer index
    const int j = (index / width_col / height_col / batch_size) % kernel_w;
    const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
    const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
    // compute the start and end of the output
    const int deformable_group_index = c / channel_per_deformable_group;
    int w_out = index % width_col;
    int h_out = (index / width_col) % height_col;
    int b = (index / width_col / height_col) % batch_size;
    int w_in = w_out * stride_w - pad_w;
    int h_in = h_out * stride_h - pad_h;
    const double *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    const double *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
    const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
    const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
    const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
    const double offset_h = data_offset_ptr[data_offset_h_ptr];
    const double offset_w = data_offset_ptr[data_offset_w_ptr];
    const double mask = data_mask_ptr[data_mask_hw_ptr];
    // fractional input location this column element was sampled from
    const double cur_inv_h_data = h_in + i * dilation_h + offset_h;
    const double cur_inv_w_data = w_in + j * dilation_w + offset_w;
    // incoming gradient, modulated by the mask (chain rule)
    const double cur_top_grad = data_col[index] * mask;
    const int cur_h = (int)cur_inv_h_data;
    const int cur_w = (int)cur_inv_w_data;
    for (int dy = -2; dy <= 2; dy++)
    {
      for (int dx = -2; dx <= 2; dx++)
      {
        if (cur_h + dy >= 0 && cur_h + dy < height &&
            cur_w + dx >= 0 && cur_w + dx < width &&
            abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
            abs(cur_inv_w_data - (cur_w + dx)) < 1)
        {
          int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
          double weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
          atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
        }
      }
    }
  }
}
// Gradients of modulated deformable convolution w.r.t. the learned offsets
// (grad_offset) and modulation masks (grad_mask). One thread per offset
// component per output location; it accumulates contributions over all
// input channels of its deformable group (one per kernel tap stride in the
// column buffer). Even offset channels are h-offsets, odd are w-offsets;
// the mask gradient is emitted once per tap (on the even component).
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
                                                             const double *data_col, const double *data_im,
                                                             const double *data_offset, const double *data_mask,
                                                             const int channels, const int height, const int width,
                                                             const int kernel_h, const int kernel_w,
                                                             const int pad_h, const int pad_w,
                                                             const int stride_h, const int stride_w,
                                                             const int dilation_h, const int dilation_w,
                                                             const int channel_per_deformable_group,
                                                             const int batch_size, const int offset_channels, const int deformable_group,
                                                             const int height_col, const int width_col,
                                                             double *grad_offset, double *grad_mask)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    double val = 0, mval = 0;
    int w = index % width_col;
    int h = (index / width_col) % height_col;
    int c = (index / width_col / height_col) % offset_channels;
    int b = (index / width_col / height_col) / offset_channels;
    // compute the start and end of the output
    const int deformable_group_index = c / (2 * kernel_h * kernel_w);
    const int col_step = kernel_h * kernel_w;
    int cnt = 0;
    const double *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
    const double *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
    const double *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    const double *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
    const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
    // walk the input channels of this group; each iteration handles one
    // column-buffer row that used this offset component
    for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
    {
      const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
      const int bp_dir = offset_c % 2;
      int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
      int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
      int w_out = col_pos % width_col;
      int h_out = (col_pos / width_col) % height_col;
      int w_in = w_out * stride_w - pad_w;
      int h_in = h_out * stride_h - pad_h;
      const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
      const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
      const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
      const double offset_h = data_offset_ptr[data_offset_h_ptr];
      const double offset_w = data_offset_ptr[data_offset_w_ptr];
      const double mask = data_mask_ptr[data_mask_hw_ptr];
      double inv_h = h_in + i * dilation_h + offset_h;
      double inv_w = w_in + j * dilation_w + offset_w;
      if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
      {
        // sample fell outside: sentinel makes the coordinate weight zero
        inv_h = inv_w = -2;
      }
      else
      {
        // mask gradient: d(output)/d(mask) is the sampled input value
        mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
      }
      // offset gradient: d(sample)/d(coordinate) along bp_dir
      const double weight = dmcn_get_coordinate_weight(
          inv_h, inv_w,
          height, width, data_im_ptr + cnt * height * width, width, bp_dir);
      val += weight * data_col_ptr[col_pos] * mask;
      cnt += 1;
    }
    // KERNEL_ASSIGN(grad_offset[index], offset_req, val);
    grad_offset[index] = val;
    if (offset_c % 2 == 0)
      // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
      grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
  }
}
// Host wrapper: launches the im2col kernel for modulated deformable
// convolution on `stream`, one thread per (channel, batch, out_y, out_x).
// (The `kenerl_w` spelling is kept to preserve the public signature.)
void modulated_deformable_im2col_cuda(hipStream_t stream,
                                      const double *data_im, const double *data_offset, const double *data_mask,
                                      const int batch_size, const int channels, const int height_im, const int width_im,
                                      const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
                                      const int pad_h, const int pad_w, const int stride_h, const int stride_w,
                                      const int dilation_h, const int dilation_w,
                                      const int deformable_group, double *data_col)
{
  // num_axes should be smaller than block size
  const int channel_per_deformable_group = channels / deformable_group;
  const int num_kernels = channels * batch_size * height_col * width_col;
  hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
      0, stream,
      num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kenerl_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
      batch_size, channels, deformable_group, height_col, width_col, data_col);
  // launches report configuration errors only through the error queue
  const hipError_t err = hipGetLastError();
  if (err != hipSuccess)
  {
    printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
  }
}
// Host wrapper: launches the col2im kernel that scatters column-buffer
// gradients back into the input gradient (grad_im).
void modulated_deformable_col2im_cuda(hipStream_t stream,
                                      const double *data_col, const double *data_offset, const double *data_mask,
                                      const int batch_size, const int channels, const int height_im, const int width_im,
                                      const int height_col, const int width_col, const int kernel_h, const int kernel_w,
                                      const int pad_h, const int pad_w, const int stride_h, const int stride_w,
                                      const int dilation_h, const int dilation_w,
                                      const int deformable_group, double *grad_im)
{
  const int channel_per_deformable_group = channels / deformable_group;
  const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
  // BUG FIX: the original passed `pad_h, pad_h` to the kernel, so any
  // configuration with pad_w != pad_h computed wrong input gradients.
  hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
      0, stream,
      num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im,
      kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
      dilation_h, dilation_w, channel_per_deformable_group,
      batch_size, deformable_group, height_col, width_col, grad_im);
  hipError_t err = hipGetLastError();
  if (err != hipSuccess)
  {
    printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err));
  }
}
// Host wrapper: launches the kernel computing gradients w.r.t. the learned
// offsets (grad_offset) and modulation masks (grad_mask). One thread per
// offset component per output location.
void modulated_deformable_col2im_coord_cuda(hipStream_t stream,
                                            const double *data_col, const double *data_im, const double *data_offset, const double *data_mask,
                                            const int batch_size, const int channels, const int height_im, const int width_im,
                                            const int height_col, const int width_col, const int kernel_h, const int kernel_w,
                                            const int pad_h, const int pad_w, const int stride_h, const int stride_w,
                                            const int dilation_h, const int dilation_w,
                                            const int deformable_group,
                                            double *grad_offset, double *grad_mask)
{
  // 2 offset components (h, w) per kernel tap per output location
  const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
  const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
  hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS),
      0, stream,
      num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im,
      kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
      dilation_h, dilation_w, channel_per_deformable_group,
      batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
      grad_offset, grad_mask);
  // launches report configuration errors only through the error queue
  hipError_t err = hipGetLastError();
  if (err != hipSuccess)
  {
    printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err));
  }
} | 076409bed42bd92df371b26cc0da2f59efb675d7.cu | #include "dcn_v2_im2col_cuda_double.h"
#include <cstdio>
#include <algorithm>
#include <cstring>
// Grid-stride loop over n work items: lets a size-capped grid cover any n.
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
/*inline int GET_BLOCKS(const int N)
{
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}*/
// Maximum grid dimension requested; CUDA_KERNEL_LOOP makes a clamped grid
// still process all N elements.
const int kMaxGridNum = 65535;
// Ceil-division block count for N items, clamped to kMaxGridNum.
inline int GET_BLOCKS(const int N) {
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
// Software double-precision atomicAdd via an atomicCAS loop, for devices
// older than sm_60 (which introduced the native double overload).
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
// retry until no other thread modified the value between read and CAS
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
// Bilinear interpolation of bottom_data (row stride data_width) at the
// fractional location (h, w). Corner pixels that fall outside
// [0, height) x [0, width) contribute 0, which fades samples out near the
// image border.
__device__ double dmcn_im2col_bilinear(const double *bottom_data, const int data_width,
const int height, const int width, double h, double w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
// fractional distances from the low corner
double lh = h - h_low;
double lw = w - w_low;
double hh = 1 - lh, hw = 1 - lw;
// fetch the four neighbouring pixels, substituting 0 when out of bounds
double v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
double v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
double v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
double v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
// standard bilinear weights: products of the axis-wise fractions
double w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
double val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
// Bilinear back-projection weight: the fraction of the sample taken at the
// fractional location (argmax_h, argmax_w) that is owed to the integer pixel
// (h, w). Non-zero only when (h, w) is one of the sample's four corner
// neighbours; zero when the sample point lies outside the image.
__device__ double dmcn_get_gradient_weight(double argmax_h, double argmax_w,
const int h, const int w, const int height, const int width)
{
  const bool out_of_bounds = argmax_h <= -1 || argmax_h >= height ||
                             argmax_w <= -1 || argmax_w >= width;
  if (out_of_bounds)
    return 0;

  const int h_lo = floor(argmax_h);
  const int w_lo = floor(argmax_w);
  const int h_hi = h_lo + 1;
  const int w_hi = w_lo + 1;

  // The four corner cases are mutually exclusive (h_lo != h_hi, w_lo != w_hi),
  // so an else-if chain is equivalent to the original independent tests.
  double weight = 0;
  if (h == h_lo && w == w_lo)
    weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
  else if (h == h_lo && w == w_hi)
    weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
  else if (h == h_hi && w == w_lo)
    weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
  else if (h == h_hi && w == w_hi)
    weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
  return weight;
}
// Derivative of the bilinear sample at the fractional location
// (argmax_h, argmax_w) with respect to the sampling coordinate itself:
// bp_dir == 0 differentiates w.r.t. h, bp_dir == 1 w.r.t. w.
// Returns 0 when the sample point lies outside the image.
__device__ double dmcn_get_coordinate_weight(double argmax_h, double argmax_w,
const int height, const int width, const double *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
double weight = 0;
// Accumulate +/- (interpolation fraction along the other axis) * pixel
// value for each in-bounds corner of the bilinear footprint; the sign
// flips between the low and high side of the differentiated axis.
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
// im2col for modulated deformable convolution. One thread per
// (channel, batch, output position); each thread walks all kernel taps,
// samples the input at the offset-shifted fractional location via bilinear
// interpolation, scales by the learned modulation mask, and writes the
// result into the column buffer laid out as (c*KhKw, batch, h_col, w_col).
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const double *data_im, const double *data_offset, const double *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
double *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
// top-left corner of this output position's receptive field, in input coords
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
double *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const double* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const double *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
// offsets come as (dy, dx) pairs per kernel tap; the mask has one scalar per tap
const double *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const double *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const double offset_h = data_offset_ptr[data_offset_h_ptr];
const double offset_w = data_offset_ptr[data_offset_w_ptr];
const double mask = data_mask_ptr[data_mask_hw_ptr];
double val = static_cast<double>(0);
const double h_im = h_in + i * dilation_h + offset_h;
const double w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
// sample only when the fractional location still overlaps the image
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const double map_h = i * dilation_h + offset_h;
//const double map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
// advance to the next tap's slice of the column buffer
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
// Scatter pass of col2im for modulated deformable convolution. One thread
// per column-buffer element: each thread distributes its mask-scaled
// gradient onto the input pixels of its bilinear footprint via atomicAdd
// (several taps/positions may target the same input pixel concurrently).
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const double *data_col, const double *data_offset, const double *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
double *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
// decode (kernel tap j,i, channel c) from the flattened column index
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const double *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const double *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const double offset_h = data_offset_ptr[data_offset_h_ptr];
const double offset_w = data_offset_ptr[data_offset_w_ptr];
const double mask = data_mask_ptr[data_mask_hw_ptr];
// fractional input location this column element was sampled from
const double cur_inv_h_data = h_in + i * dilation_h + offset_h;
const double cur_inv_w_data = w_in + j * dilation_w + offset_w;
const double cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
// conservative 5x5 scan around the sample point; only cells within unit
// distance receive a non-zero bilinear weight (see the distance checks)
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
double weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
// Backward pass w.r.t. the learned offsets and modulation mask. One thread
// per offset component (2 per kernel tap per deformable group per output
// position). Each thread sums, over its group's column channels, the
// column gradient times the coordinate derivative of the bilinear sample
// (for grad_offset) and times the resampled input value (for grad_mask).
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const double *data_col, const double *data_im,
const double *data_offset, const double *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
double *grad_offset, double *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
double val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
// cnt selects which input-channel slice of this group we are processing
int cnt = 0;
const double *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const double *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const double *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const double *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
// offset channel within this group; even = h component, odd = w component
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const double offset_h = data_offset_ptr[data_offset_h_ptr];
const double offset_w = data_offset_ptr[data_offset_w_ptr];
const double mask = data_mask_ptr[data_mask_hw_ptr];
double inv_h = h_in + i * dilation_h + offset_h;
double inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
// sentinel: marks the sample as out of bounds so the coordinate
// weight below evaluates to 0 and no mask gradient accumulates
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const double weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
// one mask entry per (i, j) tap, shared by the h/w offset pair, so only
// the even (h) component's thread writes the mask gradient
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
// Host launcher for the modulated-deformable im2col kernel: one thread per
// (channel, batch, output position); each thread fills all kernel taps.
// "kenerl_w" (sic) is a historical typo for kernel_w -- presumably matched
// by the header declaration, so it is kept; TODO confirm and fix both.
void modulated_deformable_im2col_cuda(cudaStream_t stream,
const double *data_im, const double *data_offset, const double *data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, double *data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col);
// kernel launches are asynchronous; surface launch/configuration errors here
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
// Host launcher for the col2im scatter pass of modulated deformable
// convolution: accumulates the column-buffer gradient (data_col) back into
// the input-image gradient (grad_im), weighted by the learned offsets and
// modulation mask. One thread per column-buffer element.
void modulated_deformable_col2im_cuda(cudaStream_t stream,
const double *data_col, const double *data_offset, const double *data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, double *grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
// BUG FIX: pad_w was previously passed as pad_h twice, so any asymmetric
// padding (pad_h != pad_w) produced wrong input gradients.
modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im);
// kernel launches are asynchronous; surface launch/configuration errors here
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
// Host launcher for the coordinate/mask backward pass: computes the
// gradient w.r.t. the learned offsets (grad_offset) and the modulation
// mask (grad_mask). One thread per offset component:
// batch * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group.
void modulated_deformable_col2im_coord_cuda(cudaStream_t stream,
const double *data_col, const double *data_im, const double *data_offset, const double *data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
double *grad_offset, double *grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
// here "channel_per_deformable_group" counts column-buffer channels
// (input channels * kernel area), unlike the im2col/col2im launchers
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS,
0, stream>>>(
num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset, grad_mask);
// kernel launches are asynchronous; surface launch/configuration errors here
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
}
7c598ca2e751a1896a4ead298efe1b484d12424e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "features.cuh"
// Capacity assumption: at most this many features per image.
#define FEATURES_PER_IMAGE (2500 * 5)
#define FEATURE_BUFFER_SIZE_PER_IMAGE (FEATURES_PER_IMAGE * sizeof(feature))
// NOTE: the two TOTAL_* macros expand the local variable `concImages`, so
// they are only valid inside functions that declare it (textual capture).
#define TOTAL_FEATURE_BUFFER_SIZE (FEATURE_BUFFER_SIZE_PER_IMAGE * concImages)
#define TOTAL_FEATURE_BUFFER_COUNT (FEATURES_PER_IMAGE * concImages)
// fraction of free VRAM to use
#define FREE_VRAM_USAGE .8
#define THREADS_PER_BLOCK 32
// One block per image: builds the summed-area table (SAT) in place in global
// memory, mirrors it into shared memory, then runs the haar-like feature
// search. Results go into featureBuffer; featureIndex is the shared append
// cursor (presumably advanced atomically inside haarfinder -- TODO confirm).
// NOTE(review): every thread executes the full memcpy into the shared SAT;
// all writes carry identical values so the result is consistent, but the
// copy is redundant -- consider partitioning it across threadIdx.x.
__global__ void findFeatures(uint32_t* imageBuffer, feature* featureBuffer, uint32_t* featureIndex) {
uint32_t imgId = blockIdx.x;
uint32_t* img = &(imageBuffer[IMAGE_SIZE * IMAGE_SIZE * imgId]);
// build the SAT
if (threadIdx.x < 32)
scan2d(img);
__syncthreads();
// copy the SAT to shared memory
__shared__ uint32_t SAT[IMAGE_SIZE * IMAGE_SIZE];
memcpy(SAT, img, IMAGE_SIZE * IMAGE_SIZE * sizeof(uint32_t));
__syncthreads();
// find haar-like features
haarfinder(SAT, featureBuffer, THRESHOLD, featureIndex);
}
// Dump one detected feature to stdout: type label, magnitude, and the
// bounding corner coordinates.
void printFeature(feature feat) {
  const char* label = NULL;
  switch (feat.type) {
  case HEDGE: label = "Horiz Edge\n"; break;
  case VEDGE: label = "Vert Edge\n";  break;
  case HLINE: label = "Horiz Line\n"; break;
  case VLINE: label = "Vert Line\n";  break;
  case RECT4: label = "4-Rect\n";     break;
  default: break;  // unknown type: report the raw value below
  }
  if (label != NULL)
    printf("%s", label);
  else
    printf("Something else: %d\n", feat.type);
  printf("Mag: %d\n", feat.mag);
  printf("(%d, %d) -> (%d, %d)\n", feat.x1, feat.y1, feat.x2, feat.y2);
}
// Scans `count` IMAGE_SIZE x IMAGE_SIZE images for haar-like features on the
// GPU, batching images so each batch's image + feature buffers fit within
// FREE_VRAM_USAGE of the currently free VRAM.
// Returns a malloc()'d array (caller frees) and stores its length in
// *numFeatures; returns NULL with *numFeatures == 0 when there is no work.
// FIX: printf format specifiers for size_t values were %d/%u/%lu, which is
// undefined behavior on LP64/LLP64 platforms; they now use %zu.
feature* findFeatures(uint32_t* hostImageBuffer, uint32_t count, uint32_t* numFeatures) {
  // get the amount of vram we can allocate for this step
  size_t freeMem, totalMem;
  hipMemGetInfo(&freeMem, &totalMem);
  printf("CUDA memory: Total: %zu MB, free: %zu MB\n", totalMem / 1024 / 1024, freeMem / 1024 / 1024);
  // compute number of images we can process at once
  uint32_t concImages = freeMem * FREE_VRAM_USAGE / (FEATURE_BUFFER_SIZE_PER_IMAGE + IMAGE_SIZE * IMAGE_SIZE * sizeof(uint32_t));
  uint32_t numKernels = (uint32_t)ceil((float)count / concImages);
  if (numKernels == 0) {
    printf("Nothing to compute! Returning.\n");
    *numFeatures = 0;
    return NULL;
  }
  printf("Computing up to %u images at once using %zu MB of VRAM and %u kernels\n", concImages, concImages * (FEATURE_BUFFER_SIZE_PER_IMAGE + IMAGE_SIZE * IMAGE_SIZE * sizeof(uint32_t)) / 1024 / 1024, numKernels);
  printf("Using %u threads\n", THREADS_PER_BLOCK * min(concImages, count));
  uint32_t hostFeatureIndex;  // feature count reported by the last kernel
  feature* deviceFeatureBuffer;
  uint32_t* deviceImageBuffer, *deviceFeatureIndex;
  wbCheck(hipMalloc((void**)&deviceFeatureBuffer, TOTAL_FEATURE_BUFFER_SIZE));
  wbCheck(hipMalloc((void**)&deviceImageBuffer, IMAGE_SIZE * IMAGE_SIZE * concImages * sizeof(uint32_t)));
  wbCheck(hipMalloc((void**)&deviceFeatureIndex, sizeof(uint32_t)));
  uint32_t handledImages = 0;
  // host-side result buffer, expanded on demand as batches complete
  int32_t finishedFeatureBufferSize = concImages * FEATURES_PER_IMAGE;
  int32_t numFinishedFeatures = 0;
  feature* finishedFeatures = (feature*)malloc(finishedFeatureBufferSize * sizeof(feature));
  hipDeviceSetCacheConfig(hipFuncCachePreferL1);
  // clear feature buffer and copy first batch of images to device
  wbCheck(hipMemset(deviceFeatureBuffer, 0, TOTAL_FEATURE_BUFFER_SIZE));
  wbCheck(hipMemset(deviceImageBuffer, 0, IMAGE_SIZE * IMAGE_SIZE * concImages * sizeof(uint32_t)));
  wbCheck(hipMemset(deviceFeatureIndex, 0, sizeof(uint32_t)));
  wbCheck(hipMemcpy(deviceImageBuffer, &(hostImageBuffer[handledImages * IMAGE_SIZE * IMAGE_SIZE]), IMAGE_SIZE * IMAGE_SIZE * min(concImages, count - handledImages) * sizeof(uint32_t), hipMemcpyHostToDevice));
  dim3 dimGrid(min(concImages, count - handledImages), 1, 1);
  dim3 dimBlock(THREADS_PER_BLOCK, 1, 1);
  findFeatures << <dimGrid, dimBlock >> >(deviceImageBuffer, deviceFeatureBuffer, deviceFeatureIndex);
  wbCheck(hipPeekAtLastError());
  handledImages += min(concImages, count - handledImages);
  uint32_t launchedKernels = 0;
  while (true) {
    launchedKernels++;
    // hipMemcpy blocks until the in-flight kernel finishes, so this copy
    // doubles as the per-batch synchronization point.
    wbCheck(hipMemcpy(&hostFeatureIndex, deviceFeatureIndex, sizeof(uint32_t), hipMemcpyDeviceToHost));
    printf("Kernel %u output sized %u elements (%zu MB, %u%% full)\n", launchedKernels, hostFeatureIndex, hostFeatureIndex * sizeof(feature) / 1024 / 1024, (uint32_t)((float)hostFeatureIndex * 100 / TOTAL_FEATURE_BUFFER_COUNT));
    if (hostFeatureIndex > TOTAL_FEATURE_BUFFER_COUNT)
      printf("Buffer overflow by %u features, increase FEATURES_PER_IMAGE or THRESHOLD\n", hostFeatureIndex - TOTAL_FEATURE_BUFFER_COUNT);
    // grow the host buffer first if this batch would overflow it
    if (numFinishedFeatures + hostFeatureIndex > finishedFeatureBufferSize) {
      printf("Resizing host buffer to %u elements (%zu MB)\n", finishedFeatureBufferSize + TOTAL_FEATURE_BUFFER_COUNT, (finishedFeatureBufferSize * sizeof(feature) + TOTAL_FEATURE_BUFFER_SIZE) / 1024 / 1024);
      // NOTE(review): realloc return value is unchecked; allocation failure
      // here would leak the old buffer and crash on the next write.
      finishedFeatures = (feature*)realloc(finishedFeatures, finishedFeatureBufferSize * sizeof(feature) + TOTAL_FEATURE_BUFFER_SIZE);
      finishedFeatureBufferSize += TOTAL_FEATURE_BUFFER_COUNT;
    }
    printf("Copying buffer to host\n");
    wbCheck(hipMemcpy(&(finishedFeatures[numFinishedFeatures]), deviceFeatureBuffer, hostFeatureIndex * sizeof(feature), hipMemcpyDeviceToHost));
    numFinishedFeatures += hostFeatureIndex;
    if (launchedKernels == numKernels)
      break;
    // if there are more images to analyze, start them doing
    if (handledImages < count) {
      // clear feature buffer and copy next batch of images to device
      wbCheck(hipMemset(deviceFeatureBuffer, 0, TOTAL_FEATURE_BUFFER_SIZE));
      wbCheck(hipMemset(deviceImageBuffer, 0, IMAGE_SIZE * IMAGE_SIZE * concImages * sizeof(uint32_t)));
      wbCheck(hipMemset(deviceFeatureIndex, 0, sizeof(uint32_t)));
      wbCheck(hipMemcpy(deviceImageBuffer, &(hostImageBuffer[handledImages * IMAGE_SIZE * IMAGE_SIZE]), IMAGE_SIZE * IMAGE_SIZE * min(concImages, count - handledImages) * sizeof(uint32_t), hipMemcpyHostToDevice));
      dim3 dimGrid(min(concImages, count - handledImages), 1, 1);
      dim3 dimBlock(THREADS_PER_BLOCK, 1, 1);
      findFeatures << <dimGrid, dimBlock >> >(deviceImageBuffer, deviceFeatureBuffer, deviceFeatureIndex);
      wbCheck(hipPeekAtLastError());
      handledImages += min(concImages, count - handledImages);
    }
  }
  // shrink the result buffer to the exact number of features found
  finishedFeatures = (feature*)realloc(finishedFeatures, numFinishedFeatures * sizeof(feature));
  wbCheck(hipFree(deviceFeatureBuffer));
  wbCheck(hipFree(deviceImageBuffer));
  wbCheck(hipFree(deviceFeatureIndex));
  *numFeatures = numFinishedFeatures;
  return finishedFeatures;
} | 7c598ca2e751a1896a4ead298efe1b484d12424e.cu | #include "features.cuh"
// Capacity assumption: at most this many features per image.
#define FEATURES_PER_IMAGE (2500 * 5)
#define FEATURE_BUFFER_SIZE_PER_IMAGE (FEATURES_PER_IMAGE * sizeof(feature))
// NOTE: the two TOTAL_* macros expand the local variable `concImages`, so
// they are only valid inside functions that declare it (textual capture).
#define TOTAL_FEATURE_BUFFER_SIZE (FEATURE_BUFFER_SIZE_PER_IMAGE * concImages)
#define TOTAL_FEATURE_BUFFER_COUNT (FEATURES_PER_IMAGE * concImages)
// fraction of free VRAM to use
#define FREE_VRAM_USAGE .8
#define THREADS_PER_BLOCK 32
// One block per image: builds the summed-area table (SAT) in place in global
// memory, mirrors it into shared memory, then runs the haar-like feature
// search. Results go into featureBuffer; featureIndex is the shared append
// cursor (presumably advanced atomically inside haarfinder -- TODO confirm).
// NOTE(review): every thread executes the full memcpy into the shared SAT;
// all writes carry identical values so the result is consistent, but the
// copy is redundant -- consider partitioning it across threadIdx.x.
__global__ void findFeatures(uint32_t* imageBuffer, feature* featureBuffer, uint32_t* featureIndex) {
uint32_t imgId = blockIdx.x;
uint32_t* img = &(imageBuffer[IMAGE_SIZE * IMAGE_SIZE * imgId]);
// build the SAT
if (threadIdx.x < 32)
scan2d(img);
__syncthreads();
// copy the SAT to shared memory
__shared__ uint32_t SAT[IMAGE_SIZE * IMAGE_SIZE];
memcpy(SAT, img, IMAGE_SIZE * IMAGE_SIZE * sizeof(uint32_t));
__syncthreads();
// find haar-like features
haarfinder(SAT, featureBuffer, THRESHOLD, featureIndex);
}
// Dump one detected feature to stdout: type label, magnitude, and the
// bounding corner coordinates.
void printFeature(feature feat) {
  const char* label = NULL;
  switch (feat.type) {
  case HEDGE: label = "Horiz Edge\n"; break;
  case VEDGE: label = "Vert Edge\n";  break;
  case HLINE: label = "Horiz Line\n"; break;
  case VLINE: label = "Vert Line\n";  break;
  case RECT4: label = "4-Rect\n";     break;
  default: break;  // unknown type: report the raw value below
  }
  if (label != NULL)
    printf("%s", label);
  else
    printf("Something else: %d\n", feat.type);
  printf("Mag: %d\n", feat.mag);
  printf("(%d, %d) -> (%d, %d)\n", feat.x1, feat.y1, feat.x2, feat.y2);
}
// Scans `count` IMAGE_SIZE x IMAGE_SIZE images for haar-like features on the
// GPU, batching images so each batch's image + feature buffers fit within
// FREE_VRAM_USAGE of the currently free VRAM.
// Returns a malloc()'d array (caller frees) and stores its length in
// *numFeatures; returns NULL with *numFeatures == 0 when there is no work.
// FIX: printf format specifiers for size_t values were %d/%u/%lu, which is
// undefined behavior on LP64/LLP64 platforms; they now use %zu.
feature* findFeatures(uint32_t* hostImageBuffer, uint32_t count, uint32_t* numFeatures) {
  // get the amount of vram we can allocate for this step
  size_t freeMem, totalMem;
  cudaMemGetInfo(&freeMem, &totalMem);
  printf("CUDA memory: Total: %zu MB, free: %zu MB\n", totalMem / 1024 / 1024, freeMem / 1024 / 1024);
  // compute number of images we can process at once
  uint32_t concImages = freeMem * FREE_VRAM_USAGE / (FEATURE_BUFFER_SIZE_PER_IMAGE + IMAGE_SIZE * IMAGE_SIZE * sizeof(uint32_t));
  uint32_t numKernels = (uint32_t)ceil((float)count / concImages);
  if (numKernels == 0) {
    printf("Nothing to compute! Returning.\n");
    *numFeatures = 0;
    return NULL;
  }
  printf("Computing up to %u images at once using %zu MB of VRAM and %u kernels\n", concImages, concImages * (FEATURE_BUFFER_SIZE_PER_IMAGE + IMAGE_SIZE * IMAGE_SIZE * sizeof(uint32_t)) / 1024 / 1024, numKernels);
  printf("Using %u threads\n", THREADS_PER_BLOCK * min(concImages, count));
  uint32_t hostFeatureIndex;  // feature count reported by the last kernel
  feature* deviceFeatureBuffer;
  uint32_t* deviceImageBuffer, *deviceFeatureIndex;
  wbCheck(cudaMalloc((void**)&deviceFeatureBuffer, TOTAL_FEATURE_BUFFER_SIZE));
  wbCheck(cudaMalloc((void**)&deviceImageBuffer, IMAGE_SIZE * IMAGE_SIZE * concImages * sizeof(uint32_t)));
  wbCheck(cudaMalloc((void**)&deviceFeatureIndex, sizeof(uint32_t)));
  uint32_t handledImages = 0;
  // host-side result buffer, expanded on demand as batches complete
  int32_t finishedFeatureBufferSize = concImages * FEATURES_PER_IMAGE;
  int32_t numFinishedFeatures = 0;
  feature* finishedFeatures = (feature*)malloc(finishedFeatureBufferSize * sizeof(feature));
  cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
  // clear feature buffer and copy first batch of images to device
  wbCheck(cudaMemset(deviceFeatureBuffer, 0, TOTAL_FEATURE_BUFFER_SIZE));
  wbCheck(cudaMemset(deviceImageBuffer, 0, IMAGE_SIZE * IMAGE_SIZE * concImages * sizeof(uint32_t)));
  wbCheck(cudaMemset(deviceFeatureIndex, 0, sizeof(uint32_t)));
  wbCheck(cudaMemcpy(deviceImageBuffer, &(hostImageBuffer[handledImages * IMAGE_SIZE * IMAGE_SIZE]), IMAGE_SIZE * IMAGE_SIZE * min(concImages, count - handledImages) * sizeof(uint32_t), cudaMemcpyHostToDevice));
  dim3 dimGrid(min(concImages, count - handledImages), 1, 1);
  dim3 dimBlock(THREADS_PER_BLOCK, 1, 1);
  findFeatures << <dimGrid, dimBlock >> >(deviceImageBuffer, deviceFeatureBuffer, deviceFeatureIndex);
  wbCheck(cudaPeekAtLastError());
  handledImages += min(concImages, count - handledImages);
  uint32_t launchedKernels = 0;
  while (true) {
    launchedKernels++;
    // cudaMemcpy blocks until the in-flight kernel finishes, so this copy
    // doubles as the per-batch synchronization point.
    wbCheck(cudaMemcpy(&hostFeatureIndex, deviceFeatureIndex, sizeof(uint32_t), cudaMemcpyDeviceToHost));
    printf("Kernel %u output sized %u elements (%zu MB, %u%% full)\n", launchedKernels, hostFeatureIndex, hostFeatureIndex * sizeof(feature) / 1024 / 1024, (uint32_t)((float)hostFeatureIndex * 100 / TOTAL_FEATURE_BUFFER_COUNT));
    if (hostFeatureIndex > TOTAL_FEATURE_BUFFER_COUNT)
      printf("Buffer overflow by %u features, increase FEATURES_PER_IMAGE or THRESHOLD\n", hostFeatureIndex - TOTAL_FEATURE_BUFFER_COUNT);
    // grow the host buffer first if this batch would overflow it
    if (numFinishedFeatures + hostFeatureIndex > finishedFeatureBufferSize) {
      printf("Resizing host buffer to %u elements (%zu MB)\n", finishedFeatureBufferSize + TOTAL_FEATURE_BUFFER_COUNT, (finishedFeatureBufferSize * sizeof(feature) + TOTAL_FEATURE_BUFFER_SIZE) / 1024 / 1024);
      // NOTE(review): realloc return value is unchecked; allocation failure
      // here would leak the old buffer and crash on the next write.
      finishedFeatures = (feature*)realloc(finishedFeatures, finishedFeatureBufferSize * sizeof(feature) + TOTAL_FEATURE_BUFFER_SIZE);
      finishedFeatureBufferSize += TOTAL_FEATURE_BUFFER_COUNT;
    }
    printf("Copying buffer to host\n");
    wbCheck(cudaMemcpy(&(finishedFeatures[numFinishedFeatures]), deviceFeatureBuffer, hostFeatureIndex * sizeof(feature), cudaMemcpyDeviceToHost));
    numFinishedFeatures += hostFeatureIndex;
    if (launchedKernels == numKernels)
      break;
    // if there are more images to analyze, start them doing
    if (handledImages < count) {
      // clear feature buffer and copy next batch of images to device
      wbCheck(cudaMemset(deviceFeatureBuffer, 0, TOTAL_FEATURE_BUFFER_SIZE));
      wbCheck(cudaMemset(deviceImageBuffer, 0, IMAGE_SIZE * IMAGE_SIZE * concImages * sizeof(uint32_t)));
      wbCheck(cudaMemset(deviceFeatureIndex, 0, sizeof(uint32_t)));
      wbCheck(cudaMemcpy(deviceImageBuffer, &(hostImageBuffer[handledImages * IMAGE_SIZE * IMAGE_SIZE]), IMAGE_SIZE * IMAGE_SIZE * min(concImages, count - handledImages) * sizeof(uint32_t), cudaMemcpyHostToDevice));
      dim3 dimGrid(min(concImages, count - handledImages), 1, 1);
      dim3 dimBlock(THREADS_PER_BLOCK, 1, 1);
      findFeatures << <dimGrid, dimBlock >> >(deviceImageBuffer, deviceFeatureBuffer, deviceFeatureIndex);
      wbCheck(cudaPeekAtLastError());
      handledImages += min(concImages, count - handledImages);
    }
  }
  // shrink the result buffer to the exact number of features found
  finishedFeatures = (feature*)realloc(finishedFeatures, numFinishedFeatures * sizeof(feature));
  wbCheck(cudaFree(deviceFeatureBuffer));
  wbCheck(cudaFree(deviceImageBuffer));
  wbCheck(cudaFree(deviceFeatureIndex));
  *numFeatures = numFinishedFeatures;
  return finishedFeatures;
}
b7606609d3bdf2b685f2b4df2ff82c1511b3b07f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <tune_quda.h>
#include <quda_matrix.h>
#include <unitarization_links.h>
#include <su3_project.cuh>
#include <index_helper.cuh>
namespace quda{
#ifdef GPU_UNITARIZE
namespace{
#include <svd_quda.h>
}
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
#ifndef FL_UNITARIZE_PI23
// 2*pi/3
#define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*0.66666666666666666666
#endif
// Iteration caps for the iterative unitarization solvers.
static const int max_iter_newton = 20;
static const int max_iter = 20;
// File-scope tolerances/options; overwritten by setUnitarizeLinksConstants().
static double unitarize_eps = 1e-14;
static double max_error = 1e-10;
// Whether the SVD path may be used as a fallback / is forced for every link.
static int reunit_allow_svd = 1;
static int reunit_svd_only = 0;
static double svd_rel_error = 1e-6;
static double svd_abs_error = 1e-6;
/**
 * Kernel argument struct for the link-unitarization kernels: bundles the
 * output/input gauge-field accessors, the lattice geometry and the
 * host-configured unitarization tolerances for pass-by-value to the device.
 */
template <typename Out, typename In>
struct UnitarizeLinksArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Out output; // destination gauge-field accessor
const In input; // source gauge-field accessor
int *fails; // device counter of links that failed unitarization
const int max_iter; // iteration cap for the iterative solver
const double unitarize_eps;
const double max_error;
const int reunit_allow_svd; // allow SVD as a fallback
const int reunit_svd_only; // force SVD for every link
const double svd_rel_error;
const double svd_abs_error;
const static bool check_unitarization = true;
UnitarizeLinksArg(Out &output, const In &input, const GaugeField &data, int* fails,
int max_iter, double unitarize_eps, double max_error,
int reunit_allow_svd, int reunit_svd_only, double svd_rel_error,
double svd_abs_error)
// FIX: initializer list reordered to match member declaration order
// (max_iter is declared before unitarize_eps); members are always
// initialized in declaration order, so the old order triggered
// -Wreorder (harmless here, but misleading).
: threads(data.VolumeCB()), output(output), input(input), fails(fails),
max_iter(max_iter), unitarize_eps(unitarize_eps), max_error(max_error),
reunit_allow_svd(reunit_allow_svd), reunit_svd_only(reunit_svd_only),
svd_rel_error(svd_rel_error), svd_abs_error(svd_abs_error)
{
for (int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
}
};
#endif // GPU_UNITARIZE
// Host-side setter for the file-scope unitarization tuning parameters
// consumed by subsequent unitarizeLinks() calls.  The bool flags are
// stored in the int-typed statics (0/1).
void setUnitarizeLinksConstants(double unitarize_eps_, double max_error_,
bool reunit_allow_svd_, bool reunit_svd_only_,
double svd_rel_error_, double svd_abs_error_) {
#ifdef GPU_UNITARIZE
unitarize_eps = unitarize_eps_;
max_error = max_error_;
reunit_allow_svd = reunit_allow_svd_;
reunit_svd_only = reunit_svd_only_;
svd_rel_error = svd_rel_error_;
svd_abs_error = svd_abs_error_;
#else
errorQuda("Unitarization has not been built");
#endif
}
#ifdef GPU_UNITARIZE
// Consistency check between an input link V and its unitarized version W:
// forms T = V^dag W, then T*T - V^dag V, and requires every real (.x) and
// imaginary (.y) component of the result to be within max_error.
// Returns true when the unitarized link is consistent with the input.
// NOTE(review): the tested quantity (V^dag W)^2 - V^dag V presumably relies
// on W = V (V^dag V)^{-1/2} -- confirm against the algorithm notes.
template<class Cmplx>
__device__ __host__
bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix,
const Matrix<Cmplx,3>& unitary_matrix,
double max_error)
{
Matrix<Cmplx,3> temporary;
temporary = conj(initial_matrix)*unitary_matrix;
temporary = temporary*temporary - conj(initial_matrix)*initial_matrix;
// element-wise tolerance check on both parts of every complex entry
for(int i=0; i<3; ++i){
for(int j=0; j<3; ++j){
if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){
return false;
}
}
}
return true;
}
// Return the smallest absolute value among the first `size` entries of `array`.
// Assumes size >= 1 (array[0] is read unconditionally).
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
  T result = fabs(array[0]);
  for (int idx = 1; idx < size; ++idx) {
    const T candidate = fabs(array[idx]);
    if (candidate < result) result = candidate;
  }
  return result;
}
// True when |a - b| is strictly below epsilon (absolute-error comparison).
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
  return fabs(a - b) < epsilon;
}
// True when the relative error |(a - b)/b| is strictly below epsilon.
// The exact-equality short-circuit also makes the a == b == 0 case succeed,
// which the plain division form rejected (0/0 -> NaN compares false).
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
  if (a == b) return true;            // covers b == 0 with a == 0
  return fabs((a - b) / b) < epsilon; // b == 0, a != b yields inf/NaN -> false, as before
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
// Compute R = Q^{-1/2} for the Hermitian matrix Q (here Q = V^dag V) via the
// Cayley-Hamilton theorem: the eigenvalues of Q are obtained from the
// trigonometric solution of the characteristic cubic, and R is assembled as
// c[0]*I + c[1]*Q + c[2]*Q^2.
// Returns false (so the caller can fall back to SVD) when |det Q| is below
// arg.svd_abs_error, or when the product of the computed eigenvalues fails to
// reproduce det Q to relative accuracy arg.svd_rel_error.
// Fix: removed a stray duplicate semicolon after the c[2] assignment.
template<class Float, typename Arg>
__device__ __host__
bool reciprocalRoot(const Matrix<complex<Float>,3>& q, Matrix<complex<Float>,3>* res, Arg &arg){
Matrix<complex<Float>,3> qsq, tempq;
Float c[3];
Float g[3];
const Float one_third = 0.333333333333333333333;
const Float one_ninth = 0.111111111111111111111;
const Float one_eighteenth = 0.055555555555555555555;
qsq = q*q;
tempq = qsq*q;
// power-sum trace invariants: c[k] = tr(Q^{k+1})/(k+1)
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x * 0.5;
c[2] = getTrace(tempq).x * one_third;
// default eigenvalues (all equal) used when |s| is negligible below
g[0] = g[1] = g[2] = c[0] * one_third;
Float r,s,theta;
s = c[1]*one_third - c[0]*c[0]*one_eighteenth;
Float cosTheta;
if(fabs(s) >= arg.unitarize_eps){ // faster when this conditional is removed?
const Float rsqrt_s = rsqrt(s);
r = c[2]*0.5 - (c[0]*one_third)*(c[1] - c[0]*c[0]*one_ninth);
cosTheta = r*rsqrt_s*rsqrt_s*rsqrt_s;
// clamp the angle when rounding pushes |cos(theta)| past 1
if(fabs(cosTheta) >= 1.0){
theta = (r > 0) ? 0.0 : FL_UNITARIZE_PI;
}else{
theta = acos(cosTheta); // this is the primary performance limiter
}
const Float sqrt_s = s*rsqrt_s;
#if 0 // experimental version
Float as, ac;
sincos( theta*one_third, &as, &ac );
g[0] = c[0]*one_third + 2*sqrt_s*ac;
//g[1] = c[0]*one_third + 2*sqrt_s*(ac*cos(1*FL_UNITARIZE_PI23) - as*sin(1*FL_UNITARIZE_PI23));
g[1] = c[0]*one_third - 2*sqrt_s*(0.5*ac + as*0.8660254037844386467637);
//g[2] = c[0]*one_third + 2*sqrt_s*(ac*cos(2*FL_UNITARIZE_PI23) - as*sin(2*FL_UNITARIZE_PI23));
g[2] = c[0]*one_third + 2*sqrt_s*(-0.5*ac + as*0.8660254037844386467637);
#else
g[0] = c[0]*one_third + 2*sqrt_s*cos( theta*one_third );
g[1] = c[0]*one_third + 2*sqrt_s*cos( theta*one_third + FL_UNITARIZE_PI23 );
g[2] = c[0]*one_third + 2*sqrt_s*cos( theta*one_third + 2*FL_UNITARIZE_PI23 );
#endif
}
// Check the eigenvalues, if the determinant does not match the product of the eigenvalues
// return false. Then call SVD instead.
Float det = getDeterminant(q).x;
if( fabs(det) < arg.svd_abs_error) return false;
if( checkRelativeError(g[0]*g[1]*g[2],det,arg.svd_rel_error) == false ) return false;
// At this point we have finished with the c's
// use these to store sqrt(g)
for(int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
const Float denominator = 1.0 / ( g[2]*(g[0]*g[1]-g[2]) );
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1])) * denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1]) * denominator;
c[2] = g[0] * denominator;
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
*res = tempq;
return true;
}
// Unitarize a single SU(3) link following the MILC strategy: first attempt
// the cheap Cayley-Hamilton construction W = V (V^dag V)^{-1/2}; if that is
// numerically unreliable (or arg.reunit_svd_only is set) fall back to the
// SVD-based polar factor W = U V^dag from V = U S V^dag.
// Returns false only when Cayley-Hamilton fails and SVD fallback is disabled.
template<class Float, typename Arg>
__host__ __device__
bool unitarizeLinkMILC(const Matrix<complex<Float>,3>& in, Matrix<complex<Float>,3>* const result, Arg &arg)
{
Matrix<complex<Float>,3> u;
if( !arg.reunit_svd_only ){
// reciprocalRoot returns false when the eigenvalue computation looks
// unreliable, signalling that SVD should be used instead
if( reciprocalRoot<Float>(conj(in)*in,&u,arg) ){
*result = in*u;
return true;
}
}
// If we've got this far, then the Caley-Hamilton unitarization
// has failed. If SVD is not allowed, the unitarization has failed.
if( !arg.reunit_allow_svd ) return false;
Matrix<complex<Float>,3> v;
Float singular_values[3];
computeSVD<Float>(in, u, v, singular_values);
*result = u*conj(v);
return true;
} // unitarizeMILC
// Unitarize a link purely via SVD: decompose in = U S V^dag and return
// U V^dag.  The result is verified against max_error; failures are logged
// via printf and reported by returning false.
template<class Float>
__host__ __device__
bool unitarizeLinkSVD(const Matrix<complex<Float>,3>& in, Matrix<complex<Float>,3>* const result,
const double max_error)
{
Matrix<complex<Float>,3> u, v;
Float singular_values[3];
computeSVD<Float>(in, u, v, singular_values); // should pass pointers to u,v I guess
*result = u*conj(v);
if (result->isUnitary(max_error)==false)
{
printf("ERROR: Link unitarity test failed\n");
printf("TOLERANCE: %g\n", max_error);
return false;
}
return true;
}
// Unitarize a link with a fixed number of Newton iterations of
// u <- (u + (u^dag)^{-1})/2, then verify the result against the input.
// NOTE(review): the consistency tolerance 1e-7 is hard-coded here rather
// than taken from the configurable max_error -- confirm this is intended.
template<class Float>
__host__ __device__
bool unitarizeLinkNewton(const Matrix<complex<Float>,3>& in, Matrix<complex<Float>,3>* const result, int max_iter)
{
Matrix<complex<Float>,3> u, uinv;
u = in;
// always runs exactly max_iter iterations; there is no early-exit test
for(int i=0; i<max_iter; ++i){
uinv = inverse(u);
u = 0.5*(u + conj(uinv));
}
if(isUnitarizedLinkConsistent(in,u,0.0000001)==false)
{
printf("ERROR: Unitarized link is not consistent with incoming link\n");
return false;
}
*result = u;
return true;
}
#endif // GPU_UNITARIZE
// CPU reference path: unitarize every link of infield into outfield using the
// Newton iteration, always accumulating in double precision.  Only single and
// double precision fields are handled; other precisions are silently skipped.
// NOTE(review): num_failures is tallied but never reported or returned --
// consider surfacing it (e.g. via errorQuda); confirm intent.
void unitarizeLinksCPU(cpuGaugeField &outfield, const cpuGaugeField& infield)
{
#ifdef GPU_UNITARIZE
if (infield.Precision() != outfield.Precision())
errorQuda("Precisions must match (out=%d != in=%d)", outfield.Precision(), infield.Precision());
int num_failures = 0;
Matrix<complex<double>,3> inlink, outlink;
// 18 = 3x3 complex entries (9 complex = 18 reals) per link
for (int i=0; i<infield.Volume(); ++i){
for (int dir=0; dir<4; ++dir){
if (infield.Precision() == QUDA_SINGLE_PRECISION){
copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double>(inlink, &outlink, max_iter_newton) == false ) num_failures++;
copyLinkToArray(((float*)(outfield.Gauge_p()) + (i*4 + dir)*18), outlink);
} else if (infield.Precision() == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double>(inlink, &outlink, max_iter_newton) == false ) num_failures++;
copyLinkToArray(((double*)(outfield.Gauge_p()) + (i*4 + dir)*18), outlink);
} // precision?
} // dir
} // loop over volume
return;
#else
errorQuda("Unitarization has not been built");
#endif
}
// CPU function which checks that the gauge field is unitary
// CPU check that every link of a gauge field is unitary to within max_error.
// On the first failing link, prints the site/direction, the link, and
// link^dag * link (which should be the identity), then returns false.
bool isUnitary(const cpuGaugeField& field, double max_error)
{
#ifdef GPU_UNITARIZE
Matrix<complex<double>,3> link, identity;
for(int i=0; i<field.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(field.Precision() == QUDA_SINGLE_PRECISION){
copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else if(field.Precision() == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else{
errorQuda("Unsupported precision\n");
}
if (link.isUnitary(max_error) == false) {
printf("Unitarity failure\n");
printf("site index = %d,\t direction = %d\n", i, dir);
printLink(link);
// conj(link)*link should reproduce the identity for a unitary link
identity = conj(link)*link;
printLink(identity);
return false;
}
} // dir
} // i
return true;
#else
errorQuda("Unitarization has not been built");
return false;
#endif
} // is unitary
#ifdef GPU_UNITARIZE
// Kernel: one thread per (checkerboard site idx, parity, direction mu).
// Loads a link in the storage precision, unitarizes in double precision,
// optionally verifies unitarity (counting failures via atomicAdd), and
// writes the result back in the storage precision.
template<typename Float, typename Out, typename In>
__global__ void DoUnitarizedLink(UnitarizeLinksArg<Out,In> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y + blockIdx.y*blockDim.y;
int mu = threadIdx.z + blockIdx.z*blockDim.z;
if (idx >= arg.threads) return;
if (mu >= 4) return;
// result is always in double precision
Matrix<complex<double>,3> v, result;
Matrix<complex<Float>,3> tmp = arg.input(mu, idx, parity);
v = tmp;
unitarizeLinkMILC(v, &result, arg);
if (arg.check_unitarization) {
if (result.isUnitary(arg.max_error) == false) atomicAdd(arg.fails, 1);
}
// round back down to the storage precision before writing out
tmp = result;
arg.output(mu, idx, parity) = tmp;
}
// Autotuned launcher for DoUnitarizedLink.  The Y/Z tuning dimensions are
// fixed to 2 (parity) and 4 (direction); only the X block size is tuned.
template<typename Float, typename Out, typename In>
class UnitarizeLinks : TunableVectorYZ {
UnitarizeLinksArg<Out,In> arg;
const GaugeField &meta; // only used for the tuning key (volume string)
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.threads; }
public:
UnitarizeLinks(UnitarizeLinksArg<Out,In> &arg, const GaugeField &meta)
: TunableVectorYZ(2,4), arg(arg), meta(meta) { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( DoUnitarizedLink<Float,Out,In>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
}
// when unitarizing in place, back up / restore the field around tuning runs
void preTune() { if (arg.input.gauge == arg.output.gauge) arg.output.save(); }
void postTune() {
if (arg.input.gauge == arg.output.gauge) arg.output.load();
hipMemset(arg.fails, 0, sizeof(int)); // reset fails counter
}
long long flops() const {
// Accounted only the minimum flops for the case reunitarize_svd_only=0
return 4ll * 2 * arg.threads * 1147;
}
long long bytes() const { return 4ll * 2 * arg.threads * (arg.input.Bytes() + arg.output.Bytes()); }
TuneKey tuneKey() const {
std::stringstream aux;
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str());
}
};
// Build the kernel argument from the file-scope tuning parameters and run the
// tuned unitarization kernel on the default stream.
template<typename Float, typename Out, typename In>
void unitarizeLinks(Out output, const In input, const cudaGaugeField& meta, int* fails) {
UnitarizeLinksArg<Out,In> arg(output, input, meta, fails, max_iter, unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only, svd_rel_error, svd_abs_error);
UnitarizeLinks<Float, Out, In> unitlinks(arg, meta);
unitlinks.apply(0);
qudaDeviceSynchronize(); // need to synchronize to ensure failure write has completed
}
// Dispatch over all (output, input) reconstruction combinations, mapping each
// to the matching gauge_mapper accessor types before invoking the launcher.
// Only native field orders are supported.
template<typename Float>
void unitarizeLinks(cudaGaugeField& output, const cudaGaugeField &input, int* fails) {
if( output.isNative() && input.isNative() ) {
if(output.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Out;
if(input.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else {
errorQuda("Reconstruction type %d of gauge field not supported", input.Reconstruct());
}
} else if(output.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Out;
if(input.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else {
errorQuda("Reconstruction type %d of gauge field not supported", input.Reconstruct());
}
} else if(output.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Out;
if(input.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else {
errorQuda("Reconstruction type %d of gauge field not supported", input.Reconstruct());
}
} else {
errorQuda("Reconstruction type %d of gauge field not supported", output.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order (output=%d, input=%d)", output.Order(), input.Order());
}
}
#endif // GPU_UNITARIZE
// Public entry point: validate matching precisions and dispatch to the
// single- or double-precision instantiation.
void unitarizeLinks(cudaGaugeField& output, const cudaGaugeField &input, int* fails) {
#ifdef GPU_UNITARIZE
if (input.Precision() != output.Precision())
errorQuda("input (%d) and output (%d) precisions must match", output.Precision(), input.Precision());
if (input.Precision() == QUDA_SINGLE_PRECISION) {
unitarizeLinks<float>(output, input, fails);
} else if(input.Precision() == QUDA_DOUBLE_PRECISION) {
unitarizeLinks<double>(output, input, fails);
} else {
errorQuda("Precision %d not supported", input.Precision());
}
#else
errorQuda("Unitarization has not been built");
#endif
}
// Convenience overload: unitarize a field in place (output == input).
void unitarizeLinks(cudaGaugeField &links, int* fails) {
unitarizeLinks(links, links, fails);
}
// Kernel-argument container for the SU(3) projection kernel: in-place gauge
// accessor, tolerance, and a device-side failure counter.
template <typename Float, typename G>
struct ProjectSU3Arg {
int threads; // number of active threads required (checkerboard volume)
G u; // gauge accessor, read and written in place
Float tol; // projection/unitarity tolerance
int *fails; // device counter incremented per non-unitary result
ProjectSU3Arg(G u, const GaugeField &meta, Float tol, int *fails)
: threads(meta.VolumeCB()), u(u), tol(tol), fails(fails) { }
};
// Kernel: one thread per (checkerboard site idx, parity, direction mu).
// Projects each link onto SU(3) in place via polarSu3 and counts links that
// still fail the unitarity test afterwards.
template<typename Float, typename G>
__global__ void ProjectSU3kernel(ProjectSU3Arg<Float,G> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y + blockIdx.y*blockDim.y;
int mu = threadIdx.z + blockIdx.z*blockDim.z;
if (idx >= arg.threads) return;
if (mu >= 4) return;
Matrix<complex<Float>,3> u = arg.u(mu, idx, parity);
polarSu3<Float>(u, arg.tol);
// count number of failures
if (u.isUnitary(arg.tol) == false) {
atomicAdd(arg.fails, 1);
}
arg.u(mu, idx, parity) = u;
}
// Autotuned launcher for ProjectSU3kernel; Y/Z tuning dimensions fixed to
// 2 (parity) and 4 (direction).
template<typename Float, typename G>
class ProjectSU3 : TunableVectorYZ {
ProjectSU3Arg<Float,G> arg;
const GaugeField &meta; // only used for the tuning key (volume string)
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.threads; }
public:
ProjectSU3(ProjectSU3Arg<Float,G> &arg, const GaugeField &meta)
: TunableVectorYZ(2, 4), arg(arg), meta(meta) { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( ProjectSU3kernel<Float,G>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
}
// projection is in place, so always back up / restore around tuning runs
void preTune() { arg.u.save(); }
void postTune() {
arg.u.load();
hipMemset(arg.fails, 0, sizeof(int)); // reset fails counter
}
long long flops() const { return 0; } // depends on number of iterations
long long bytes() const { return 4ll * 2 * arg.threads * 2 * arg.u.Bytes(); }
TuneKey tuneKey() const {
std::stringstream aux;
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str());
}
};
// Typed implementation: only un-reconstructed (RECONSTRUCT_NO) storage is
// supported, since the projection rewrites full links in place.
template <typename Float>
void projectSU3(cudaGaugeField &u, double tol, int *fails) {
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
ProjectSU3Arg<Float,G> arg(G(u), u, static_cast<Float>(tol), fails);
ProjectSU3<Float,G> project(arg, u);
project.apply(0);
// synchronize so the device-side failure count is visible to the host
qudaDeviceSynchronize();
checkCudaError();
} else {
errorQuda("Reconstruct %d not supported", u.Reconstruct());
}
}
// Public entry point: reject fields with staggered phases applied, then
// dispatch on precision.
void projectSU3(cudaGaugeField &u, double tol, int *fails) {
#ifdef GPU_UNITARIZE
// check the the field doesn't have staggered phases applied
if (u.StaggeredPhaseApplied())
errorQuda("Cannot project gauge field with staggered phases applied");
if (u.Precision() == QUDA_DOUBLE_PRECISION) {
projectSU3<double>(u, tol, fails);
} else if (u.Precision() == QUDA_SINGLE_PRECISION) {
projectSU3<float>(u, tol, fails);
} else {
errorQuda("Precision %d not supported", u.Precision());
}
#else
errorQuda("Unitarization has not been built");
#endif
}
} // namespace quda
| b7606609d3bdf2b685f2b4df2ff82c1511b3b07f.cu | #include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <tune_quda.h>
#include <quda_matrix.h>
#include <unitarization_links.h>
#include <su3_project.cuh>
#include <index_helper.cuh>
namespace quda{
#ifdef GPU_UNITARIZE
namespace{
#include <svd_quda.h>
}
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
#ifndef FL_UNITARIZE_PI23
#define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*0.66666666666666666666
#endif
static const int max_iter_newton = 20;
static const int max_iter = 20;
static double unitarize_eps = 1e-14;
static double max_error = 1e-10;
static int reunit_allow_svd = 1;
static int reunit_svd_only = 0;
static double svd_rel_error = 1e-6;
static double svd_abs_error = 1e-6;
template <typename Out, typename In>
struct UnitarizeLinksArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Out output;
const In input;
int *fails;
const int max_iter;
const double unitarize_eps;
const double max_error;
const int reunit_allow_svd;
const int reunit_svd_only;
const double svd_rel_error;
const double svd_abs_error;
const static bool check_unitarization = true;
UnitarizeLinksArg(Out &output, const In &input, const GaugeField &data, int* fails,
int max_iter, double unitarize_eps, double max_error,
int reunit_allow_svd, int reunit_svd_only, double svd_rel_error,
double svd_abs_error)
: threads(data.VolumeCB()), output(output), input(input), fails(fails), unitarize_eps(unitarize_eps),
max_iter(max_iter), max_error(max_error), reunit_allow_svd(reunit_allow_svd),
reunit_svd_only(reunit_svd_only), svd_rel_error(svd_rel_error),
svd_abs_error(svd_abs_error)
{
for (int dir=0; dir<4; ++dir) X[dir] = data.X()[dir];
}
};
#endif // GPU_UNITARIZE
void setUnitarizeLinksConstants(double unitarize_eps_, double max_error_,
bool reunit_allow_svd_, bool reunit_svd_only_,
double svd_rel_error_, double svd_abs_error_) {
#ifdef GPU_UNITARIZE
unitarize_eps = unitarize_eps_;
max_error = max_error_;
reunit_allow_svd = reunit_allow_svd_;
reunit_svd_only = reunit_svd_only_;
svd_rel_error = svd_rel_error_;
svd_abs_error = svd_abs_error_;
#else
errorQuda("Unitarization has not been built");
#endif
}
#ifdef GPU_UNITARIZE
template<class Cmplx>
__device__ __host__
bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix,
const Matrix<Cmplx,3>& unitary_matrix,
double max_error)
{
Matrix<Cmplx,3> temporary;
temporary = conj(initial_matrix)*unitary_matrix;
temporary = temporary*temporary - conj(initial_matrix)*initial_matrix;
for(int i=0; i<3; ++i){
for(int j=0; j<3; ++j){
if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){
return false;
}
}
}
return true;
}
// Return the smallest absolute value among the first `size` entries of `array`.
// Assumes size >= 1 (array[0] is read unconditionally).
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
  T result = fabs(array[0]);
  for (int idx = 1; idx < size; ++idx) {
    const T candidate = fabs(array[idx]);
    if (candidate < result) result = candidate;
  }
  return result;
}
// True when |a - b| is strictly below epsilon (absolute-error comparison).
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
  return fabs(a - b) < epsilon;
}
// True when the relative error |(a - b)/b| is strictly below epsilon.
// The exact-equality short-circuit also makes the a == b == 0 case succeed,
// which the plain division form rejected (0/0 -> NaN compares false).
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
  if (a == b) return true;            // covers b == 0 with a == 0
  return fabs((a - b) / b) < epsilon; // b == 0, a != b yields inf/NaN -> false, as before
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
// Compute R = Q^{-1/2} for the Hermitian matrix Q (here Q = V^dag V) via the
// Cayley-Hamilton theorem: the eigenvalues of Q are obtained from the
// trigonometric solution of the characteristic cubic, and R is assembled as
// c[0]*I + c[1]*Q + c[2]*Q^2.
// Returns false (so the caller can fall back to SVD) when |det Q| is below
// arg.svd_abs_error, or when the product of the computed eigenvalues fails to
// reproduce det Q to relative accuracy arg.svd_rel_error.
// Fix: removed a stray duplicate semicolon after the c[2] assignment.
template<class Float, typename Arg>
__device__ __host__
bool reciprocalRoot(const Matrix<complex<Float>,3>& q, Matrix<complex<Float>,3>* res, Arg &arg){
Matrix<complex<Float>,3> qsq, tempq;
Float c[3];
Float g[3];
const Float one_third = 0.333333333333333333333;
const Float one_ninth = 0.111111111111111111111;
const Float one_eighteenth = 0.055555555555555555555;
qsq = q*q;
tempq = qsq*q;
// power-sum trace invariants: c[k] = tr(Q^{k+1})/(k+1)
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x * 0.5;
c[2] = getTrace(tempq).x * one_third;
// default eigenvalues (all equal) used when |s| is negligible below
g[0] = g[1] = g[2] = c[0] * one_third;
Float r,s,theta;
s = c[1]*one_third - c[0]*c[0]*one_eighteenth;
Float cosTheta;
if(fabs(s) >= arg.unitarize_eps){ // faster when this conditional is removed?
const Float rsqrt_s = rsqrt(s);
r = c[2]*0.5 - (c[0]*one_third)*(c[1] - c[0]*c[0]*one_ninth);
cosTheta = r*rsqrt_s*rsqrt_s*rsqrt_s;
// clamp the angle when rounding pushes |cos(theta)| past 1
if(fabs(cosTheta) >= 1.0){
theta = (r > 0) ? 0.0 : FL_UNITARIZE_PI;
}else{
theta = acos(cosTheta); // this is the primary performance limiter
}
const Float sqrt_s = s*rsqrt_s;
#if 0 // experimental version
Float as, ac;
sincos( theta*one_third, &as, &ac );
g[0] = c[0]*one_third + 2*sqrt_s*ac;
//g[1] = c[0]*one_third + 2*sqrt_s*(ac*cos(1*FL_UNITARIZE_PI23) - as*sin(1*FL_UNITARIZE_PI23));
g[1] = c[0]*one_third - 2*sqrt_s*(0.5*ac + as*0.8660254037844386467637);
//g[2] = c[0]*one_third + 2*sqrt_s*(ac*cos(2*FL_UNITARIZE_PI23) - as*sin(2*FL_UNITARIZE_PI23));
g[2] = c[0]*one_third + 2*sqrt_s*(-0.5*ac + as*0.8660254037844386467637);
#else
g[0] = c[0]*one_third + 2*sqrt_s*cos( theta*one_third );
g[1] = c[0]*one_third + 2*sqrt_s*cos( theta*one_third + FL_UNITARIZE_PI23 );
g[2] = c[0]*one_third + 2*sqrt_s*cos( theta*one_third + 2*FL_UNITARIZE_PI23 );
#endif
}
// Check the eigenvalues, if the determinant does not match the product of the eigenvalues
// return false. Then call SVD instead.
Float det = getDeterminant(q).x;
if( fabs(det) < arg.svd_abs_error) return false;
if( checkRelativeError(g[0]*g[1]*g[2],det,arg.svd_rel_error) == false ) return false;
// At this point we have finished with the c's
// use these to store sqrt(g)
for(int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
const Float denominator = 1.0 / ( g[2]*(g[0]*g[1]-g[2]) );
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1])) * denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1]) * denominator;
c[2] = g[0] * denominator;
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
*res = tempq;
return true;
}
// Unitarize a single SU(3) link following the MILC strategy: first attempt
// the cheap Cayley-Hamilton construction W = V (V^dag V)^{-1/2}; if that is
// numerically unreliable (or arg.reunit_svd_only is set) fall back to the
// SVD-based polar factor W = U V^dag from V = U S V^dag.
// Returns false only when Cayley-Hamilton fails and SVD fallback is disabled.
template<class Float, typename Arg>
__host__ __device__
bool unitarizeLinkMILC(const Matrix<complex<Float>,3>& in, Matrix<complex<Float>,3>* const result, Arg &arg)
{
Matrix<complex<Float>,3> u;
if( !arg.reunit_svd_only ){
// reciprocalRoot returns false when the eigenvalue computation looks
// unreliable, signalling that SVD should be used instead
if( reciprocalRoot<Float>(conj(in)*in,&u,arg) ){
*result = in*u;
return true;
}
}
// If we've got this far, then the Caley-Hamilton unitarization
// has failed. If SVD is not allowed, the unitarization has failed.
if( !arg.reunit_allow_svd ) return false;
Matrix<complex<Float>,3> v;
Float singular_values[3];
computeSVD<Float>(in, u, v, singular_values);
*result = u*conj(v);
return true;
} // unitarizeMILC
template<class Float>
__host__ __device__
bool unitarizeLinkSVD(const Matrix<complex<Float>,3>& in, Matrix<complex<Float>,3>* const result,
const double max_error)
{
Matrix<complex<Float>,3> u, v;
Float singular_values[3];
computeSVD<Float>(in, u, v, singular_values); // should pass pointers to u,v I guess
*result = u*conj(v);
if (result->isUnitary(max_error)==false)
{
printf("ERROR: Link unitarity test failed\n");
printf("TOLERANCE: %g\n", max_error);
return false;
}
return true;
}
template<class Float>
__host__ __device__
bool unitarizeLinkNewton(const Matrix<complex<Float>,3>& in, Matrix<complex<Float>,3>* const result, int max_iter)
{
Matrix<complex<Float>,3> u, uinv;
u = in;
for(int i=0; i<max_iter; ++i){
uinv = inverse(u);
u = 0.5*(u + conj(uinv));
}
if(isUnitarizedLinkConsistent(in,u,0.0000001)==false)
{
printf("ERROR: Unitarized link is not consistent with incoming link\n");
return false;
}
*result = u;
return true;
}
#endif // GPU_UNITARIZE
void unitarizeLinksCPU(cpuGaugeField &outfield, const cpuGaugeField& infield)
{
#ifdef GPU_UNITARIZE
if (infield.Precision() != outfield.Precision())
errorQuda("Precisions must match (out=%d != in=%d)", outfield.Precision(), infield.Precision());
int num_failures = 0;
Matrix<complex<double>,3> inlink, outlink;
for (int i=0; i<infield.Volume(); ++i){
for (int dir=0; dir<4; ++dir){
if (infield.Precision() == QUDA_SINGLE_PRECISION){
copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double>(inlink, &outlink, max_iter_newton) == false ) num_failures++;
copyLinkToArray(((float*)(outfield.Gauge_p()) + (i*4 + dir)*18), outlink);
} else if (infield.Precision() == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double>(inlink, &outlink, max_iter_newton) == false ) num_failures++;
copyLinkToArray(((double*)(outfield.Gauge_p()) + (i*4 + dir)*18), outlink);
} // precision?
} // dir
} // loop over volume
return;
#else
errorQuda("Unitarization has not been built");
#endif
}
// CPU function which checks that the gauge field is unitary
bool isUnitary(const cpuGaugeField& field, double max_error)
{
#ifdef GPU_UNITARIZE
Matrix<complex<double>,3> link, identity;
for(int i=0; i<field.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(field.Precision() == QUDA_SINGLE_PRECISION){
copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else if(field.Precision() == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else{
errorQuda("Unsupported precision\n");
}
if (link.isUnitary(max_error) == false) {
printf("Unitarity failure\n");
printf("site index = %d,\t direction = %d\n", i, dir);
printLink(link);
identity = conj(link)*link;
printLink(identity);
return false;
}
} // dir
} // i
return true;
#else
errorQuda("Unitarization has not been built");
return false;
#endif
} // is unitary
#ifdef GPU_UNITARIZE
// Kernel: one thread per (checkerboard site idx, parity, direction mu).
// Loads a link in the storage precision, unitarizes in double precision,
// optionally verifies unitarity (counting failures via atomicAdd), and
// writes the result back in the storage precision.
template<typename Float, typename Out, typename In>
__global__ void DoUnitarizedLink(UnitarizeLinksArg<Out,In> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y + blockIdx.y*blockDim.y;
int mu = threadIdx.z + blockIdx.z*blockDim.z;
if (idx >= arg.threads) return;
if (mu >= 4) return;
// result is always in double precision
Matrix<complex<double>,3> v, result;
Matrix<complex<Float>,3> tmp = arg.input(mu, idx, parity);
v = tmp;
unitarizeLinkMILC(v, &result, arg);
if (arg.check_unitarization) {
if (result.isUnitary(arg.max_error) == false) atomicAdd(arg.fails, 1);
}
// round back down to the storage precision before writing out
tmp = result;
arg.output(mu, idx, parity) = tmp;
}
template<typename Float, typename Out, typename In>
class UnitarizeLinks : TunableVectorYZ {
UnitarizeLinksArg<Out,In> arg;
const GaugeField &meta;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.threads; }
public:
UnitarizeLinks(UnitarizeLinksArg<Out,In> &arg, const GaugeField &meta)
: TunableVectorYZ(2,4), arg(arg), meta(meta) { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
DoUnitarizedLink<Float,Out,In><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
}
void preTune() { if (arg.input.gauge == arg.output.gauge) arg.output.save(); }
void postTune() {
if (arg.input.gauge == arg.output.gauge) arg.output.load();
cudaMemset(arg.fails, 0, sizeof(int)); // reset fails counter
}
long long flops() const {
// Accounted only the minimum flops for the case reunitarize_svd_only=0
return 4ll * 2 * arg.threads * 1147;
}
long long bytes() const { return 4ll * 2 * arg.threads * (arg.input.Bytes() + arg.output.Bytes()); }
TuneKey tuneKey() const {
std::stringstream aux;
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str());
}
};
template<typename Float, typename Out, typename In>
void unitarizeLinks(Out output, const In input, const cudaGaugeField& meta, int* fails) {
UnitarizeLinksArg<Out,In> arg(output, input, meta, fails, max_iter, unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only, svd_rel_error, svd_abs_error);
UnitarizeLinks<Float, Out, In> unitlinks(arg, meta);
unitlinks.apply(0);
qudaDeviceSynchronize(); // need to synchronize to ensure failure write has completed
}
template<typename Float>
void unitarizeLinks(cudaGaugeField& output, const cudaGaugeField &input, int* fails) {
if( output.isNative() && input.isNative() ) {
if(output.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Out;
if(input.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else {
errorQuda("Reconstruction type %d of gauge field not supported", input.Reconstruct());
}
} else if(output.Reconstruct() == QUDA_RECONSTRUCT_12){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Out;
if(input.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else {
errorQuda("Reconstruction type %d of gauge field not supported", input.Reconstruct());
}
} else if(output.Reconstruct() == QUDA_RECONSTRUCT_8){
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Out;
if(input.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_12) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else if(input.Reconstruct() == QUDA_RECONSTRUCT_8) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type In;
unitarizeLinks<Float>(Out(output), In(input), input, fails) ;
} else {
errorQuda("Reconstruction type %d of gauge field not supported", input.Reconstruct());
}
} else {
errorQuda("Reconstruction type %d of gauge field not supported", output.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order (output=%d, input=%d)", output.Order(), input.Order());
}
}
#endif // GPU_UNITARIZE
// Unitarize every link of 'input', writing the result into 'output'.
// Both fields must have the same precision; *fails is a device-side counter
// incremented for each link that could not be unitarized.
void unitarizeLinks(cudaGaugeField& output, const cudaGaugeField &input, int* fails) {
#ifdef GPU_UNITARIZE
  if (input.Precision() != output.Precision())
    // BUGFIX: the arguments were swapped relative to the message text
    // (output was printed as "input" and vice versa).
    errorQuda("input (%d) and output (%d) precisions must match", input.Precision(), output.Precision());

  // Dispatch on the (shared) precision of the two fields.
  if (input.Precision() == QUDA_SINGLE_PRECISION) {
    unitarizeLinks<float>(output, input, fails);
  } else if (input.Precision() == QUDA_DOUBLE_PRECISION) {
    unitarizeLinks<double>(output, input, fails);
  } else {
    errorQuda("Precision %d not supported", input.Precision());
  }
#else
  errorQuda("Unitarization has not been built");
#endif
}
// In-place overload: unitarize the links of 'links', using the field as both
// source and destination. *fails counts links that failed to unitarize.
void unitarizeLinks(cudaGaugeField &links, int* fails) {
unitarizeLinks(links, links, fails);
}
// Kernel argument bundle for the SU(3) projection kernel.
template <typename Float, typename G>
struct ProjectSU3Arg {
int threads; // number of active threads required (checkerboard volume of the field)
G u; // gauge-field accessor (load/store of link matrices)
Float tol; // tolerance for the polar projection and the unitarity check
int *fails; // device counter of links that failed the unitarity check
// 'meta' supplies the geometry; VolumeCB() is the per-parity site count.
ProjectSU3Arg(G u, const GaugeField &meta, Float tol, int *fails)
: threads(meta.VolumeCB()), u(u), tol(tol), fails(fails) { }
};
// Project one link matrix onto SU(3) via polar decomposition.
// Launch layout: x -> checkerboard site index, y -> parity, z -> direction mu.
// NOTE(review): mu is bounds-checked below but parity is not; this presumably
// relies on the TunableVectorYZ(2, 4) launch never over-provisioning the
// y-dimension -- confirm against the tuning framework.
template<typename Float, typename G>
__global__ void ProjectSU3kernel(ProjectSU3Arg<Float,G> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y + blockIdx.y*blockDim.y;
int mu = threadIdx.z + blockIdx.z*blockDim.z;
if (idx >= arg.threads) return;
if (mu >= 4) return;
// Load, project, then check the result against the same tolerance.
Matrix<complex<Float>,3> u = arg.u(mu, idx, parity);
polarSu3<Float>(u, arg.tol);
// count number of failures
if (u.isUnitary(arg.tol) == false) {
atomicAdd(arg.fails, 1);
}
arg.u(mu, idx, parity) = u;
}
// Autotuned wrapper around ProjectSU3kernel. The y/z vector lengths are fixed
// to 2 (parities) and 4 (directions); only the x (site) dimension is tuned.
template<typename Float, typename G>
class ProjectSU3 : TunableVectorYZ {
ProjectSU3Arg<Float,G> arg;
const GaugeField &meta; // used only for the tune key (volume string)
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.threads; }
public:
ProjectSU3(ProjectSU3Arg<Float,G> &arg, const GaugeField &meta)
: TunableVectorYZ(2, 4), arg(arg), meta(meta) { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
ProjectSU3kernel<Float,G><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
}
// The kernel writes the gauge field in place, so back it up/restore it
// around tuning runs; tuning also bumps the fails counter, hence the reset.
void preTune() { arg.u.save(); }
void postTune() {
arg.u.load();
cudaMemset(arg.fails, 0, sizeof(int)); // reset fails counter
}
long long flops() const { return 0; } // depends on number of iterations
// 4 directions x 2 parities x threads links, each loaded and stored once
// (presumably u.Bytes() is per-link here -- TODO confirm units).
long long bytes() const { return 4ll * 2 * arg.threads * 2 * arg.u.Bytes(); }
TuneKey tuneKey() const {
std::stringstream aux;
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str());
}
};
// Precision-typed entry point: instantiate the accessor, run the (autotuned)
// projection kernel synchronously on the default stream, and surface any
// CUDA error. Only non-reconstructed (RECONSTRUCT_NO) fields are supported.
template <typename Float>
void projectSU3(cudaGaugeField &u, double tol, int *fails) {
if (u.Reconstruct() == QUDA_RECONSTRUCT_NO) {
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G;
ProjectSU3Arg<Float,G> arg(G(u), u, static_cast<Float>(tol), fails);
ProjectSU3<Float,G> project(arg, u);
project.apply(0);
qudaDeviceSynchronize();
checkCudaError();
} else {
errorQuda("Reconstruct %d not supported", u.Reconstruct());
}
}
// Project each link of 'u' onto SU(3) (polar projection); *fails receives the
// number of links whose projection did not reach unitarity within 'tol'.
void projectSU3(cudaGaugeField &u, double tol, int *fails) {
#ifdef GPU_UNITARIZE
  // Projection operates on the raw link matrices, so refuse fields that
  // still carry staggered phases.
  if (u.StaggeredPhaseApplied())
    errorQuda("Cannot project gauge field with staggered phases applied");

  switch (u.Precision()) {
  case QUDA_DOUBLE_PRECISION: projectSU3<double>(u, tol, fails); break;
  case QUDA_SINGLE_PRECISION: projectSU3<float>(u, tol, fails); break;
  default: errorQuda("Precision %d not supported", u.Precision());
  }
#else
  errorQuda("Unitarization has not been built");
#endif
}
} // namespace quda
|
65bf1573415934dad518e151eecbea9bd9c06be8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vectorAdd.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps every matrix size in matrices_ and
// every launch shape in blocks_, timing 1000 back-to-back vectorAdd launches.
int main(int argc, char **argv) {
hipSetDevice(0);
// NOTE(review): argv[1] is dereferenced without checking argc -- confirm the
// driver always supplies the matrix count (must be <= 7, size of matrices_).
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): these allocations request XSIZE*YSIZE *bytes*, not
// XSIZE*YSIZE*sizeof(float) -- likely 4x undersized. With numElements == 1
// the kernel presumably touches only the first element, so it does not
// fault, but confirm the generator's intent.
const float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
const float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int numElements = 1;
// Round the problem dimensions up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces runtime/context initialization before timing.
hipFree(0);hipLaunchKernelGGL((
vectorAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,numElements);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vectorAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,numElements);
}
// Timed region: 1000 asynchronous launches with no synchronization inside,
// so this largely measures launch/enqueue cost rather than kernel runtime.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vectorAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,numElements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// NOTE(review): A/B/C are never hipFree'd -- device memory leaks on every
// iteration of this loop; confirm whether that is acceptable for the harness.
}
}} | 65bf1573415934dad518e151eecbea9bd9c06be8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vectorAdd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark harness: sweeps every matrix size in matrices_ and every launch
// shape in blocks_, timing 1000 back-to-back vectorAdd launches per config
// and printing "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" lines.
// argv[1] = number of matrices_ entries to sweep (<= 7).
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // BUGFIX: allocate XSIZE*YSIZE *floats*; the original passed
            // XSIZE*YSIZE bytes (4x undersized buffers).
            const float *A = NULL;
            cudaMalloc(&A, XSIZE * YSIZE * sizeof(float));
            const float *B = NULL;
            cudaMalloc(&B, XSIZE * YSIZE * sizeof(float));
            float *C = NULL;
            cudaMalloc(&C, XSIZE * YSIZE * sizeof(float));
            int numElements = 1;
            // Round the problem dimensions up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // force context initialization before timing
            // Warm-up launches (not timed).
            vectorAdd<<<gridBlock, threadBlock>>>(A, B, C, numElements);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                vectorAdd<<<gridBlock, threadBlock>>>(A, B, C, numElements);
            }
            // Timed region: 1000 asynchronous launches, no sync inside, so
            // this largely measures launch/enqueue cost.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                vectorAdd<<<gridBlock, threadBlock>>>(A, B, C, numElements);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // BUGFIX: release the per-configuration buffers; the original
            // leaked all three on every iteration.
            cudaFree((void *)A);
            cudaFree((void *)B);
            cudaFree(C);
        }
    }
}
7a6b1a4d694157e16e60e58b5c207dc356b437f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -x hip %s \
// RUN: -std=c++17 -O3 -mllvm -amdgpu-internalize-symbols -emit-llvm -o - \
// RUN: | FileCheck -check-prefix=DEV %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x hip %s \
// RUN: -std=c++17 -O3 -emit-llvm -o - | FileCheck -check-prefix=HOST %s
// Negative tests.
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -x hip %s \
// RUN: -std=c++17 -O3 -mllvm -amdgpu-internalize-symbols -emit-llvm -o - \
// RUN: | FileCheck -check-prefix=DEV-NEG %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x hip %s \
// RUN: -std=c++17 -O3 -emit-llvm -o - | FileCheck -check-prefix=HOST-NEG %s
#include "Inputs/cuda.h"
// DEV-DAG: @v1
__device__ int v1;
// DEV-DAG: @v2
__constant__ int v2;
// Check device variables used by neither host nor device functioins are not kept.
// DEV-NEG-NOT: @_ZL2v3
static __device__ int v3;
// Check device variables used by host functions are kept.
// DEV-DAG: @u1
__device__ int u1;
// DEV-DAG: @u2
__constant__ int u2;
// Check host-used static device var is in llvm.compiler.used.
// DEV-DAG: @_ZL2u3
static __device__ int u3;
// Check device-used static device var is emitted but is not in llvm.compiler.used.
// DEV-DAG: @_ZL2u4
static __device__ int u4;
// Check device variables with used attribute are always kept.
// DEV-DAG: @u5
__device__ __attribute__((used)) int u5;
// Test external device variable ODR-used by host code is not emitted or registered.
// DEV-NEG-NOT: @ext_var
extern __device__ int ext_var;
// DEV-DAG: @inline_var = linkonce_odr addrspace(1) externally_initialized global i32 0
__device__ inline int inline_var;
template<typename T>
using func_t = T (*) (T, T);
template <typename T>
__device__ T add_func (T x, T y)
{
return x + y;
}
// DEV-DAG: @_Z10p_add_funcIiE = linkonce_odr addrspace(1) externally_initialized global ptr @_Z8add_funcIiET_S0_S0_
template <typename T>
__device__ func_t<T> p_add_func = add_func<T>;
// Check non-constant constexpr variables ODR-used by host code only is not emitted.
// DEV-NEG-NOT: constexpr_var1a
// DEV-NEG-NOT: constexpr_var1b
constexpr int constexpr_var1a = 1;
inline constexpr int constexpr_var1b = 1;
// Check constant constexpr variables ODR-used by host code only.
// Non-inline constexpr variable has internal linkage, therefore it is not accessible by host and not kept.
// Inline constexpr variable has linkonce_ord linkage, therefore it can be accessed by host and kept.
// DEV-NEG-NOT: constexpr_var2a
// DEV-DAG: @constexpr_var2b = linkonce_odr addrspace(4) externally_initialized constant i32 2
__constant__ constexpr int constexpr_var2a = 2;
inline __constant__ constexpr int constexpr_var2b = 2;
void use(func_t<int> p);
__host__ __device__ void use(const int *p);
// Check static device variable in host function.
// DEV-DAG: @_ZZ4fun1vE11static_var1 = addrspace(1) externally_initialized global i32 3
void fun1() {
static __device__ int static_var1 = 3;
use(&u1);
use(&u2);
use(&u3);
use(&ext_var);
use(&inline_var);
use(p_add_func<int>);
use(&constexpr_var1a);
use(&constexpr_var1b);
use(&constexpr_var2a);
use(&constexpr_var2b);
use(&static_var1);
}
// Check static variable in host device function.
// DEV-DAG: @_ZZ4fun2vE11static_var2 = internal addrspace(1) global i32 4
// DEV-DAG: @_ZZ4fun2vE11static_var3 = addrspace(1) global i32 4
__host__ __device__ void fun2() {
static int static_var2 = 4;
static __device__ int static_var3 = 4;
use(&static_var2);
use(&static_var3);
}
__global__ void kern1(int **x) {
*x = &u4;
fun2();
}
// Check static variables of lambda functions.
// Lambda functions are implicit host device functions.
// Default static variables in lambda functions should be treated
// as host variables on host side, therefore should not be forced
// to be emitted on device.
// DEV-DAG: @_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var2 = addrspace(1) externally_initialized global i32 5
// DEV-NEG-NOT: @_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var1
namespace TestStaticVarInLambda {
class A {
public:
A(char *);
};
void fun() {
(void) [](char *c) {
static A var1(c);
static __device__ int var2 = 5;
(void) var1;
(void) var2;
};
}
}
// Check implicit constant variable ODR-used by host code is not emitted.
// AST contains instantiation of al<ar>, which triggers AST instantiation
// of x::al<ar>::am, which triggers AST instatiation of x::ap<ar>,
// which triggers AST instantiation of aw<ar>::c, which has type
// ar. ar has base class x which has member ah. x::ah is initialized
// with function pointer pointing to ar:as, which returns an object
// of type ou. The constexpr aw<ar>::c is an implicit constant variable
// which is ODR-used by host function x::ap<ar>. An incorrect implementation
// will force aw<ar>::c to be emitted on device side, which will trigger
// emit of x::as and further more ctor of ou and variable o.
// The ODR-use of aw<ar>::c in x::ap<ar> should be treated as a host variable
// instead of device variable.
// DEV-NEG-NOT: _ZN16TestConstexprVar1oE
namespace TestConstexprVar {
char o;
class ou {
public:
ou(char) { __builtin_strlen(&o); }
};
template < typename ao > struct aw { static constexpr ao c; };
class x {
protected:
typedef ou (*y)(const x *);
constexpr x(y ag) : ah(ag) {}
template < bool * > struct ak;
template < typename > struct al {
static bool am;
static ak< &am > an;
};
template < typename ao > static x ap() { (void)aw< ao >::c; return x(nullptr); }
y ah;
};
template < typename ao > bool x::al< ao >::am(&ap< ao >);
class ar : x {
public:
constexpr ar() : x(as) {}
static ou as(const x *) { return 0; }
al< ar > av;
};
}
// Check the exact list of variables to ensure @_ZL2u4 is not among them.
// DEV: @llvm.compiler.used = {{[^@]*}} @_Z10p_add_funcIiE
// DEV-SAME: {{^[^@]*}} @_ZL2u3
// DEV-SAME: {{^[^@]*}} @_ZZ4fun1vE11static_var1
// DEV-SAME: {{^[^@]*}} @_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var2
// DEV-SAME: {{^[^@]*}} @constexpr_var2b
// DEV-SAME: {{^[^@]*}} @inline_var
// DEV-SAME: {{^[^@]*}} @u1
// DEV-SAME: {{^[^@]*}} @u2
// DEV-SAME: {{^[^@]*}} @u5
// DEV-SAME: {{^[^@]*$}}
// HOST-DAG: hipRegisterVar{{.*}}@u1
// HOST-DAG: hipRegisterVar{{.*}}@u2
// HOST-DAG: hipRegisterVar{{.*}}@_ZL2u3
// HOST-DAG: hipRegisterVar{{.*}}@constexpr_var2b
// HOST-DAG: hipRegisterVar{{.*}}@u5
// HOST-DAG: hipRegisterVar{{.*}}@inline_var
// HOST-DAG: hipRegisterVar{{.*}}@_Z10p_add_funcIiE
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZ4fun1vE11static_var1
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZ4fun2vE11static_var2
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZ4fun2vE11static_var3
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var2
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var1
// HOST-NEG-NOT: hipRegisterVar{{.*}}@ext_var
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZL2u4
// HOST-NEG-NOT: hipRegisterVar{{.*}}@constexpr_var1a
// HOST-NEG-NOT: hipRegisterVar{{.*}}@constexpr_var1b
// HOST-NEG-NOT: hipRegisterVar{{.*}}@constexpr_var2a
| 7a6b1a4d694157e16e60e58b5c207dc356b437f3.cu | // REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -x hip %s \
// RUN: -std=c++17 -O3 -mllvm -amdgpu-internalize-symbols -emit-llvm -o - \
// RUN: | FileCheck -check-prefix=DEV %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x hip %s \
// RUN: -std=c++17 -O3 -emit-llvm -o - | FileCheck -check-prefix=HOST %s
// Negative tests.
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -x hip %s \
// RUN: -std=c++17 -O3 -mllvm -amdgpu-internalize-symbols -emit-llvm -o - \
// RUN: | FileCheck -check-prefix=DEV-NEG %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -x hip %s \
// RUN: -std=c++17 -O3 -emit-llvm -o - | FileCheck -check-prefix=HOST-NEG %s
#include "Inputs/cuda.h"
// DEV-DAG: @v1
__device__ int v1;
// DEV-DAG: @v2
__constant__ int v2;
// Check device variables used by neither host nor device functioins are not kept.
// DEV-NEG-NOT: @_ZL2v3
static __device__ int v3;
// Check device variables used by host functions are kept.
// DEV-DAG: @u1
__device__ int u1;
// DEV-DAG: @u2
__constant__ int u2;
// Check host-used static device var is in llvm.compiler.used.
// DEV-DAG: @_ZL2u3
static __device__ int u3;
// Check device-used static device var is emitted but is not in llvm.compiler.used.
// DEV-DAG: @_ZL2u4
static __device__ int u4;
// Check device variables with used attribute are always kept.
// DEV-DAG: @u5
__device__ __attribute__((used)) int u5;
// Test external device variable ODR-used by host code is not emitted or registered.
// DEV-NEG-NOT: @ext_var
extern __device__ int ext_var;
// DEV-DAG: @inline_var = linkonce_odr addrspace(1) externally_initialized global i32 0
__device__ inline int inline_var;
template<typename T>
using func_t = T (*) (T, T);
template <typename T>
__device__ T add_func (T x, T y)
{
return x + y;
}
// DEV-DAG: @_Z10p_add_funcIiE = linkonce_odr addrspace(1) externally_initialized global ptr @_Z8add_funcIiET_S0_S0_
template <typename T>
__device__ func_t<T> p_add_func = add_func<T>;
// Check non-constant constexpr variables ODR-used by host code only is not emitted.
// DEV-NEG-NOT: constexpr_var1a
// DEV-NEG-NOT: constexpr_var1b
constexpr int constexpr_var1a = 1;
inline constexpr int constexpr_var1b = 1;
// Check constant constexpr variables ODR-used by host code only.
// Non-inline constexpr variable has internal linkage, therefore it is not accessible by host and not kept.
// Inline constexpr variable has linkonce_ord linkage, therefore it can be accessed by host and kept.
// DEV-NEG-NOT: constexpr_var2a
// DEV-DAG: @constexpr_var2b = linkonce_odr addrspace(4) externally_initialized constant i32 2
__constant__ constexpr int constexpr_var2a = 2;
inline __constant__ constexpr int constexpr_var2b = 2;
void use(func_t<int> p);
__host__ __device__ void use(const int *p);
// Check static device variable in host function.
// DEV-DAG: @_ZZ4fun1vE11static_var1 = addrspace(1) externally_initialized global i32 3
void fun1() {
static __device__ int static_var1 = 3;
use(&u1);
use(&u2);
use(&u3);
use(&ext_var);
use(&inline_var);
use(p_add_func<int>);
use(&constexpr_var1a);
use(&constexpr_var1b);
use(&constexpr_var2a);
use(&constexpr_var2b);
use(&static_var1);
}
// Check static variable in host device function.
// DEV-DAG: @_ZZ4fun2vE11static_var2 = internal addrspace(1) global i32 4
// DEV-DAG: @_ZZ4fun2vE11static_var3 = addrspace(1) global i32 4
__host__ __device__ void fun2() {
static int static_var2 = 4;
static __device__ int static_var3 = 4;
use(&static_var2);
use(&static_var3);
}
__global__ void kern1(int **x) {
*x = &u4;
fun2();
}
// Check static variables of lambda functions.
// Lambda functions are implicit host device functions.
// Default static variables in lambda functions should be treated
// as host variables on host side, therefore should not be forced
// to be emitted on device.
// DEV-DAG: @_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var2 = addrspace(1) externally_initialized global i32 5
// DEV-NEG-NOT: @_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var1
namespace TestStaticVarInLambda {
class A {
public:
A(char *);
};
void fun() {
(void) [](char *c) {
static A var1(c);
static __device__ int var2 = 5;
(void) var1;
(void) var2;
};
}
}
// Check implicit constant variable ODR-used by host code is not emitted.
// AST contains instantiation of al<ar>, which triggers AST instantiation
// of x::al<ar>::am, which triggers AST instatiation of x::ap<ar>,
// which triggers AST instantiation of aw<ar>::c, which has type
// ar. ar has base class x which has member ah. x::ah is initialized
// with function pointer pointing to ar:as, which returns an object
// of type ou. The constexpr aw<ar>::c is an implicit constant variable
// which is ODR-used by host function x::ap<ar>. An incorrect implementation
// will force aw<ar>::c to be emitted on device side, which will trigger
// emit of x::as and further more ctor of ou and variable o.
// The ODR-use of aw<ar>::c in x::ap<ar> should be treated as a host variable
// instead of device variable.
// DEV-NEG-NOT: _ZN16TestConstexprVar1oE
namespace TestConstexprVar {
char o;
class ou {
public:
ou(char) { __builtin_strlen(&o); }
};
template < typename ao > struct aw { static constexpr ao c; };
class x {
protected:
typedef ou (*y)(const x *);
constexpr x(y ag) : ah(ag) {}
template < bool * > struct ak;
template < typename > struct al {
static bool am;
static ak< &am > an;
};
template < typename ao > static x ap() { (void)aw< ao >::c; return x(nullptr); }
y ah;
};
template < typename ao > bool x::al< ao >::am(&ap< ao >);
class ar : x {
public:
constexpr ar() : x(as) {}
static ou as(const x *) { return 0; }
al< ar > av;
};
}
// Check the exact list of variables to ensure @_ZL2u4 is not among them.
// DEV: @llvm.compiler.used = {{[^@]*}} @_Z10p_add_funcIiE
// DEV-SAME: {{^[^@]*}} @_ZL2u3
// DEV-SAME: {{^[^@]*}} @_ZZ4fun1vE11static_var1
// DEV-SAME: {{^[^@]*}} @_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var2
// DEV-SAME: {{^[^@]*}} @constexpr_var2b
// DEV-SAME: {{^[^@]*}} @inline_var
// DEV-SAME: {{^[^@]*}} @u1
// DEV-SAME: {{^[^@]*}} @u2
// DEV-SAME: {{^[^@]*}} @u5
// DEV-SAME: {{^[^@]*$}}
// HOST-DAG: hipRegisterVar{{.*}}@u1
// HOST-DAG: hipRegisterVar{{.*}}@u2
// HOST-DAG: hipRegisterVar{{.*}}@_ZL2u3
// HOST-DAG: hipRegisterVar{{.*}}@constexpr_var2b
// HOST-DAG: hipRegisterVar{{.*}}@u5
// HOST-DAG: hipRegisterVar{{.*}}@inline_var
// HOST-DAG: hipRegisterVar{{.*}}@_Z10p_add_funcIiE
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZ4fun1vE11static_var1
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZ4fun2vE11static_var2
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZ4fun2vE11static_var3
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var2
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZZZN21TestStaticVarInLambda3funEvENKUlPcE_clES0_E4var1
// HOST-NEG-NOT: hipRegisterVar{{.*}}@ext_var
// HOST-NEG-NOT: hipRegisterVar{{.*}}@_ZL2u4
// HOST-NEG-NOT: hipRegisterVar{{.*}}@constexpr_var1a
// HOST-NEG-NOT: hipRegisterVar{{.*}}@constexpr_var1b
// HOST-NEG-NOT: hipRegisterVar{{.*}}@constexpr_var2a
|
31dcd00cb048025aec9d9eb1d19417a9fa96d669.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "nbody.h"
#include "redutil2.h"
#include "constants.h"
using namespace std;
using namespace redutil2;
namespace kernel_nbody
{
// Brute-force O(n^2) gravitational acceleration: one thread per body i sums
// the contribution of every other body j. The guard below handles the grid
// tail, so any 1D launch covering n_obj threads is valid.
__global__
void calc_grav_accel_naive
(
	uint32_t n_obj,
	const nbp_t::body_metadata* bmd,
	const nbp_t::param_t* p,
	const var3_t* r,
	var3_t* a
)
{
	const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n_obj)
	{
		// Accumulate in registers: the original read-modify-wrote global
		// memory a[i] inside the inner loop (two global transactions per
		// pair). Summation order is unchanged, so results are bit-identical.
		var3_t acc = {0.0, 0.0, 0.0};
		const var3_t r_i = r[i]; // hoist the loop-invariant load of r[i]
		var3_t r_ij = {0, 0, 0};
		for (uint32_t j = 0; j < n_obj; j++)
		{
			/* Skip the body with the same index */
			if (i == j)
			{
				continue;
			}
			// 3 FLOP
			r_ij.x = r[j].x - r_i.x;
			r_ij.y = r[j].y - r_i.y;
			r_ij.z = r[j].z - r_i.z;
			// 5 FLOP
			var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z); // = r2
			// 20 FLOP
			var_t d = sqrt(d2); // = r
			// 2 FLOP
			var_t s = p[j].mass / (d*d2);
			// 6 FLOP
			acc.x += s * r_ij.x;
			acc.y += s * r_ij.y;
			acc.z += s * r_ij.z;
		} // 36 FLOP
		// Single store per body, scaled by the gravitational constant K2.
		a[i].x = K2 * acc.x;
		a[i].y = K2 * acc.y;
		a[i].z = K2 * acc.z;
	}
}
} /* kernel_nbody */
// Construct the n-body problem: allocate metadata storage, load the solution
// info/data files, mirror the state to the device when running on the GPU,
// and record the initial conserved quantities.
nbody::nbody(string& path_si, string& path_sd, uint32_t n_obj, uint16_t n_ppo, comp_dev_t comp_dev) :
ode(3, n_obj, 6, n_ppo, comp_dev)
{
name = "Singular 3D n-body problem";
initialize();
allocate_storage();
load_solution_info(path_si);
load_solution_data(path_sd);
if (PROC_UNIT_GPU == comp_dev.proc_unit)
{
copy_vars(COPY_DIRECTION_TO_DEVICE);
copy_params(COPY_DIRECTION_TO_DEVICE);
}
calc_integral();
// First output epoch coincides with the loaded epoch t.
tout = t;
}

// Release the metadata buffers owned by this class (device first, then host).
nbody::~nbody()
{
deallocate_storage();
}
// Null out the metadata pointers so deallocation is safe even if allocation
// never happened.
void nbody::initialize()
{
h_md = NULL;
d_md = NULL;
md = NULL;
}

// Allocate host metadata storage, plus device storage when running on a GPU.
void nbody::allocate_storage()
{
allocate_host_storage();
if (PROC_UNIT_GPU == comp_dev.proc_unit)
{
allocate_device_storage();
}
}

// Host-side metadata buffer: one nbp_t::metadata_t per body.
void nbody::allocate_host_storage()
{
ALLOCATE_HOST_VECTOR((void**)&(h_md), n_obj * sizeof(nbp_t::metadata_t));
}

// Device-side mirror of the metadata buffer.
void nbody::allocate_device_storage()
{
ALLOCATE_DEVICE_VECTOR((void**)&(d_md), n_obj * sizeof(nbp_t::metadata_t));
}

// Release all metadata storage owned by this class.
void nbody::deallocate_storage()
{
//NOTE : First always release the DEVICE memory
if (PROC_UNIT_GPU == comp_dev.proc_unit)
{
deallocate_device_storage();
}
deallocate_host_storage();
}

void nbody::deallocate_host_storage()
{
FREE_HOST_VECTOR((void **)&(h_md));
}

void nbody::deallocate_device_storage()
{
FREE_DEVICE_VECTOR((void **)&(d_md));
}
// Copy the per-body metadata between the host and device buffers.
// Throws a std::string if 'dir' is neither TO_DEVICE nor TO_HOST.
void nbody::copy_metadata(copy_direction_t dir)
{
	const size_t n_bytes = n_obj * sizeof(nbp_t::metadata_t);

	if (COPY_DIRECTION_TO_DEVICE == dir)
	{
		copy_vector_to_device(d_md, h_md, n_bytes);
	}
	else if (COPY_DIRECTION_TO_HOST == dir)
	{
		copy_vector_to_host(h_md, d_md, n_bytes);
	}
	else
	{
		throw std::string("Parameter 'dir' is out of range.");
	}
}
// Hermite-style right-hand side: compute accelerations and jerks for the
// state in y_temp. Only the CPU path is implemented.
void nbody::calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* acc, var_t* jrk)
{
if (PROC_UNIT_CPU == comp_dev.proc_unit)
{
// TODO: implement the symmetric version
cpu_calc_dy(stage, curr_t, y_temp, acc, jrk, false);
}
else
{
throw string("The nbody::gpu_calc_dy is not implemented.");
}
}

// First-order right-hand side: dy = (velocities, accelerations) for the
// state in y_temp, dispatched to the CPU or GPU implementation.
void nbody::calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy)
{
if (PROC_UNIT_CPU == comp_dev.proc_unit)
{
// CPU path uses the symmetric (action-reaction) pairwise summation.
cpu_calc_dy(stage, curr_t, y_temp, dy, true);
}
else
{
gpu_calc_dy(stage, curr_t, y_temp, dy);
}
}
// Compute accelerations and jerks (d(acc)/dt) on the CPU with the direct
// O(n^2) pairwise sum, for use by a Hermite-type integrator.
//
// For each pair (i,j), with r_ji = r_j - r_i, v_ji = v_j - v_i, r = |r_ji|:
//   acc_i  += K2 m_j r_ji / r^3
//   jerk_i += K2 m_j [ v_ji / r^3 - 3 (r_ji . v_ji) r_ji / r^5 ]
// (the standard Hermite-scheme jerk, e.g. Makino & Aarseth 1992).
void nbody::cpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* acc, var_t* jrk, bool use_symm_prop)
{
	var3_t* r = (var3_t*)y_temp;
	var3_t* v = (var3_t*)(y_temp + 3*n_obj);
	var3_t* _acc = (var3_t*)(acc);
	var3_t* _jrk = (var3_t*)(jrk);

	// Clear the acceleration and jerk arrays: the += op can be used
	memset(_acc, 0, n_obj*sizeof(var3_t));
	memset(_jrk, 0, n_obj*sizeof(var3_t));

	nbp_t::param_t* p = (nbp_t::param_t*)h_p;
	if (use_symm_prop)
	{
		throw string("The symmetric version of nbody::cpu_calc_dy is not implemented.");
	}
	else
	{
		for (uint32_t i = 0; i < n_obj; i++)
		{
			var3_t r_ji = {0, 0, 0};
			var3_t v_ji = {0, 0, 0};
			for (uint32_t j = 0; j < n_obj; j++)
			{
				if (i == j)
				{
					continue;
				}
				r_ji.x = r[j].x - r[i].x;
				r_ji.y = r[j].y - r[i].y;
				r_ji.z = r[j].z - r[i].z;
				var_t d2 = SQR(r_ji.x) + SQR(r_ji.y) + SQR(r_ji.z);
				var_t d = sqrt(d2);
				var_t d_3 = K2 / (d*d2);
				var_t s = p[j].mass * d_3;    // = K2 m_j / r^3

				_acc[i].x += s * r_ji.x;
				_acc[i].y += s * r_ji.y;
				_acc[i].z += s * r_ji.z;

				v_ji.x = v[j].x - v[i].x;
				v_ji.y = v[j].y - v[i].y;
				v_ji.z = v[j].z - v[i].z;

				// BUGFIX: the jerk must use the *pairwise* acceleration term
				// (s * r_ji) scaled by 3 (r.v)/r^2. The previous code used
				// alpha = 3 (r.v)/r^5 multiplied by the running total _acc[i],
				// which is both the wrong power of r and mixes contributions
				// of previously processed pairs into the (i,j) term.
				var_t beta = 3.0 * (r_ji.x * v_ji.x + r_ji.y * v_ji.y + r_ji.z * v_ji.z) / d2;
				_jrk[i].x += s * (v_ji.x - beta * r_ji.x);
				_jrk[i].y += s * (v_ji.y - beta * r_ji.y);
				_jrk[i].z += s * (v_ji.z - beta * r_ji.z);
			}
		}
	}
}
// First-order right-hand side on the CPU: dy = (v, a) where a is the direct
// O(n^2) gravitational acceleration. With use_symm_prop, each pair is
// visited once (j > i) and Newton's third law supplies the reaction term.
void nbody::cpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy, bool use_symm_prop)
{
// Copy the velocities into dy
memcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t));

var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
// Clear the acceleration array: the += op can be used
memset(a, 0, 3*n_obj*sizeof(var_t));

nbp_t::param_t* p = (nbp_t::param_t*)h_p;
if (use_symm_prop)
{
// Symmetric sweep: each (i,j) pair handled once, applying the equal and
// opposite contribution to body j immediately.
for (uint32_t i = 0; i < n_obj; i++)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = i+1; j < n_obj; j++)
{
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);

var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;

// Reaction on body j (opposite sign, scaled by m_i).
s = p[i].mass * d_3;
a[j].x -= s * r_ij.x;
a[j].y -= s * r_ij.y;
a[j].z -= s * r_ij.z;
}
// Scale by the gravitational constant once per body.
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
else
{
// Plain double loop: every ordered pair contributes to body i only.
for (uint32_t i = 0; i < n_obj; i++)
{
var3_t r_ij = {0, 0, 0};
for (uint32_t j = 0; j < n_obj; j++)
{
if (i == j)
{
continue;
}
r_ij.x = r[j].x - r[i].x;
r_ij.y = r[j].y - r[i].y;
r_ij.z = r[j].z - r[i].z;
var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
var_t d = sqrt(d2);
var_t d_3 = 1.0 / (d*d2);
var_t s = p[j].mass * d_3;
a[i].x += s * r_ij.x;
a[i].y += s * r_ij.y;
a[i].z += s * r_ij.z;
}
a[i].x *= K2;
a[i].y *= K2;
a[i].z *= K2;
}
}
}
// First-order right-hand side on the GPU: device-to-device copy of the
// velocities into dy, then the naive n^2 kernel fills the accelerations.
// NOTE(review): d_md is passed as the kernel's body_metadata argument while
// it is allocated as nbp_t::metadata_t -- confirm these types agree.
void nbody::gpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy)
{
// TODO: do a benchmark and set the optimal thread number
{
n_tpb = 256;
}
set_kernel_launch_param(n_var, n_tpb, grid, block);

var3_t* r = (var3_t*)y_temp;
var3_t* a = (var3_t*)(dy + 3*n_obj);
nbp_t::param_t* p = (nbp_t::param_t*)d_p;

// TODO: use asynchronous copy operation
CUDA_SAFE_CALL(hipMemcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t), hipMemcpyDeviceToDevice));

hipLaunchKernelGGL(( kernel_nbody::calc_grav_accel_naive), dim3(grid), dim3(block), 0, 0, n_obj, d_md, p, r, a);
CUDA_CHECK_ERROR();
}
// Recompute the conserved quantities (barycenter position/velocity, angular
// momentum, total energy) from the host-side state, caching the values of
// the very first call as the reference (R0, V0, c0, h0).
// NOTE(review): 'first_call' is function-static, so the reference values are
// shared across ALL nbody instances in the process -- a second instance would
// inherit the first one's reference. Confirm a single instance is assumed.
void nbody::calc_integral()
{
static bool first_call = true;

nbp_t::param_t* p = (nbp_t::param_t*)h_p;
var3_t* r = (var3_t*)h_y;
var3_t* v = (var3_t*)(h_y + 3*n_obj);

integral.R = tools::nbp::calc_position_of_bc(n_obj, p, r);
integral.V = tools::nbp::calc_velocity_of_bc(n_obj, p, v);
integral.c = tools::nbp::calc_angular_momentum(n_obj, p, r, v);
integral.h = tools::nbp::calc_total_energy(n_obj, p, r, v);

if (first_call)
{
first_call = false;

integral.R0 = integral.R;
integral.V0 = integral.V;
integral.c0 = integral.c;
integral.h0 = integral.h;
}
}
// Read the solution header (epoch t, step size dt, body count n_obj) from
// 'path', auto-detecting ASCII vs. binary representation from the file.
// Throws a std::string on open failure or unknown representation.
void nbody::load_solution_info(string& path)
{
ifstream input;

cout << "Loading " << path << " ";

data_rep_t repres = file::get_data_repres(path);
switch (repres)
{
case DATA_REPRESENTATION_ASCII:
input.open(path.c_str(), ios::in);
if (input)
{
input >> t >> dt >> n_obj;
}
else
{
throw string("Cannot open " + path + ".");
}
break;
case DATA_REPRESENTATION_BINARY:
input.open(path.c_str(), ios::in | ios::binary);
if (input)
{
input.read((char*)&t, sizeof(var_t));
input.read((char*)&dt, sizeof(var_t));
input.read((char*)&n_obj, sizeof(uint32_t));
}
else
{
throw string("Cannot open " + path + ".");
}
break;
default:
throw string("Parameter 'repres' is out of range.");
}
input.close();

cout << " done" << endl;
}
// Read the per-body solution data (id, mass, position, velocity) from
// 'path', dispatching to load_ascii() or load_binary() based on the
// detected representation. Requires n_obj to be set (load_solution_info).
void nbody::load_solution_data(string& path)
{
ifstream input;

cout << "Loading " << path << " ";

data_rep_t repres = file::get_data_repres(path);
switch (repres)
{
case DATA_REPRESENTATION_ASCII:
input.open(path.c_str(), ios::in);
if (input)
{
load_ascii(input);
}
else
{
throw string("Cannot open " + path + ".");
}
break;
case DATA_REPRESENTATION_BINARY:
input.open(path.c_str(), ios::in | ios::binary);
if (input)
{
load_binary(input);
}
else
{
throw string("Cannot open " + path + ".");
}
break;
default:
throw string("Parameter 'repres' is out of range.");
}
input.close();

cout << " done" << endl;
}
// Parse per-body records from an ASCII stream: id, mass, 3 position
// components, 3 velocity components. h_y is laid out as all positions
// (3*n_obj values) followed by all velocities (3*n_obj values), hence the
// offset arithmetic below.
void nbody::load_ascii(ifstream& input)
{
for (uint32_t i = 0; i < n_obj; i++)
{
// id
input >> h_md[i].id;
// mass
input >> h_p[i];
uint32_t offset = 3*i;
// position
input >> h_y[offset+0] >> h_y[offset+1] >> h_y[offset+2];
offset += 3*n_obj;
// velocity
input >> h_y[offset+0] >> h_y[offset+1] >> h_y[offset+2];
}
}
// Parse per-body records from a binary stream. Record layout per body:
// [uint32 id][var_t mass][3 x var_t position][3 x var_t velocity],
// mirroring load_ascii(). Positions go to h_y[3i..], velocities to
// h_y[3*n_obj + 3i..] (the [all positions | all velocities] layout).
void nbody::load_binary(ifstream& input)
{
	for (uint32_t i = 0; i < n_obj; i++)
	{
		input.read((char*)(h_md + i), sizeof(uint32_t));
		input.read((char*)(h_p + i), sizeof(var_t));
		// BUGFIX: the phase-space record was previously read into h_p (the
		// parameter array) at a 6-stride -- clobbering the masses and never
		// populating h_y. It belongs in h_y, split into position and
		// velocity halves exactly as load_ascii() does.
		uint32_t offset = 3*i;
		input.read((char*)(h_y + offset), 3*sizeof(var_t));
		offset += 3*n_obj;
		input.read((char*)(h_y + offset), 3*sizeof(var_t));
	}
}
// Append the current solution to the info file (t, dt, n_obj) and the data
// file (per-body records), in either ASCII or binary representation.
// Throws a std::string on open failure or unknown representation.
void nbody::print_solution(std::string& path_si, std::string& path_sd, data_rep_t repres)
{
ofstream sout;

switch (repres)
{
case DATA_REPRESENTATION_ASCII:
sout.open(path_si.c_str(), ios::out | ios::app);
break;
case DATA_REPRESENTATION_BINARY:
sout.open(path_si.c_str(), ios::out | ios::app | ios::binary);
break;
default:
throw string("Parameter 'repres' is out of range.");
}
if (!sout)
{
throw string("Cannot open " + path_si + ".");
}
file::nbp::print_solution_info(sout, t, dt, n_obj, repres);
sout.close();

switch (repres)
{
case DATA_REPRESENTATION_ASCII:
sout.open(path_sd.c_str(), ios::out | ios::app);
break;
case DATA_REPRESENTATION_BINARY:
sout.open(path_sd.c_str(), ios::out | ios::app | ios::binary);
break;
default:
throw string("Parameter 'repres' is out of range.");
}
if (!sout)
{
throw string("Cannot open " + path_sd + ".");
}
file::nbp::print_solution_data(sout, n_obj, n_ppo, n_vpo, h_md, h_p, h_y, repres);
sout.close();
}
// Append one row of conserved quantities (time, barycenter position and
// velocity, angular momentum, total energy) to 'path' in fixed-width
// scientific notation. Throws a std::string if the file cannot be opened.
void nbody::print_integral(string& path)
{
ofstream sout;

sout.open(path.c_str(), ios::out | ios::app);
if (sout)
{
sout.precision(16);
sout.setf(ios::right);
sout.setf(ios::scientific);

sout << setw(VAR_T_W) << t << SEP /* time of the record [day] (double) */
<< setw(VAR_T_W) << integral.R.x << SEP /* x-position of the barycenter */
<< setw(VAR_T_W) << integral.R.y << SEP /* y-position of the barycenter */
<< setw(VAR_T_W) << integral.R.z << SEP /* z-position of the barycenter */
<< setw(VAR_T_W) << integral.V.x << SEP /* x-velocity of the barycenter */
<< setw(VAR_T_W) << integral.V.y << SEP /* y-velocity of the barycenter */
<< setw(VAR_T_W) << integral.V.z << SEP /* z-velocity of the barycenter */
<< setw(VAR_T_W) << integral.c.x << SEP /* x-angular momentum */
<< setw(VAR_T_W) << integral.c.y << SEP /* y-angular momentum */
<< setw(VAR_T_W) << integral.c.z << SEP /* z-angular momentum */
<< setw(VAR_T_W) << integral.h << endl; /* energy of the system */
}
else
{
throw string("Cannot open " + path + ".");
}
sout.close();
}
| 31dcd00cb048025aec9d9eb1d19417a9fa96d669.cu | #include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "nbody.h"
#include "redutil2.h"
#include "constants.h"
using namespace std;
using namespace redutil2;
namespace kernel_nbody
{
// Naive O(n^2) gravitational acceleration: one thread per body; each thread
// sums the inverse-square contribution of every other body, then scales by
// the square of the Gaussian gravitational constant (K2).
// Launch: 1D grid with at least n_obj threads; extra threads exit at the guard.
__global__
void calc_grav_accel_naive
    (
        uint32_t n_obj,                   // number of bodies
        const nbp_t::body_metadata* bmd,  // per-body metadata (not used here)
        const nbp_t::param_t* p,          // per-body parameters (mass is read)
        const var3_t* r,                  // positions
        var3_t* a                         // accelerations (output, overwritten)
    )
{
    const uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n_obj)
    {
        a[i].x = a[i].y = a[i].z = 0.0;
        var3_t r_ij = {0, 0, 0};
        for (uint32_t j = 0; j < n_obj; j++)
        {
            /* Skip the body with the same index */
            if (i == j)
            {
                continue;
            }
            // 3 FLOP
            r_ij.x = r[j].x - r[i].x;
            r_ij.y = r[j].y - r[i].y;
            r_ij.z = r[j].z - r[i].z;
            // 5 FLOP
            var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);    // = r2
            // 20 FLOP
            var_t d = sqrt(d2);                                    // = r
            // 2 FLOP
            // m_j / r^3 : strength of the pairwise pull along r_ij
            var_t s = p[j].mass / (d*d2);
            // 6 FLOP
            a[i].x += s * r_ij.x;
            a[i].y += s * r_ij.y;
            a[i].z += s * r_ij.z;
        } // 36 FLOP
        // Apply the gravitational constant once, after the accumulation
        a[i].x *= K2;
        a[i].y *= K2;
        a[i].z *= K2;
    }
}
} /* kernel_nbody */
// Construct the n-body problem: allocate storage, load the initial
// conditions from the solution info/data files and, when running on the
// GPU, mirror state and parameters to the device. Records the initial
// values of the conserved quantities via calc_integral().
nbody::nbody(string& path_si, string& path_sd, uint32_t n_obj, uint16_t n_ppo, comp_dev_t comp_dev) :
    ode(3, n_obj, 6, n_ppo, comp_dev)
{
    name = "Singular 3D n-body problem";
    initialize();
    allocate_storage();
    load_solution_info(path_si);
    load_solution_data(path_sd);
    if (PROC_UNIT_GPU == comp_dev.proc_unit)
    {
        copy_vars(COPY_DIRECTION_TO_DEVICE);
        copy_params(COPY_DIRECTION_TO_DEVICE);
    }
    calc_integral();
    // Next output epoch starts at the loaded time
    tout = t;
}

nbody::~nbody()
{
    deallocate_storage();
}

// Reset the metadata pointers; the actual buffers are allocated later.
void nbody::initialize()
{
    h_md = NULL;
    d_md = NULL;
    md = NULL;
}
// Allocate host metadata storage and, on the GPU, device storage as well.
void nbody::allocate_storage()
{
    allocate_host_storage();
    if (PROC_UNIT_GPU == comp_dev.proc_unit)
    {
        allocate_device_storage();
    }
}

void nbody::allocate_host_storage()
{
    ALLOCATE_HOST_VECTOR((void**)&(h_md), n_obj * sizeof(nbp_t::metadata_t));
}

void nbody::allocate_device_storage()
{
    ALLOCATE_DEVICE_VECTOR((void**)&(d_md), n_obj * sizeof(nbp_t::metadata_t));
}

// Release all metadata storage (device first, then host).
void nbody::deallocate_storage()
{
    //NOTE : First always release the DEVICE memory
    if (PROC_UNIT_GPU == comp_dev.proc_unit)
    {
        deallocate_device_storage();
    }
    deallocate_host_storage();
}

void nbody::deallocate_host_storage()
{
    FREE_HOST_VECTOR((void **)&(h_md));
}

void nbody::deallocate_device_storage()
{
    FREE_DEVICE_VECTOR((void **)&(d_md));
}
// Copy the per-body metadata between host and device in the given direction.
// Throws a std::string if 'dir' is not a recognized direction.
void nbody::copy_metadata(copy_direction_t dir)
{
    const size_t n_bytes = n_obj * sizeof(nbp_t::metadata_t);
    if (COPY_DIRECTION_TO_DEVICE == dir)
    {
        copy_vector_to_device(d_md, h_md, n_bytes);
    }
    else if (COPY_DIRECTION_TO_HOST == dir)
    {
        copy_vector_to_host(h_md, d_md, n_bytes);
    }
    else
    {
        throw std::string("Parameter 'dir' is out of range.");
    }
}
// Evaluate acceleration and jerk (for Hermite-style integrators).
// Only the CPU path is implemented; the GPU path throws.
void nbody::calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* acc, var_t* jrk)
{
    if (PROC_UNIT_CPU == comp_dev.proc_unit)
    {
        // TODO: implement the symmetric version
        cpu_calc_dy(stage, curr_t, y_temp, acc, jrk, false);
    }
    else
    {
        throw string("The nbody::gpu_calc_dy is not implemented.");
    }
}

// Evaluate dy/dt = (v, a), dispatching to the CPU or GPU implementation.
void nbody::calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy)
{
    if (PROC_UNIT_CPU == comp_dev.proc_unit)
    {
        cpu_calc_dy(stage, curr_t, y_temp, dy, true);
    }
    else
    {
        gpu_calc_dy(stage, curr_t, y_temp, dy);
    }
}
// CPU evaluation of acceleration and jerk. y_temp holds positions in the
// first 3*n_obj slots and velocities in the next 3*n_obj slots; acc/jrk
// receive one var3_t per body.
void nbody::cpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* acc, var_t* jrk, bool use_symm_prop)
{
    var3_t* r = (var3_t*)y_temp;
    var3_t* v = (var3_t*)(y_temp + 3*n_obj);
    var3_t* _acc = (var3_t*)(acc);
    var3_t* _jrk = (var3_t*)(jrk);

    // Clear the acceleration and jerk arrays: the += op can be used
    memset(_acc, 0, n_obj*sizeof(var3_t));
    memset(_jrk, 0, n_obj*sizeof(var3_t));

    nbp_t::param_t* p = (nbp_t::param_t*)h_p;
    if (use_symm_prop)
    {
        throw string("The symmetric version of nbody::cpu_calc_dy is not implemented.");
    }
    else
    {
        for (uint32_t i = 0; i < n_obj; i++)
        {
            var3_t r_ji = {0, 0, 0};
            var3_t v_ji = {0, 0, 0};
            for (uint32_t j = 0; j < n_obj; j++)
            {
                if (i == j)
                {
                    continue;
                }
                r_ji.x = r[j].x - r[i].x;
                r_ji.y = r[j].y - r[i].y;
                r_ji.z = r[j].z - r[i].z;
                var_t d2 = SQR(r_ji.x) + SQR(r_ji.y) + SQR(r_ji.z);
                var_t d = sqrt(d2);
                // K2 * m_j / r^3 (gravitational constant folded in per pair here)
                var_t d_3 = K2 / (d*d2);
                var_t s = p[j].mass * d_3;
                _acc[i].x += s * r_ji.x;
                _acc[i].y += s * r_ji.y;
                _acc[i].z += s * r_ji.z;

                v_ji.x = v[j].x - v[i].x;
                v_ji.y = v[j].y - v[i].y;
                v_ji.z = v[j].z - v[i].z;
                // alpha = 3 (r_ji . v_ji) / r^5
                var_t alpha = 3.0 * (r_ji.x * v_ji.x + r_ji.y * v_ji.y + r_ji.z * v_ji.z) / (d2 * d2 * d);
                // NOTE(review): the alpha term multiplies the *partially
                // accumulated* _acc[i] rather than the pairwise contribution
                // (s * r_ji); the standard Hermite jerk term is
                // m_j * (v_ji / r^3 - 3 (r.v) r_ji / r^5) — verify intent.
                _jrk[i].x += s * v_ji.x - alpha * _acc[i].x;
                _jrk[i].y += s * v_ji.y - alpha * _acc[i].y;
                _jrk[i].z += s * v_ji.z - alpha * _acc[i].z;
            }
        }
    }
}
// CPU evaluation of dy/dt: dy[0 .. 3*n_obj) receives the velocities copied
// from y_temp, dy[3*n_obj .. 6*n_obj) receives the gravitational
// accelerations. When use_symm_prop is set, Newton's third law is exploited
// so each pair is visited once.
void nbody::cpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy, bool use_symm_prop)
{
    // Copy the velocities into dy
    memcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t));

    var3_t* r = (var3_t*)y_temp;
    var3_t* a = (var3_t*)(dy + 3*n_obj);

    // Clear the acceleration array: the += op can be used
    memset(a, 0, 3*n_obj*sizeof(var_t));

    nbp_t::param_t* p = (nbp_t::param_t*)h_p;
    if (use_symm_prop)
    {
        // Symmetric evaluation: each (i, j) pair contributes to both bodies
        for (uint32_t i = 0; i < n_obj; i++)
        {
            var3_t r_ij = {0, 0, 0};
            for (uint32_t j = i+1; j < n_obj; j++)
            {
                r_ij.x = r[j].x - r[i].x;
                r_ij.y = r[j].y - r[i].y;
                r_ij.z = r[j].z - r[i].z;
                var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
                var_t d = sqrt(d2);
                var_t d_3 = 1.0 / (d*d2);
                var_t s = p[j].mass * d_3;
                a[i].x += s * r_ij.x;
                a[i].y += s * r_ij.y;
                a[i].z += s * r_ij.z;
                // Reaction on body j (opposite sign, scaled by m_i)
                s = p[i].mass * d_3;
                a[j].x -= s * r_ij.x;
                a[j].y -= s * r_ij.y;
                a[j].z -= s * r_ij.z;
            }
            a[i].x *= K2;
            a[i].y *= K2;
            a[i].z *= K2;
        }
    }
    else
    {
        // Full double loop: every ordered pair evaluated
        for (uint32_t i = 0; i < n_obj; i++)
        {
            var3_t r_ij = {0, 0, 0};
            for (uint32_t j = 0; j < n_obj; j++)
            {
                if (i == j)
                {
                    continue;
                }
                r_ij.x = r[j].x - r[i].x;
                r_ij.y = r[j].y - r[i].y;
                r_ij.z = r[j].z - r[i].z;
                var_t d2 = SQR(r_ij.x) + SQR(r_ij.y) + SQR(r_ij.z);
                var_t d = sqrt(d2);
                var_t d_3 = 1.0 / (d*d2);
                var_t s = p[j].mass * d_3;
                a[i].x += s * r_ij.x;
                a[i].y += s * r_ij.y;
                a[i].z += s * r_ij.z;
            }
            // Apply the gravitational constant once per body
            a[i].x *= K2;
            a[i].y *= K2;
            a[i].z *= K2;
        }
    }
}
// GPU evaluation of dy/dt: copies the velocity half of the state into dy
// (device-to-device) and launches the naive O(n^2) gravity kernel to fill
// the acceleration half.
void nbody::gpu_calc_dy(uint16_t stage, var_t curr_t, const var_t* y_temp, var_t* dy)
{
    // TODO: do a benchmark and set the optimal thread number
    {
        n_tpb = 256;
    }
    set_kernel_launch_param(n_var, n_tpb, grid, block);

    var3_t* r = (var3_t*)y_temp;
    var3_t* a = (var3_t*)(dy + 3*n_obj);
    nbp_t::param_t* p = (nbp_t::param_t*)d_p;

    // TODO: use asynchronous copy operation
    CUDA_SAFE_CALL(cudaMemcpy(dy, y_temp + 3*n_obj, 3*n_obj*sizeof(var_t), cudaMemcpyDeviceToDevice));

    kernel_nbody::calc_grav_accel_naive<<<grid, block>>>(n_obj, d_md, p, r, a);
    CUDA_CHECK_ERROR();
}
// Compute the conserved quantities (barycenter position/velocity, angular
// momentum, total energy) from the host-side state. On the first call the
// values are also stored as the reference (R0, V0, c0, h0) for later drift
// checks.
// NOTE(review): 'first_call' is a function-local static — assumes a single
// nbody instance and single-threaded use; verify if that ever changes.
void nbody::calc_integral()
{
    static bool first_call = true;

    nbp_t::param_t* p = (nbp_t::param_t*)h_p;
    var3_t* r = (var3_t*)h_y;
    var3_t* v = (var3_t*)(h_y + 3*n_obj);

    integral.R = tools::nbp::calc_position_of_bc(n_obj, p, r);
    integral.V = tools::nbp::calc_velocity_of_bc(n_obj, p, v);
    integral.c = tools::nbp::calc_angular_momentum(n_obj, p, r, v);
    integral.h = tools::nbp::calc_total_energy(n_obj, p, r, v);

    if (first_call)
    {
        first_call = false;
        integral.R0 = integral.R;
        integral.V0 = integral.V;
        integral.c0 = integral.c;
        integral.h0 = integral.h;
    }
}
// Read the solution header (epoch t, step size dt, object count n_obj) from
// 'path'. The representation (ASCII/binary) is inferred from the file.
// Throws a std::string on an unopenable file or unknown representation.
void nbody::load_solution_info(string& path)
{
    ifstream input;

    cout << "Loading " << path << " ";

    data_rep_t repres = file::get_data_repres(path);
    switch (repres)
    {
    case DATA_REPRESENTATION_ASCII:
        input.open(path.c_str(), ios::in);
        if (input)
        {
            input >> t >> dt >> n_obj;
        }
        else
        {
            throw string("Cannot open " + path + ".");
        }
        break;
    case DATA_REPRESENTATION_BINARY:
        input.open(path.c_str(), ios::in | ios::binary);
        if (input)
        {
            // Binary header layout: t (var_t), dt (var_t), n_obj (uint32_t)
            input.read((char*)&t, sizeof(var_t));
            input.read((char*)&dt, sizeof(var_t));
            input.read((char*)&n_obj, sizeof(uint32_t));
        }
        else
        {
            throw string("Cannot open " + path + ".");
        }
        break;
    default:
        throw string("Parameter 'repres' is out of range.");
    }
    input.close();

    cout << " done" << endl;
}
// Read the solution body (metadata, masses, state vector) from 'path',
// dispatching to the ASCII or binary loader according to the detected
// representation. Throws a std::string on an unopenable file or unknown
// representation.
void nbody::load_solution_data(string& path)
{
    cout << "Loading " << path << " ";

    data_rep_t repres = file::get_data_repres(path);

    ifstream input;
    if (DATA_REPRESENTATION_ASCII == repres)
    {
        input.open(path.c_str(), ios::in);
    }
    else if (DATA_REPRESENTATION_BINARY == repres)
    {
        input.open(path.c_str(), ios::in | ios::binary);
    }
    else
    {
        throw string("Parameter 'repres' is out of range.");
    }

    if (!input)
    {
        throw string("Cannot open " + path + ".");
    }

    if (DATA_REPRESENTATION_ASCII == repres)
    {
        load_ascii(input);
    }
    else
    {
        load_binary(input);
    }
    input.close();

    cout << " done" << endl;
}
// Read one ASCII record per body: id, mass, position (3 values), velocity
// (3 values). State layout in h_y: positions occupy [0, 3*n_obj), the
// matching velocities occupy [3*n_obj, 6*n_obj).
void nbody::load_ascii(ifstream& input)
{
    for (uint32_t i = 0; i < n_obj; i++)
    {
        // id
        input >> h_md[i].id;
        // mass
        input >> h_p[i];

        uint32_t offset = 3*i;
        // position
        input >> h_y[offset+0] >> h_y[offset+1] >> h_y[offset+2];
        // velocities live in the second half of the state vector
        offset += 3*n_obj;
        // velocity
        input >> h_y[offset+0] >> h_y[offset+1] >> h_y[offset+2];
    }
}
// Read one binary record per body: id (uint32_t), mass (var_t), position
// (3 x var_t), velocity (3 x var_t).
// Bug fix: the state vector was previously read into h_p + 6*i — that both
// overran the parameter array and left h_y unfilled. The state now goes
// into h_y using the same layout as load_ascii: positions in
// [0, 3*n_obj), velocities in [3*n_obj, 6*n_obj).
void nbody::load_binary(ifstream& input)
{
    for (uint32_t i = 0; i < n_obj; i++)
    {
        input.read((char*)(h_md + i), sizeof(uint32_t));
        input.read((char*)(h_p + i), sizeof(var_t));

        uint32_t offset = 3*i;
        // position
        input.read((char*)(h_y + offset), 3*sizeof(var_t));
        offset += 3*n_obj;
        // velocity
        input.read((char*)(h_y + offset), 3*sizeof(var_t));
    }
}
// Append the current solution to the info file (path_si) and the data file
// (path_sd), in either ASCII or binary representation.
// Throws a std::string on an invalid representation or an unopenable file.
void nbody::print_solution(std::string& path_si, std::string& path_sd, data_rep_t repres)
{
    ofstream sout;
    // First file: solution info (epoch, step size, object count)
    switch (repres)
    {
    case DATA_REPRESENTATION_ASCII:
        sout.open(path_si.c_str(), ios::out | ios::app);
        break;
    case DATA_REPRESENTATION_BINARY:
        sout.open(path_si.c_str(), ios::out | ios::app | ios::binary);
        break;
    default:
        throw string("Parameter 'repres' is out of range.");
    }
    if (!sout)
    {
        throw string("Cannot open " + path_si + ".");
    }
    file::nbp::print_solution_info(sout, t, dt, n_obj, repres);
    sout.close();

    // Second file: solution data (metadata, parameters, state vector)
    switch (repres)
    {
    case DATA_REPRESENTATION_ASCII:
        sout.open(path_sd.c_str(), ios::out | ios::app);
        break;
    case DATA_REPRESENTATION_BINARY:
        sout.open(path_sd.c_str(), ios::out | ios::app | ios::binary);
        break;
    default:
        throw string("Parameter 'repres' is out of range.");
    }
    if (!sout)
    {
        throw string("Cannot open " + path_sd + ".");
    }
    file::nbp::print_solution_data(sout, n_obj, n_ppo, n_vpo, h_md, h_p, h_y, repres);
    sout.close();
}
// Append one record of the conserved quantities (barycenter position and
// velocity, angular momentum, total energy) to the integral file.
// Throws a std::string if the file cannot be opened.
void nbody::print_integral(string& path)
{
    ofstream sout;

    sout.open(path.c_str(), ios::out | ios::app);
    if (sout)
    {
        sout.precision(16);
        sout.setf(ios::right);
        sout.setf(ios::scientific);

        sout << setw(VAR_T_W) << t << SEP            /* time of the record [day] (double) */
             << setw(VAR_T_W) << integral.R.x << SEP /* x-position of the barycenter      */
             << setw(VAR_T_W) << integral.R.y << SEP /* y-position of the barycenter      */
             << setw(VAR_T_W) << integral.R.z << SEP /* z-position of the barycenter      */
             << setw(VAR_T_W) << integral.V.x << SEP /* x-velocity of the barycenter      */
             << setw(VAR_T_W) << integral.V.y << SEP /* y-velocity of the barycenter      */
             << setw(VAR_T_W) << integral.V.z << SEP /* z-velocity of the barycenter      */
             << setw(VAR_T_W) << integral.c.x << SEP /* x-angular momentum                */
             << setw(VAR_T_W) << integral.c.y << SEP /* y-angular momentum                */
             << setw(VAR_T_W) << integral.c.z << SEP /* z-angular momentum                */
             << setw(VAR_T_W) << integral.h << endl; /* energy of the system              */
    }
    else
    {
        throw string("Cannot open " + path + ".");
    }
    sout.close();
}
|
6e7a94e4eca5f5319559a331fbdfeef7bd977afe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki, Wei Hu
// =============================================================================
//
// Class for performing time integration in fluid system.
// =============================================================================
#include "chrono_fsi/physics/ChFluidDynamics.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
using std::cout;
using std::endl;
namespace chrono {
namespace fsi {
// -----------------------------------------------------------------------------
// Device function to calculate the share of density influence on a given
// particle from all other particle in a given cell
// Accumulate, into 'numerator' and 'denominator', the Shepard-style density
// sums contributed by every particle of one grid cell to particle 'index'
// located at posRadA. Pairs beyond the kernel support radius are skipped.
// NOTE(review): 'index' and 'sortedVelMas' are accepted but not used in
// this body — presumably kept for interface symmetry; confirm.
__device__ void collideCellDensityReInit(Real& numerator,
                                         Real& denominator,
                                         int3 gridPos,
                                         uint index,
                                         Real3 posRadA,
                                         Real4* sortedPosRad,
                                         Real3* sortedVelMas,
                                         Real4* sortedRhoPreMu,
                                         uint* cellStart,
                                         uint* cellEnd) {
    uint gridHash = calcGridHash(gridPos);
    uint startIndex = cellStart[gridHash];
    if (startIndex != 0xffffffff) {  // cell is not empty
        // iterate over particles in this cell
        uint endIndex = cellEnd[gridHash];
        for (uint j = startIndex; j < endIndex; j++) {
            Real3 posRadB = mR3(sortedPosRad[j]);
            Real4 rhoPreMuB = sortedRhoPreMu[j];
            Real3 dist3 = Distance(posRadA, posRadB);
            Real d = length(dist3);
            // Outside the smoothing kernel support — no contribution
            if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML)
                continue;
            // Numerator: sum of m_j * W; denominator: sum of (m_j / rho_j) * W
            numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w);
            denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w);
        }
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along x
// Wrap active, non-boundary particles around the periodic x-boundaries and
// apply the prescribed pressure jump deltaPress.x to fluid particles
// (type w < -0.1) that cross. One thread per marker.
__global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    uint activity = activityIdentifierD[index];
    if (activity == 0)
        return;  // no need to do anything if it is not an active particle
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    // Crossed the high-x face: shift back by one domain length
    if (posRad.x > paramsD.cMax.x) {
        posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1)
            rhoPresMuD[index].y += paramsD.deltaPress.x;
        return;
    }
    // Crossed the low-x face: shift forward by one domain length
    if (posRad.x < paramsD.cMin.x) {
        posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1)
            rhoPresMuD[index].y -= paramsD.deltaPress.x;
        return;
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply inlet/outlet BC along x
// Inlet/outlet boundary along x: recycle particles that exit through the
// high-x face back to the low-x face, impose the inlet velocity V_in.x near
// the inlet, and adjust pressure by deltaPress.x on crossing.
// NOTE(review): the early return fires for w > 0.0 although the comment
// says "boundary particle"; elsewhere boundary is |w| < 0.1 — confirm the
// type convention used here.
__global__ void ApplyInletBoundaryXKernel(Real4* posRadD,
                                          Real3* VelMassD,
                                          Real4* rhoPresMuD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    Real4 rhoPresMu = rhoPresMuD[index];
    if (rhoPresMu.w > 0.0)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    // Exited through the outlet face: wrap around and raise pressure
    if (posRad.x > paramsD.cMax.x) {
        posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w <= 0.0) {
            rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
    // Exited through the inlet face: wrap around, reset to inlet velocity
    if (posRad.x < paramsD.cMin.x) {
        posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
        if (rhoPresMu.w <= -.1) {
            rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
    // Inside the inlet region: zero gauge pressure / enforce inlet velocity
    if (posRad.x > -paramsD.x_in)
        rhoPresMuD[index].y = 0;
    if (posRad.x < paramsD.x_in)
        VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along y
// Wrap active, non-boundary particles around the periodic y-boundaries and
// apply the prescribed pressure jump deltaPress.y to fluid particles
// (type w < -0.1) that cross. One thread per marker.
__global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    uint activity = activityIdentifierD[index];
    if (activity == 0)
        return;  // no need to do anything if it is not an active particle
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    // Crossed the high-y face: shift back by one domain length
    if (posRad.y > paramsD.cMax.y) {
        posRad.y -= (paramsD.cMax.y - paramsD.cMin.y);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y;
            rhoPresMuD[index] = rhoPresMu;
        }
        return;
    }
    // Crossed the low-y face: shift forward by one domain length
    if (posRad.y < paramsD.cMin.y) {
        posRad.y += (paramsD.cMax.y - paramsD.cMin.y);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y;
            rhoPresMuD[index] = rhoPresMu;
        }
        return;
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along z
// Wrap active, non-boundary particles around the periodic z-boundaries and
// apply the prescribed pressure jump deltaPress.z to fluid particles
// (type w < -0.1) that cross. One thread per marker.
__global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    uint activity = activityIdentifierD[index];
    if (activity == 0)
        return;  // no need to do anything if it is not an active particle
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    // Crossed the high-z face: shift back by one domain length
    if (posRad.z > paramsD.cMax.z) {
        posRad.z -= (paramsD.cMax.z - paramsD.cMin.z);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z;
            rhoPresMuD[index] = rhoPresMu;
        }
        return;
    }
    // Crossed the low-z face: shift forward by one domain length
    if (posRad.z < paramsD.cMin.z) {
        posRad.z += (paramsD.cMax.z - paramsD.cMin.z);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z;
            rhoPresMuD[index] = rhoPresMu;
        }
        return;
    }
}
// -----------------------------------------------------------------------------
// Kernel to keep particle inside the simulation domain
// Clamp non-boundary particles back into the simulation box
// ([-boxDim/2, boxDim/2] in x/y, [0, boxDimZ] in z). The velocity is read
// and written back unchanged (the clamp does not zero it).
__global__ void ApplyOutOfBoundaryKernel(Real4* posRadD,
                                         Real4* rhoPresMuD,
                                         Real3* velMasD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real3 vel = mR3(velMasD[index]);
    Real h = posRadD[index].w;
    if (posRad.x > 0.5 * paramsD.boxDimX)
        posRad.x = 0.5 * paramsD.boxDimX;
    if (posRad.x < -0.5 * paramsD.boxDimX)
        posRad.x = -0.5 * paramsD.boxDimX;
    if (posRad.y > 0.5 * paramsD.boxDimY)
        posRad.y = 0.5 * paramsD.boxDimY;
    if (posRad.y < -0.5 * paramsD.boxDimY)
        posRad.y = -0.5 * paramsD.boxDimY;
    if (posRad.z > 1.0 * paramsD.boxDimZ)
        posRad.z = 1.0 * paramsD.boxDimZ;
    if (posRad.z < -0.0 * paramsD.boxDimZ)
        posRad.z = -0.0 * paramsD.boxDimZ;
    posRadD[index] = mR4(posRad, h);
    velMasD[index] = mR3(vel);
    return;
}
// -----------------------------------------------------------------------------
// Kernel to update the fluid properities. It updates the stress tensor,
// density, velocity and position relying on explicit Euler scheme.
// Pressure is obtained from the density and an Equation of State.
// Explicit Euler update of one SPH marker: advances stress (granular /
// elastic-SPH path with mu(I) plasticity and a Drucker-Prager-style cap),
// position (with XSPH correction), velocity, and density/pressure.
// One thread per marker in [updatePortion.x, updatePortion.y).
// Sets *isErrorD and aborts the thread on a non-finite position or state.
__global__ void UpdateFluidD(Real4* posRadD,
                             Real3* velMasD,
                             Real4* rhoPresMuD,
                             Real3* tauXxYyZzD,
                             Real3* tauXyXzYzD,
                             Real3* vel_XSPH_D,
                             Real4* derivVelRhoD,
                             Real3* derivTauXxYyZzD,
                             Real3* derivTauXyXzYzD,
                             Real4* sr_tau_I_mu_iD,
                             uint* activityIdentifierD,
                             uint* freeSurfaceIdD,
                             int2 updatePortion,
                             Real dT,
                             volatile bool* isErrorD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    index += updatePortion.x;
    if (index >= updatePortion.y)
        return;

    // Only update active particles
    uint activity = activityIdentifierD[index];
    if (activity == 0)
        return;

    Real4 derivVelRho = derivVelRhoD[index];
    Real4 rhoPresMu = rhoPresMuD[index];
    Real h = posRadD[index].w;
    Real p_tr, p_n;

    // Fluid/granular particles have w < 0; boundary/solid markers are skipped
    if (rhoPresMu.w < 0) {
        // This is only implemented for granular material
        if (paramsD.elastic_SPH) {
            //--------------------------------
            // ** total stress tau
            //--------------------------------
            Real3 tauXxYyZz = tauXxYyZzD[index];
            Real3 tauXyXzYz = tauXyXzYzD[index];
            Real3 derivTauXxYyZz = derivTauXxYyZzD[index];
            Real3 derivTauXyXzYz = derivTauXyXzYzD[index];
            // Trial stress: explicit Euler step on the stress rate
            Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT;
            Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT;

            // check if there is a plastic flow
            // Split total stress into pressure (p = -tr(tau)/3) and deviator
            p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z);
            tauXxYyZz.x += p_n;
            tauXxYyZz.y += p_n;
            tauXxYyZz.z += p_n;
            p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z);
            updatedTauXxYyZz.x += p_tr;
            updatedTauXxYyZz.y += p_tr;
            updatedTauXxYyZz.z += p_tr;

            // Critical (tensile) pressure from cohesion and static friction
            Real coh = paramsD.Coh_coeff;
            Real inv_mus = 1.0 / paramsD.mu_fric_s;
            Real P_cri = - coh * inv_mus;
            if (p_tr > P_cri) {
                // Second invariants of the deviatoric trial and old stress
                Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
                              square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) +
                              2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z);
                Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) +
                             2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z);
                tau_tr = sqrt(0.5 * tau_tr);
                tau_n = sqrt(0.5 * tau_n);
                // Shear-rate proxy from the stress increment
                Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT;
                // should use the positive magnitude according to "A
                // constitutive law for dense granular flows" Nature 2006
                Real mu_s = paramsD.mu_fric_s;
                Real mu_2 = paramsD.mu_fric_2;
                // Real s_0 = mu_s * p_tr;
                // Real s_2 = mu_2 * p_tr;
                // Real xi = 1.1;
                Real dia = paramsD.ave_diam;
                Real I0 = paramsD.mu_I0;  // xi*dia*sqrt(rhoPresMu.x);//
                // Inertial number I and the mu(I) friction law
                Real I = Chi * dia * sqrt(paramsD.rho0 / p_tr);
                Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0E-9) / (I0 + I + 1.0E-9);
                // Real G0 = paramsD.G_shear;
                // Real alpha = xi*G0*I0*(dT)*sqrt(p_tr);
                // Real B0 = s_2 + tau_tr + alpha;
                // Real H0 = s_2*tau_tr + s_0*alpha;
                // Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9);
                // if(tau_tr>s_0){
                //     Real coeff = tau_n1/(tau_tr+1e-9);
                //     updatedTauXxYyZz = updatedTauXxYyZz*coeff;
                //     updatedTauXyXzYz = updatedTauXyXzYz*coeff;
                // }
                Real tau_max = p_tr * mu + coh;  // p_tr*paramsD.Q_FA;
                // should use tau_max instead of s_0 according to
                // "A constitutive law for dense granular flows" Nature 2006
                // Radial return: scale the deviator back onto the yield surface
                if (tau_tr > tau_max) {
                    Real coeff = tau_max / (tau_tr + 1e-9);
                    updatedTauXxYyZz = updatedTauXxYyZz * coeff;
                    updatedTauXyXzYz = updatedTauXyXzYz * coeff;
                }
            }
            // Set stress to the critical value if the pressure is smaller than it
            if (p_tr < P_cri) {
                Real coeff = abs(P_cri / (p_tr + 1e-9));
                updatedTauXxYyZz = updatedTauXxYyZz * coeff;
                updatedTauXyXzYz = updatedTauXyXzYz * coeff;
                // updatedTauXxYyZz = mR3(0.0);
                // updatedTauXyXzYz = mR3(0.0);
                p_tr = P_cri;
            }
            // Set stress to zero if the particle is close to free surface
            if (freeSurfaceIdD[index] == 1) {
                updatedTauXxYyZz = mR3(0.0);
                updatedTauXyXzYz = mR3(0.0);
                p_tr = 0.0;
            }
            // Optional diagnostic: store the deviatoric stress magnitude
            if (paramsD.output_length == 2) {
                Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
                              square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) +
                              square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z));
                tau_tr = sqrt(0.5 * tau_tr);
                sr_tau_I_mu_iD[index].y = tau_tr;
            }
            // Recombine deviator and pressure into the stored total stress
            tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr);
            tauXyXzYzD[index] = updatedTauXyXzYz;
        }

        //-------------
        // ** position
        //-------------
        Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index];  // paramsD.EPS_XSPH *
        Real3 posRad = mR3(posRadD[index]);
        Real3 updatedPositon = posRad + vel_XSPH * dT;
        if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) {
            printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
            *isErrorD = true;
            return;
        }
        posRadD[index] = mR4(updatedPositon, h);

        //-------------
        // ** velocity
        //-------------
        // Note that the velocity update should not use the XSPH contribution
        // It adds dissipation to the solution, and provides numerical damping
        Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index];  // paramsD.EPS_XSPH * vel_XSPH_D[index]
        Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT;
        velMasD[index] = updatedVelocity;

        //-------------
        // ** density
        //-------------
        if (paramsD.elastic_SPH) {  // This is only implemented for granular material
            rhoPresMu.y = p_tr;
            rhoPresMu.x = paramsD.rho0;
        } else {
            // Weakly compressible: integrate density, pressure from the EOS
            Real rho2 = rhoPresMu.x + derivVelRho.w * dT;
            rhoPresMu.y = Eos(rho2, rhoPresMu.w);
            rhoPresMu.x = rho2;
        }
        if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) {
            printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
            *isErrorD = true;
            return;
        }
        rhoPresMuD[index] = rhoPresMu;
    }

    // Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time
    // derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces.
    // calculate the force that is f=m dv/dt
    // derivVelRhoD[index] *= paramsD.markerMass;
}
//------------------------------------------------------------------------------
// Overwrite each marker's velocity with new_vel and advect its position by
// dT (used by the implicit schemes). One thread per marker.
// NOTE(review): only the upper bound updatePortion.y is enforced — indices
// below updatePortion.x are NOT skipped; confirm that is intended.
// NOTE(review): non-finite states are printed but *isErrorD is never set
// here, unlike UpdateFluidD — confirm.
__global__ void Update_Fluid_State(Real3* new_vel,       // input: solved velocities
                                   Real4* posRad,        // input/output: position + h (w)
                                   Real3* velMas,        // output: velocity
                                   Real4* rhoPreMu,      // checked for NaN only
                                   int4 updatePortion,
                                   const size_t numAllMarkers,
                                   double dT,
                                   volatile bool* isErrorD) {
    uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (i_idx >= updatePortion.y)
        return;

    velMas[i_idx] = new_vel[i_idx];
    // Advect with the freshly assigned velocity
    Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx];
    Real h = posRad[i_idx].w;
    posRad[i_idx] = mR4(newpos, h);

    if (!(isfinite(posRad[i_idx].x) &&
          isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) {
        printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n",
               i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w);
    }
    if (!(isfinite(rhoPreMu[i_idx].x) &&
          isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) {
        printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n",
               i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w);
    }
    if (!(isfinite(velMas[i_idx].x) &&
          isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) {
        printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n",
               i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z);
    }
}
// -----------------------------------------------------------------------------
// Kernel for updating the density.
// It calculates the density of the particle. It does include the normalization
// close to the boundaries and free surface.
// Re-initialize each particle's density by summing kernel-weighted mass over
// the 3x3x3 neighborhood of grid cells. Results go to dummySortedRhoPreMu;
// the input arrays are not modified. One thread per marker.
// NOTE(review): the Shepard normalization (division by 'denominator') and
// the EOS pressure update are commented out — the raw sum is stored as-is.
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu,
                                  Real4* sortedPosRad,
                                  Real3* sortedVelMas,
                                  Real4* sortedRhoPreMu,
                                  uint* gridMarkerIndex,
                                  uint* cellStart,
                                  uint* cellEnd,
                                  size_t numAllMarkers) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numAllMarkers)
        return;

    // read particle data from sorted arrays
    Real3 posRadA = mR3(sortedPosRad[index]);
    Real4 rhoPreMuA = sortedRhoPreMu[index];

    // get address in grid
    int3 gridPos = calcGridPos(posRadA);

    Real numerator = 0.0;
    Real denominator = 0.0;
    // examine neighbouring cells
    for (int z = -1; z <= 1; z++) {
        for (int y = -1; y <= 1; y++) {
            for (int x = -1; x <= 1; x++) {
                int3 neighbourPos = gridPos + mI3(x, y, z);
                collideCellDensityReInit(numerator, denominator, neighbourPos, index,
                                         posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd);
            }
        }
    }

    rhoPreMuA.x = numerator;  // denominator;
    // rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w);
    dummySortedRhoPreMu[index] = rhoPreMuA;
}
// -----------------------------------------------------------------------------
// Kernel for updating the activity of all particles.
// Decide per marker whether it is active (near any rigid body's active
// domain) and whether it is inside the slightly larger "extended" domain.
// During the settling phase every marker stays active. A deactivated
// marker's velocity is zeroed. One thread per marker in
// [updatePortion.x, updatePortion.y).
// NOTE(review): isErrorD is accepted but never written in this kernel.
__global__ void UpdateActivityD(Real4* posRadD,
                                Real3* velMasD,
                                Real3* posRigidBodiesD,
                                uint* activityIdentifierD,
                                uint* extendedActivityIdD,
                                int2 updatePortion,
                                size_t numRigidBodies,
                                Real Time,
                                volatile bool* isErrorD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    index += updatePortion.x;
    if (index >= updatePortion.y)
        return;

    // Set the particle as an active particle
    activityIdentifierD[index] = 1;
    extendedActivityIdD[index] = 1;

    // If during the settling phase, all particles are active
    if (Time < paramsD.settlingTime)
        return;

    // Check the activity of this particle
    uint isNotActive = 0;
    uint isNotExtended = 0;
    Real3 posRadA = mR3(posRadD[index]);
    for (uint num = 0; num < numRigidBodies; num++) {
        Real3 detPos = posRadA - posRigidBodiesD[num];
        Real3 Acdomain = paramsD.bodyActiveDomain;
        // Extended domain adds one kernel support radius on each side
        Real3 ExAcdomain = paramsD.bodyActiveDomain +
                           mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML);
        if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
            abs(detPos.z) > Acdomain.z)
            isNotActive = isNotActive + 1;
        if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
            abs(detPos.z) > ExAcdomain.z)
            isNotExtended = isNotExtended + 1;
    }

    // Set the particle as an inactive particle if needed
    // (only when it is outside the active domain of EVERY rigid body)
    if (isNotActive == numRigidBodies && numRigidBodies > 0) {
        activityIdentifierD[index] = 0;
        velMasD[index] = mR3(0.0);
    }
    if (isNotExtended == numRigidBodies && numRigidBodies > 0)
        extendedActivityIdD[index] = 0;

    return;
}
// -----------------------------------------------------------------------------
// CLASS FOR FLUID DYNAMICS SYSTEM
// -----------------------------------------------------------------------------
// Construct the fluid-dynamics driver and instantiate the force system
// matching the requested time integrator (I2SPH, IISPH, or explicit WCSPH).
// Unknown integrator types fall back to the explicit WCSPH force system.
ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker,
                                 ChSystemFsi_impl& otherFsiSystem,
                                 std::shared_ptr<SimParams> otherParamsH,
                                 std::shared_ptr<ChCounters> otherNumObjects,
                                 TimeIntegrator type,
                                 bool verb)
    : fsiSystem(otherFsiSystem),
      paramsH(otherParamsH),
      numObjectsH(otherNumObjects),
      integrator_type(type),
      verbose(verb) {
    switch (integrator_type) {
        case TimeIntegrator::I2SPH:
            forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            if (verbose) {
                cout << "============================================" << endl;
                cout << "======   Created an I2SPH framework   ======" << endl;
                cout << "============================================" << endl;
            }
            break;

        case TimeIntegrator::IISPH:
            forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            if (verbose) {
                cout << "============================================" << endl;
                cout << "======   Created an IISPH framework   ======" << endl;
                cout << "============================================" << endl;
            }
            break;

        case TimeIntegrator::EXPLICITSPH:
            forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            if (verbose) {
                cout << "============================================" << endl;
                cout << "======   Created a WCSPH framework   =======" << endl;
                cout << "============================================" << endl;
            }
            break;

        // Extend this function with your own linear solvers
        default:
            forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl;
    }
}
// -----------------------------------------------------------------------------
ChFluidDynamics::~ChFluidDynamics() {}

// -----------------------------------------------------------------------------
// Initialize the force system, then mirror the simulation parameters and
// counters into device constant memory; finally refresh the host copy of
// the parameters from the device so both sides agree.
void ChFluidDynamics::Initialize() {
    forceSystem->Initialize();
    hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
    hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
    hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
}
// -----------------------------------------------------------------------------
// Advance the SPH system by one step of size dT at time 'Time'.
// Explicit path: refresh particle activity, evaluate forces on
// sphMarkersD2, then explicitly update sphMarkersD1. Implicit paths
// evaluate forces on sphMarkersD1 (IISPH additionally applies its implicit
// update to sphMarkersD2). Boundary conditions are applied to sphMarkersD2
// in all cases.
void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2,
                                   std::shared_ptr<SphMarkerDataD> sphMarkersD1,
                                   std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
                                   std::shared_ptr<FsiMeshDataD> fsiMeshD,
                                   Real dT,
                                   Real Time) {
    if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) {
        this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, Time);
        forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD);
    } else
        forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD);

    if (integrator_type == TimeIntegrator::IISPH)
        this->UpdateFluid_Implicit(sphMarkersD2);
    else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH)
        this->UpdateFluid(sphMarkersD1, dT);

    this->ApplyBoundarySPH_Markers(sphMarkersD2);
}
// -----------------------------------------------------------------------------
// Host wrapper that launches UpdateActivityD over all markers to flag each
// particle active/inactive based on proximity to the rigid bodies.
void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1,
                                     std::shared_ptr<SphMarkerDataD> sphMarkersD2,
                                     std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
                                     Real Time) {
    // Update portion of the SPH particles (should be all particles here)
    int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers);
    // One-byte host/device error flag, allocated per call.
    // NOTE(review): the throw below precedes hipFree/free, so both flag
    // buffers leak on the error path.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    hipMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
    //------------------------
    uint numBlocks, numThreads;
    computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
    hipLaunchKernelGGL(( UpdateActivityD), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD),
        mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
        U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD),
        updatePortion, numObjectsH->numRigidBodies, Time, isErrorD);
    hipDeviceSynchronize();
    cudaCheckError();
    //------------------------
    hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
    if (*isErrorH == true)
        throw std::runtime_error("Error! program crashed in UpdateActivityD!\n");
    hipFree(isErrorD);
    free(isErrorH);
}
// -----------------------------------------------------------------------------
// Host wrapper for the explicit (WCSPH) state update: launches UpdateFluidD
// over the fluid markers (referenceArray[0]) to integrate stress, position,
// velocity, and density by dT.
void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) {
    // Update portion of the SPH particles (should be fluid particles only here)
    int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y);
    // One-byte host/device error flag, allocated per call.
    // NOTE(review): the throw below precedes hipFree/free, so both flag
    // buffers leak on the error path.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    hipMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
    //------------------------
    uint numBlocks, numThreads;
    computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
    hipLaunchKernelGGL(( UpdateFluidD), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD),
        mR3CAST(sphMarkersD->velMasD),
        mR4CAST(sphMarkersD->rhoPresMuD),
        mR3CAST(sphMarkersD->tauXxYyZzD),
        mR3CAST(sphMarkersD->tauXyXzYzD),
        mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
        mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD_old),
        mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD),
        mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD),
        mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
        U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD),
        updatePortion, dT, isErrorD);
    hipDeviceSynchronize();
    cudaCheckError();
    //------------------------
    hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
    if (*isErrorH == true)
        throw std::runtime_error("Error! program crashed in UpdateFluidD!\n");
    hipFree(isErrorD);
    free(isErrorH);
}
// -----------------------------------------------------------------------------
// Host wrapper for the implicit (IISPH) state update: launches
// Update_Fluid_State over the non-helper/non-ghost markers.
void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
    uint numThreads, numBlocks;
    computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
    // Skip leading helper/ghost marker segments in referenceArray, if any.
    int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
    int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
    int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x,
                             fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0);
    cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl;
    // One-byte host/device error flag, allocated per call.
    // NOTE(review): the throw below precedes hipFree/free, so both flag
    // buffers leak on the error path.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    hipMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( Update_Fluid_State), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
        mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
        mR4CAST(sphMarkersD->rhoPresMuD), updatePortion,
        numObjectsH->numAllMarkers, paramsH->dT, isErrorD);
    hipDeviceSynchronize();
    cudaCheckError();
    hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
    if (*isErrorH == true)
        throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n");
    hipFree(isErrorD);
    free(isErrorH);
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in x, y, and z directions
// Apply periodic boundary conditions in x, y, and z directions by launching
// one wrap-around kernel per axis over all markers.
void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
    uint numBlocks, numThreads;
    computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
    hipLaunchKernelGGL(( ApplyPeriodicBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
    hipDeviceSynchronize();
    cudaCheckError();
    hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
    hipDeviceSynchronize();
    cudaCheckError();
    hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
    hipDeviceSynchronize();
    cudaCheckError();
    // Optional clamp of out-of-bound particles, currently disabled:
    // ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>>
    // (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD));
    // hipDeviceSynchronize();
    // cudaCheckError();
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in y, and z.
// The inlet/outlet BC is applied in the x direction.
// This functions needs to be tested.
// Apply periodic boundary conditions in y and z; the x direction instead gets
// the inlet/outlet boundary condition. This function needs to be tested.
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
    uint numBlocks, numThreads;
    computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
    hipLaunchKernelGGL(( ApplyInletBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
        mR4CAST(sphMarkersD->rhoPresMuD));
    hipDeviceSynchronize();
    cudaCheckError();
    // these are useful anyway for out of bound particles
    hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
    hipDeviceSynchronize();
    cudaCheckError();
    hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
    hipDeviceSynchronize();
    cudaCheckError();
}
// -----------------------------------------------------------------------------
// Recompute particle densities from the kernel sum over neighbors
// (ReCalcDensityD_F1) and scatter the result back into both marker buffers.
void ChFluidDynamics::DensityReinitialization() {
    uint numBlocks, numThreads;
    computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
    // Temporary sorted-order output buffer for the recomputed rho/pres/mu.
    thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers);
    thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0));
    hipLaunchKernelGGL(( ReCalcDensityD_F1), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(dummySortedRhoPreMu),
        mR4CAST(fsiSystem.sortedSphMarkersD->posRadD),
        mR3CAST(fsiSystem.sortedSphMarkersD->velMasD),
        mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD),
        U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD),
        U1CAST(fsiSystem.markersProximityD->cellStartD),
        U1CAST(fsiSystem.markersProximityD->cellEndD),
        numObjectsH->numAllMarkers);
    hipDeviceSynchronize();
    cudaCheckError();
    // Map the sorted results back to original marker order in both buffers.
    ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
        fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu,
        fsiSystem.markersProximityD->gridMarkerIndexD);
    ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
        fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu,
        fsiSystem.markersProximityD->gridMarkerIndexD);
    dummySortedRhoPreMu.clear();
}
} // namespace fsi
} // end namespace chrono
| 6e7a94e4eca5f5319559a331fbdfeef7bd977afe.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki, Wei Hu
// =============================================================================
//
// Class for performing time integration in fluid system.
// =============================================================================
#include "chrono_fsi/physics/ChFluidDynamics.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
using std::cout;
using std::endl;
namespace chrono {
namespace fsi {
// -----------------------------------------------------------------------------
// Device function to calculate the share of density influence on a given
// particle from all other particle in a given cell
// Device helper: accumulate the density contribution to particle `index`
// (at position posRadA) from all particles in the grid cell `gridPos`.
// numerator   += sum_j m * W(|r_ij|, h_j)        (kernel density sum)
// denominator += sum_j (m / rho_j) * W(|r_ij|, h_j)  (Shepard-style weight)
// Neighbors beyond RESOLUTION_LENGTH_MULT * HSML are skipped.
__device__ void collideCellDensityReInit(Real& numerator,
                                         Real& denominator,
                                         int3 gridPos,
                                         uint index,
                                         Real3 posRadA,
                                         Real4* sortedPosRad,
                                         Real3* sortedVelMas,
                                         Real4* sortedRhoPreMu,
                                         uint* cellStart,
                                         uint* cellEnd) {
    uint gridHash = calcGridHash(gridPos);
    uint startIndex = cellStart[gridHash];
    if (startIndex != 0xffffffff) {  // cell is not empty
        // iterate over particles in this cell
        uint endIndex = cellEnd[gridHash];
        for (uint j = startIndex; j < endIndex; j++) {
            Real3 posRadB = mR3(sortedPosRad[j]);
            Real4 rhoPreMuB = sortedRhoPreMu[j];
            Real3 dist3 = Distance(posRadA, posRadB);
            Real d = length(dist3);
            if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML)
                continue;  // outside the kernel support radius
            // sortedPosRad[j].w carries the neighbor's smoothing length.
            numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w);
            denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w);
        }
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along x
// Kernel: wrap active, non-boundary particles across the periodic x faces
// and apply the prescribed pressure jump deltaPress.x to fluid markers.
// Launched with one thread per marker; guarded against overrun.
__global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    uint activity = activityIdentifierD[index];
    if (activity == 0)
        return;  // no need to do anything if it is not an active particle
    Real4 rhoPresMu = rhoPresMuD[index];
    // Marker type is encoded in rhoPresMu.w; |w| < 0.1 marks BCE/boundary.
    if (fabs(rhoPresMu.w) < .1)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;  // preserve smoothing length on rewrite
    if (posRad.x > paramsD.cMax.x) {
        posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1)  // fluid markers only get the pressure jump
            rhoPresMuD[index].y += paramsD.deltaPress.x;
        return;
    }
    if (posRad.x < paramsD.cMin.x) {
        posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1)
            rhoPresMuD[index].y -= paramsD.deltaPress.x;
        return;
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply inlet/outlet BC along x
// Kernel: inlet/outlet boundary condition along x. Particles leaving through
// x = cMax are recycled at x = cMin with the inlet velocity V_in, and the
// pressure jump deltaPress.x is applied. Near the inlet (|x| < |x_in|) the
// velocity is pinned to V_in and the pressure is zeroed past -x_in.
// NOTE(review): this path is flagged elsewhere in the file as untested.
__global__ void ApplyInletBoundaryXKernel(Real4* posRadD,
                                          Real3* VelMassD,
                                          Real4* rhoPresMuD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    Real4 rhoPresMu = rhoPresMuD[index];
    if (rhoPresMu.w > 0.0)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    if (posRad.x > paramsD.cMax.x) {
        posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        // This test is always true here given the early return above.
        if (rhoPresMu.w <= 0.0) {
            rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
    if (posRad.x < paramsD.cMin.x) {
        posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
        if (rhoPresMu.w <= -.1) {
            rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
    if (posRad.x > -paramsD.x_in)
        rhoPresMuD[index].y = 0;
    if (posRad.x < paramsD.x_in)
        VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along y
// Kernel: wrap active, non-boundary particles across the periodic y faces and
// apply the pressure jump deltaPress.y to fluid markers that cross a face.
__global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= numObjectsD.numAllMarkers)
        return;
    if (activityIdentifierD[tid] == 0)
        return;  // inactive particles are left untouched
    Real4 rpm = rhoPresMuD[tid];
    if (fabs(rpm.w) < .1)
        return;  // boundary (BCE) markers are left untouched
    Real4 pr = posRadD[tid];
    Real3 pos = mR3(pr);
    Real span = paramsD.cMax.y - paramsD.cMin.y;
    if (pos.y > paramsD.cMax.y) {
        // Left through the high face: re-enter at the low face.
        pos.y -= span;
        posRadD[tid] = mR4(pos, pr.w);
        if (rpm.w < -.1) {  // fluid markers only
            rpm.y = rpm.y + paramsD.deltaPress.y;
            rhoPresMuD[tid] = rpm;
        }
    } else if (pos.y < paramsD.cMin.y) {
        // Left through the low face: re-enter at the high face.
        pos.y += span;
        posRadD[tid] = mR4(pos, pr.w);
        if (rpm.w < -.1) {
            rpm.y = rpm.y - paramsD.deltaPress.y;
            rhoPresMuD[tid] = rpm;
        }
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along z
// Kernel: wrap active, non-boundary particles across the periodic z faces and
// apply the pressure jump deltaPress.z to fluid markers that cross a face.
__global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= numObjectsD.numAllMarkers)
        return;
    if (activityIdentifierD[tid] == 0)
        return;  // inactive particles are left untouched
    Real4 rpm = rhoPresMuD[tid];
    if (fabs(rpm.w) < .1)
        return;  // boundary (BCE) markers are left untouched
    Real4 pr = posRadD[tid];
    Real3 pos = mR3(pr);
    Real span = paramsD.cMax.z - paramsD.cMin.z;
    if (pos.z > paramsD.cMax.z) {
        // Left through the high face: re-enter at the low face.
        pos.z -= span;
        posRadD[tid] = mR4(pos, pr.w);
        if (rpm.w < -.1) {  // fluid markers only
            rpm.y = rpm.y + paramsD.deltaPress.z;
            rhoPresMuD[tid] = rpm;
        }
    } else if (pos.z < paramsD.cMin.z) {
        // Left through the low face: re-enter at the high face.
        pos.z += span;
        posRadD[tid] = mR4(pos, pr.w);
        if (rpm.w < -.1) {
            rpm.y = rpm.y - paramsD.deltaPress.z;
            rhoPresMuD[tid] = rpm;
        }
    }
}
// -----------------------------------------------------------------------------
// Kernel to keep particle inside the simulation domain
// Kernel: clamp non-boundary particles into the simulation box
// ([-boxDim/2, boxDim/2] in x/y, [0, boxDimZ] in z). Currently only invoked
// from commented-out call sites in this file.
__global__ void ApplyOutOfBoundaryKernel(Real4* posRadD,
                                         Real4* rhoPresMuD,
                                         Real3* velMasD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real3 vel = mR3(velMasD[index]);
    Real h = posRadD[index].w;
    if (posRad.x > 0.5 * paramsD.boxDimX)
        posRad.x = 0.5 * paramsD.boxDimX;
    if (posRad.x < -0.5 * paramsD.boxDimX)
        posRad.x = -0.5 * paramsD.boxDimX;
    if (posRad.y > 0.5 * paramsD.boxDimY)
        posRad.y = 0.5 * paramsD.boxDimY;
    if (posRad.y < -0.5 * paramsD.boxDimY)
        posRad.y = -0.5 * paramsD.boxDimY;
    if (posRad.z > 1.0 * paramsD.boxDimZ)
        posRad.z = 1.0 * paramsD.boxDimZ;
    if (posRad.z < -0.0 * paramsD.boxDimZ)
        posRad.z = -0.0 * paramsD.boxDimZ;
    posRadD[index] = mR4(posRad, h);
    // NOTE(review): vel is never modified, so this write is a self-assignment;
    // presumably a velocity reset was intended here — confirm.
    velMasD[index] = mR3(vel);
    return;
}
// -----------------------------------------------------------------------------
// Kernel to update the fluid properities. It updates the stress tensor,
// density, velocity and position relying on explicit Euler scheme.
// Pressure is obtained from the density and an Equation of State.
// Kernel: explicit (WCSPH) update of the fluid state over one step dT.
// For granular material (paramsD.elastic_SPH) it also integrates the deviatoric
// stress with a Drucker-Prager / mu(I) plastic return mapping. Position uses
// the XSPH-corrected velocity; the velocity update deliberately does not.
// Pressure comes from the stress trace (elastic) or an equation of state.
// Sets *isErrorD and returns early on non-finite position or density state.
__global__ void UpdateFluidD(Real4* posRadD,              // position (xyz) + smoothing length (w)
                             Real3* velMasD,              // velocity
                             Real4* rhoPresMuD,           // density, pressure, viscosity, marker type
                             Real3* tauXxYyZzD,           // stress diagonal
                             Real3* tauXyXzYzD,           // stress off-diagonal
                             Real3* vel_XSPH_D,           // XSPH velocity correction
                             Real4* derivVelRhoD,         // dv/dt (xyz) and drho/dt (w)
                             Real3* derivTauXxYyZzD,      // stress-rate diagonal
                             Real3* derivTauXyXzYzD,      // stress-rate off-diagonal
                             Real4* sr_tau_I_mu_iD,       // diagnostics output (shear rate, tau, I, mu)
                             uint* activityIdentifierD,   // 1 = active, 0 = frozen
                             uint* freeSurfaceIdD,        // 1 = near free surface
                             int2 updatePortion,          // [x, y): marker range to update
                             Real dT,
                             volatile bool* isErrorD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    index += updatePortion.x;
    if (index >= updatePortion.y)
        return;
    uint activity = activityIdentifierD[index];
    if (activity == 0)
        return;  // frozen particles keep their state
    Real4 derivVelRho = derivVelRhoD[index];
    Real4 rhoPresMu = rhoPresMuD[index];
    Real h = posRadD[index].w;
    Real p_tr, p_n;  // trial and previous pressures (set in the elastic branch)
    // Only fluid/granular markers (w < 0) are integrated.
    if (rhoPresMu.w < 0) {
        // This is only implemented for granular material
        if (paramsD.elastic_SPH) {
            //--------------------------------
            // ** total stress tau
            //--------------------------------
            Real3 tauXxYyZz = tauXxYyZzD[index];
            Real3 tauXyXzYz = tauXyXzYzD[index];
            Real3 derivTauXxYyZz = derivTauXxYyZzD[index];
            Real3 derivTauXyXzYz = derivTauXyXzYzD[index];
            // Explicit Euler step of the stress tensor.
            Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT;
            Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT;
            // check if there is a plastic flow
            // Split pressure off the diagonal to leave the deviatoric part.
            p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z);
            tauXxYyZz.x += p_n;
            tauXxYyZz.y += p_n;
            tauXxYyZz.z += p_n;
            p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z);
            updatedTauXxYyZz.x += p_tr;
            updatedTauXxYyZz.y += p_tr;
            updatedTauXxYyZz.z += p_tr;
            Real coh = paramsD.Coh_coeff;
            Real inv_mus = 1.0 / paramsD.mu_fric_s;
            Real P_cri = - coh * inv_mus;  // tensile pressure cutoff
            if (p_tr > P_cri) {
                // Second invariants of the trial and previous deviatoric stress.
                Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
                              square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) +
                              2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z);
                Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) +
                             2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z);
                tau_tr = sqrt(0.5 * tau_tr);
                tau_n = sqrt(0.5 * tau_n);
                Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT;
                // should use the positive magnitude according to "A
                // constitutive law for dense granular flows" Nature 2006
                Real mu_s = paramsD.mu_fric_s;
                Real mu_2 = paramsD.mu_fric_2;
                // Real s_0 = mu_s * p_tr;
                // Real s_2 = mu_2 * p_tr;
                // Real xi = 1.1;
                Real dia = paramsD.ave_diam;
                Real I0 = paramsD.mu_I0;  // xi*dia*sqrt(rhoPresMu.x);//
                Real I = Chi * dia * sqrt(paramsD.rho0 / p_tr);
                // mu(I) rheology interpolation between static and dynamic friction.
                Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0E-9) / (I0 + I + 1.0E-9);
                // Real G0 = paramsD.G_shear;
                // Real alpha = xi*G0*I0*(dT)*sqrt(p_tr);
                // Real B0 = s_2 + tau_tr + alpha;
                // Real H0 = s_2*tau_tr + s_0*alpha;
                // Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9);
                // if(tau_tr>s_0){
                //     Real coeff = tau_n1/(tau_tr+1e-9);
                //     updatedTauXxYyZz = updatedTauXxYyZz*coeff;
                //     updatedTauXyXzYz = updatedTauXyXzYz*coeff;
                // }
                Real tau_max = p_tr * mu + coh;  // p_tr*paramsD.Q_FA;
                // should use tau_max instead of s_0 according to
                // "A constitutive law for dense granular flows" Nature 2006
                if (tau_tr > tau_max) {
                    // Plastic return: scale the deviatoric stress back to the yield surface.
                    Real coeff = tau_max / (tau_tr + 1e-9);
                    updatedTauXxYyZz = updatedTauXxYyZz * coeff;
                    updatedTauXyXzYz = updatedTauXyXzYz * coeff;
                }
            }
            // Set stress to the critical value if the pressure is smaller than it
            if (p_tr < P_cri) {
                Real coeff = abs(P_cri / (p_tr + 1e-9));
                updatedTauXxYyZz = updatedTauXxYyZz * coeff;
                updatedTauXyXzYz = updatedTauXyXzYz * coeff;
                // updatedTauXxYyZz = mR3(0.0);
                // updatedTauXyXzYz = mR3(0.0);
                p_tr = P_cri;
            }
            // Set stress to zero if the particle is close to free surface
            if (freeSurfaceIdD[index] == 1) {
                updatedTauXxYyZz = mR3(0.0);
                updatedTauXyXzYz = mR3(0.0);
                p_tr = 0.0;
            }
            if (paramsD.output_length == 2) {
                // Record the final shear-stress invariant for output.
                Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
                              square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) +
                              square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z));
                tau_tr = sqrt(0.5 * tau_tr);
                sr_tau_I_mu_iD[index].y = tau_tr;
            }
            // Re-assemble the total stress (deviatoric minus pressure on the diagonal).
            tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr);
            tauXyXzYzD[index] = updatedTauXyXzYz;
        }
        //-------------
        // ** position
        //-------------
        Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index];  // paramsD.EPS_XSPH *
        Real3 posRad = mR3(posRadD[index]);
        Real3 updatedPositon = posRad + vel_XSPH * dT;
        if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) {
            printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
            *isErrorD = true;
            return;
        }
        posRadD[index] = mR4(updatedPositon, h);
        //-------------
        // ** velocity
        //-------------
        // Note that the velocity update should not use the XSPH contribution
        // It adds dissipation to the solution, and provides numerical damping
        Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index];  // paramsD.EPS_XSPH * vel_XSPH_D[index]
        Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT;
        velMasD[index] = updatedVelocity;
        //-------------
        // ** density
        //-------------
        if (paramsD.elastic_SPH) {  // This is only implemented for granular material
            // Granular: pressure comes from the stress trace, density stays at rho0.
            rhoPresMu.y = p_tr;
            rhoPresMu.x = paramsD.rho0;
        } else {
            // WCSPH: integrate the continuity equation, then apply the EOS.
            Real rho2 = rhoPresMu.x + derivVelRho.w * dT;
            rhoPresMu.y = Eos(rho2, rhoPresMu.w);
            rhoPresMu.x = rho2;
        }
        if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) {
            printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
            *isErrorD = true;
            return;
        }
        rhoPresMuD[index] = rhoPresMu;
    }
    // Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time
    // derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces.
    // calculate the force that is f=m dv/dt
    // derivVelRhoD[index] *= paramsD.markerMass;
}
//------------------------------------------------------------------------------
// Kernel: advance particle state for the implicit (IISPH) scheme.
// Velocities are overwritten with the solver output (new_vel) and positions
// are advanced with explicit Euler. On any non-finite position, density, or
// velocity the device error flag is raised so the host wrapper can throw
// (previously the kernel only printed and never set isErrorD, so the host's
// error check could not trigger).
__global__ void Update_Fluid_State(Real3* new_vel,      // updated velocities from the implicit solve
                                   Real4* posRad,       // position (xyz) + smoothing length (w)
                                   Real3* velMas,       // velocities, overwritten here
                                   Real4* rhoPreMu,     // density, pressure, viscosity, marker type
                                   int4 updatePortion,  // marker range; .y is the exclusive end
                                   const size_t numAllMarkers,
                                   double dT,
                                   volatile bool* isErrorD) {
    uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
    // NOTE(review): only the upper bound is honored; threads with
    // i_idx < updatePortion.x are also updated — confirm this is intended.
    if (i_idx >= updatePortion.y)
        return;
    velMas[i_idx] = new_vel[i_idx];
    Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx];
    Real h = posRad[i_idx].w;
    posRad[i_idx] = mR4(newpos, h);
    if (!(isfinite(posRad[i_idx].x) &&
          isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) {
        printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n",
               i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w);
        *isErrorD = true;
    }
    if (!(isfinite(rhoPreMu[i_idx].x) &&
          isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) {
        printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n",
               i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w);
        *isErrorD = true;
    }
    if (!(isfinite(velMas[i_idx].x) &&
          isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) {
        printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n",
               i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z);
        *isErrorD = true;
    }
}
// -----------------------------------------------------------------------------
// Kernel for updating the density.
// It calculates the density of the particle. It does include the normalization
// close to the boundaries and free surface.
// Kernel for updating the density.
// Recomputes each particle's density as the SPH kernel sum over the 27
// neighboring cells (via collideCellDensityReInit) and writes the result to
// dummySortedRhoPreMu, leaving the input buffers untouched.
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu,
                                  Real4* sortedPosRad,
                                  Real3* sortedVelMas,
                                  Real4* sortedRhoPreMu,
                                  uint* gridMarkerIndex,
                                  uint* cellStart,
                                  uint* cellEnd,
                                  size_t numAllMarkers) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numAllMarkers)
        return;
    // read particle data from sorted arrays
    Real3 posRadA = mR3(sortedPosRad[index]);
    Real4 rhoPreMuA = sortedRhoPreMu[index];
    // get address in grid
    int3 gridPos = calcGridPos(posRadA);
    Real numerator = 0.0;
    Real denominator = 0.0;
    // examine neighbouring cells
    for (int z = -1; z <= 1; z++) {
        for (int y = -1; y <= 1; y++) {
            for (int x = -1; x <= 1; x++) {
                int3 neighbourPos = gridPos + mI3(x, y, z);
                collideCellDensityReInit(numerator, denominator, neighbourPos, index,
                                         posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd);
            }
        }
    }
    // Raw kernel sum; the Shepard normalization (divide by denominator) is
    // currently disabled, as is the EOS pressure update below.
    rhoPreMuA.x = numerator;  // denominator;
    // rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w);
    dummySortedRhoPreMu[index] = rhoPreMuA;
}
// -----------------------------------------------------------------------------
// Kernel for updating the activity of all particles.
// Kernel for updating the activity of all particles.
// A particle is active if it lies inside the active domain of at least one
// rigid body (and "extended active" inside the slightly larger domain used
// for neighbor searches). During the settling phase all particles are active.
// Inactive particles additionally get their velocity zeroed.
__global__ void UpdateActivityD(Real4* posRadD,
                                Real3* velMasD,
                                Real3* posRigidBodiesD,
                                uint* activityIdentifierD,
                                uint* extendedActivityIdD,
                                int2 updatePortion,
                                size_t numRigidBodies,
                                Real Time,
                                volatile bool* isErrorD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    index += updatePortion.x;
    if (index >= updatePortion.y)
        return;
    // Set the particle as an active particle
    activityIdentifierD[index] = 1;
    extendedActivityIdD[index] = 1;
    // If during the settling phase, all particles are active
    if (Time < paramsD.settlingTime)
        return;
    // Check the activity of this particle: count the bodies whose (extended)
    // active domain does NOT contain it.
    uint isNotActive = 0;
    uint isNotExtended = 0;
    Real3 posRadA = mR3(posRadD[index]);
    for (uint num = 0; num < numRigidBodies; num++) {
        Real3 detPos = posRadA - posRigidBodiesD[num];
        Real3 Acdomain = paramsD.bodyActiveDomain;
        // Extended domain adds a margin of two kernel support radii.
        Real3 ExAcdomain = paramsD.bodyActiveDomain +
                           mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML);
        if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
            abs(detPos.z) > Acdomain.z)
            isNotActive = isNotActive + 1;
        if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
            abs(detPos.z) > ExAcdomain.z)
            isNotExtended = isNotExtended + 1;
    }
    // Set the particle as an inactive particle if it is outside every body's
    // active domain (only meaningful when rigid bodies exist).
    if (isNotActive == numRigidBodies && numRigidBodies > 0) {
        activityIdentifierD[index] = 0;
        velMasD[index] = mR3(0.0);
    }
    if (isNotExtended == numRigidBodies && numRigidBodies > 0)
        extendedActivityIdD[index] = 0;
    return;
}
// -----------------------------------------------------------------------------
// CLASS FOR FLUID DYNAMICS SYSTEM
// -----------------------------------------------------------------------------
// Construct the fluid-dynamics driver and instantiate the force system that
// matches the requested time integrator. Unknown integrator types fall back
// to the explicit WCSPH force system.
ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker,
                                 ChSystemFsi_impl& otherFsiSystem,
                                 std::shared_ptr<SimParams> otherParamsH,
                                 std::shared_ptr<ChCounters> otherNumObjects,
                                 TimeIntegrator type,
                                 bool verb)
    : fsiSystem(otherFsiSystem),
      paramsH(otherParamsH),
      numObjectsH(otherNumObjects),
      integrator_type(type),
      verbose(verb) {
    switch (integrator_type) {
        case TimeIntegrator::I2SPH:
            forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            if (verbose) {
                cout << "============================================" << endl;
                cout << "======   Created an I2SPH framework   ======" << endl;
                cout << "============================================" << endl;
            }
            break;
        case TimeIntegrator::IISPH:
            forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            if (verbose) {
                cout << "============================================" << endl;
                cout << "======   Created an IISPH framework   ======" << endl;
                cout << "============================================" << endl;
            }
            break;
        case TimeIntegrator::EXPLICITSPH:
            forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            if (verbose) {
                cout << "============================================" << endl;
                cout << "======   Created a WCSPH framework   =======" << endl;
                cout << "============================================" << endl;
            }
            break;
        // Extend this function with your own linear solvers
        default:
            forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
                otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
                fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
            cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl;
    }
}
// -----------------------------------------------------------------------------
// Trivial destructor: all owned resources are released by smart pointers.
ChFluidDynamics::~ChFluidDynamics() = default;
// -----------------------------------------------------------------------------
// Initialize the force system and publish host-side parameters/counters to the
// device __constant__ symbols (paramsD, numObjectsD) used by the kernels above.
void ChFluidDynamics::Initialize() {
    forceSystem->Initialize();
    cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
    cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
    // NOTE(review): this synchronous read-back immediately round-trips paramsH
    // through the device symbol right after the async writes — confirm intended.
    cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
}
// -----------------------------------------------------------------------------
// Perform one SPH time step of size dT at simulation time Time.
// For the explicit (WCSPH) integrator: update particle activity, compute
// forces on sphMarkersD2, then integrate into sphMarkersD1. For implicit
// integrators forces are computed on sphMarkersD1 and IISPH updates
// sphMarkersD2. Periodic BCs are applied to sphMarkersD2 in all cases.
void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2,
                                   std::shared_ptr<SphMarkerDataD> sphMarkersD1,
                                   std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
                                   std::shared_ptr<FsiMeshDataD> fsiMeshD,
                                   Real dT,
                                   Real Time) {
    if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) {
        // Activity flags must be refreshed before the force evaluation.
        this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, Time);
        forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD);
    } else
        forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD);
    if (integrator_type == TimeIntegrator::IISPH)
        this->UpdateFluid_Implicit(sphMarkersD2);
    else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH)
        this->UpdateFluid(sphMarkersD1, dT);
    this->ApplyBoundarySPH_Markers(sphMarkersD2);
}
// -----------------------------------------------------------------------------
// Host wrapper that launches UpdateActivityD over all markers to flag each
// particle active/inactive based on proximity to the rigid bodies.
// Throws std::runtime_error if the kernel raises its error flag.
void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1,
                                     std::shared_ptr<SphMarkerDataD> sphMarkersD2,
                                     std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
                                     Real Time) {
    // Update portion of the SPH particles (should be all particles here)
    int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers);
    // One-byte host/device error flag for the kernel.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    cudaMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
    //------------------------
    uint numBlocks, numThreads;
    computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
    UpdateActivityD<<<numBlocks, numThreads>>>(
        mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD),
        mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
        U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD),
        updatePortion, numObjectsH->numRigidBodies, Time, isErrorD);
    cudaDeviceSynchronize();
    cudaCheckError();
    //------------------------
    cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
    bool hasError = *isErrorH;
    // Release the flag buffers before a potential throw (the previous code
    // leaked both allocations on the error path).
    cudaFree(isErrorD);
    free(isErrorH);
    if (hasError)
        throw std::runtime_error("Error! program crashed in UpdateActivityD!\n");
}
// -----------------------------------------------------------------------------
// Host wrapper for the explicit (WCSPH) state update: launches UpdateFluidD
// over the fluid markers (referenceArray[0]) to integrate stress, position,
// velocity, and density by dT. Throws std::runtime_error if the kernel
// raises its error flag.
void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) {
    // Update portion of the SPH particles (should be fluid particles only here)
    int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y);
    // One-byte host/device error flag for the kernel.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    cudaMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
    //------------------------
    uint numBlocks, numThreads;
    computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
    UpdateFluidD<<<numBlocks, numThreads>>>(
        mR4CAST(sphMarkersD->posRadD),
        mR3CAST(sphMarkersD->velMasD),
        mR4CAST(sphMarkersD->rhoPresMuD),
        mR3CAST(sphMarkersD->tauXxYyZzD),
        mR3CAST(sphMarkersD->tauXyXzYzD),
        mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
        mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD_old),
        mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD),
        mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD),
        mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
        U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD),
        updatePortion, dT, isErrorD);
    cudaDeviceSynchronize();
    cudaCheckError();
    //------------------------
    cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
    bool hasError = *isErrorH;
    // Release the flag buffers before a potential throw (the previous code
    // leaked both allocations on the error path).
    cudaFree(isErrorD);
    free(isErrorH);
    if (hasError)
        throw std::runtime_error("Error! program crashed in UpdateFluidD!\n");
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x,
fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0);
cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl;
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Update_Fluid_State<<<numBlocks, numThreads>>>(
mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD), updatePortion,
numObjectsH->numAllMarkers, paramsH->dT, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n");
cudaFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in x, y, and z directions
void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
ApplyPeriodicBoundaryXKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
// ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>>
// (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD));
// cudaDeviceSynchronize();
// cudaCheckError();
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in y, and z.
// The inlet/outlet BC is applied in the x direction.
// This functions needs to be tested.
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
ApplyInletBoundaryXKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD));
cudaDeviceSynchronize();
cudaCheckError();
// these are useful anyway for out of bound particles
ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::DensityReinitialization() {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers);
thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0));
ReCalcDensityD_F1<<<numBlocks, numThreads>>>(
mR4CAST(dummySortedRhoPreMu),
mR4CAST(fsiSystem.sortedSphMarkersD->posRadD),
mR3CAST(fsiSystem.sortedSphMarkersD->velMasD),
mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD),
U1CAST(fsiSystem.markersProximityD->cellStartD),
U1CAST(fsiSystem.markersProximityD->cellEndD),
numObjectsH->numAllMarkers);
cudaDeviceSynchronize();
cudaCheckError();
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
dummySortedRhoPreMu.clear();
}
} // namespace fsi
} // end namespace chrono
|
523eabc8051fd9fe3237fa3b0a9f88d9ec7de274.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_left;
int xdim0_update_halo_kernel2_yvel_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_left;
int ydim0_update_halo_kernel2_yvel_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_left;
int xdim1_update_halo_kernel2_yvel_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_left;
int ydim1_update_halo_kernel2_yvel_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_plus_4_left*(y)+xdim0_update_halo_kernel2_yvel_plus_4_left*ydim0_update_halo_kernel2_yvel_plus_4_left*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_plus_4_left*(y)+xdim1_update_halo_kernel2_yvel_plus_4_left*ydim1_update_halo_kernel2_yvel_plus_4_left*(z))
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_left_gpu(double *yvel0, double *yvel1, const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = yvel0[OPS_ACC0(4,0,0)];
if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = yvel1[OPS_ACC1(4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_left + idx_z * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_left * ydim0_update_halo_kernel2_yvel_plus_4_left;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_left + idx_z * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_left * ydim1_update_halo_kernel2_yvel_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,39)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
OPS_kernels[39].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_left_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_left_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_left_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_left_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_plus_4_left, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_yvel_plus_4_left_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_plus_4_left, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_yvel_plus_4_left_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_plus_4_left, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_yvel_plus_4_left_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_plus_4_left, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_yvel_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[39].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 39;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 39;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
| 523eabc8051fd9fe3237fa3b0a9f88d9ec7de274.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_left;
int xdim0_update_halo_kernel2_yvel_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_left;
int ydim0_update_halo_kernel2_yvel_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_left;
int xdim1_update_halo_kernel2_yvel_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_left;
int ydim1_update_halo_kernel2_yvel_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_yvel_plus_4_left*(y)+xdim0_update_halo_kernel2_yvel_plus_4_left*ydim0_update_halo_kernel2_yvel_plus_4_left*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_yvel_plus_4_left*(y)+xdim1_update_halo_kernel2_yvel_plus_4_left*ydim1_update_halo_kernel2_yvel_plus_4_left*(z))
//user function
__device__
inline void update_halo_kernel2_yvel_plus_4_left_gpu(double *yvel0, double *yvel1, const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0,0,0)] = yvel0[OPS_ACC0(4,0,0)];
if(fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0,0,0)] = yvel1[OPS_ACC1(4,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_left + idx_z * 1*1 * xdim0_update_halo_kernel2_yvel_plus_4_left * ydim0_update_halo_kernel2_yvel_plus_4_left;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_left + idx_z * 1*1 * xdim1_update_halo_kernel2_yvel_plus_4_left * ydim1_update_halo_kernel2_yvel_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_plus_4_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,39)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
OPS_kernels[39].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_left_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_left_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_left_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_left_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_yvel_plus_4_left, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_yvel_plus_4_left_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_yvel_plus_4_left, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_yvel_plus_4_left_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_yvel_plus_4_left, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_yvel_plus_4_left_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_yvel_plus_4_left, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_yvel_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_plus_4_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[39].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[39].mpi_time += t2-t1;
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[39].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 39;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 39;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(39,"update_halo_kernel2_yvel_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
ddc3aa237604e6b436fe29d6d4b585f9f5e004f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
#define TILE_WIDTH 8
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas, int dificultad, int vidas);
void imprimirTablero(int *tablero, int filas, int columnas, int vidas);
void imprimirColumnas(int columnas);
int comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida, int vidas);
void generarSemillas(int *tablero, int filas, int columnas, int dificultad);
void guardarPartida(int *tablero, int filas, int columnas, int dificultad);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas, int dificultad, int vidas);
void modoAutomatico(int *tablero, int filas, int columnas, int dificultad, int vidas);
void iniciar_partida(int vidas);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
int main(void){
//Almacenamos las propiedades de la tarjeta para no exceder el numero de hilos posibles en el tablero
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
//Propiedades del tablero
int *tablero;
int filas = 0;
int columnas = 0;
int dificultad = 0;
char modo_juego;
int vidas = 2;
//Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo
cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
char partida = 'X';
cin >> partida;
while (partida != 'C' && partida != 'N') {
cout << "Introduce un valor valido para iniciar el juego\n";
cin >> partida;
}
if (partida == 'N'){
//Recogemos los datos de filas y columnas del tablero que vamos a usar
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
//Tablero mnimo de 4 por 4
while (filas < 4) {
cout << "El numero de filas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> filas;
}
while (columnas < 4) {
cout << "El numero de columnas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> columnas;
}
while (prop.maxThreadsPerBlock < (filas * columnas)) {
cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
}
cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n"
"2. Dificil, se lanzaran 8 semillas de 2 y 4 \n";
cin >> dificultad;
while (!(dificultad == 1 || dificultad == 2)){
cout << "Dificultad no vlida \n";
cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n";
cin >> dificultad;
}
cout << "Elija modo de juego: \n A. Automtico \n M. Manual \n";
cin >> modo_juego;
while (!(modo_juego == 'M' || modo_juego == 'A')){
cout << "Modo de juego no vlido \n";
cout << "Selecccione A para jugar en modo automtico o M para manual \n";
cin >> modo_juego;
}
//Reservamos la memoria del tablero y lo inicializamos con generar tablero
tablero = new int[filas * columnas];
generarTablero(tablero, filas, columnas, dificultad, vidas);
if (modo_juego == 'M')
modoManual(tablero, filas, columnas, dificultad, vidas);
else if (modo_juego == 'A')
modoAutomatico(tablero, filas, columnas, dificultad, vidas);
}
else {
cargarPartida();
}
system("PAUSE");
}
//Generar tablero con nmeros aleatorios
void generarTablero(int *tablero, int filas, int columnas, int dificultad, int vidas){
if (vidas != 0){
srand(time(0));
int tamao = filas * columnas;
for (int i = 0; i < tamao; i++){
tablero[i] = 0;
}
generarSemillas(tablero, filas, columnas, dificultad);
}
else{
cout << "NO QUEDAN VIDAS";
system("PAUSE");
exit(0);
}
}
int comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida, int vidas){
int tamao = filas * columnas;
int contador, posicion = 0;
if (dificultad == 1){
contador = 15;
while (contador > 0 && posicion < tamao){
if (tablero[posicion] == 0) contador--;
posicion++;
}
if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad);
else{
vidas--;
cout << "Juego terminado\n";
cout << "VIDAS restantes:" << vidas;
cout << "\n";
if (vidas != 0){
cout << "Desea seguir jugando? (S/N)";
cout << "\n";
char seguir;
cin >> seguir;
while (seguir != 'S' || seguir != 'N'){
if (seguir == 'S'){
iniciar_partida(vidas);
}
}
}
else{
salida = true;
}
}
}
if (dificultad == 2){
contador = 8;
while (contador > 0 && posicion < tamao){
if (tablero[posicion] == 0) contador--;
posicion++;
}
if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad);
else{
vidas--;
cout << "Juego terminado\n";
cout << "VIDAS restantes: " << vidas;
cout << "\n";
if (vidas != 0){
cout << "Desea seguir jugando? (S/N)";
cout << "\n";
char seguir;
cin >> seguir;
while (seguir != 'S' || seguir != 'N'){
if (seguir == 'S'){
iniciar_partida(vidas);
}
}
}
else{
salida = true;
}
}
}
return vidas;
}
void iniciar_partida(int vidas){
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
//Propiedades del tablero
int *tablero;
int filas = 0;
int columnas = 0;
int dificultad = 0;
char modo_juego;
//Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo
cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
char partida = 'X';
cin >> partida;
while (partida != 'C' && partida != 'N') {
cout << "Introduce un valor valido para iniciar el juego\n";
cin >> partida;
}
if (partida == 'N'){
//Recogemos los datos de filas y columnas del tablero que vamos a usar
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
//Tablero mnimo de 4 por 4
while (filas < 4) {
cout << "El numero de filas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> filas;
}
while (columnas < 4) {
cout << "El numero de columnas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n";
cin >> columnas;
}
while (prop.maxThreadsPerBlock < (filas * columnas)) {
cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
cout << "Seleccione el numero de filas con las que desea jugar: \n";
cin >> filas;
cout << "Seleccione el numero de columnas con las que desea jugar: \n";
cin >> columnas;
}
cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n"
"2. Dificil, se lanzaran 8 semillas de 2 y 4 \n";
cin >> dificultad;
while (!(dificultad == 1 || dificultad == 2)){
cout << "Dificultad no vlida \n";
cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n";
cin >> dificultad;
}
cout << "Elija modo de juego: \n A. Automtico \n M. Manual \n";
cin >> modo_juego;
while (!(modo_juego == 'M' || modo_juego == 'A')){
cout << "Modo de juego no vlido \n";
cout << "Selecccione A para jugar en modo automtico o M para manual \n";
cin >> modo_juego;
}
//Reservamos la memoria del tablero y lo inicializamos con generar tablero
tablero = new int[filas * columnas];
generarTablero(tablero, filas, columnas, dificultad, vidas);
if (modo_juego == 'M')
modoManual(tablero, filas, columnas, dificultad, vidas);
else if (modo_juego == 'A')
modoAutomatico(tablero, filas, columnas, dificultad, vidas);
}
else {
cargarPartida();
}
system("PAUSE");
}
//Genera los nmeros para jugar en el tablero
void generarSemillas(int *tablero, int filas, int columnas, int dificultad){
if (dificultad == 1){
int semillas = 0;
int valores[3] = { 2, 4, 8 };
while (semillas < 15){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 3;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}
if (dificultad == 2){
int semillas = 0;
int valores[3] = { 2, 4 };
while (semillas < 8){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 2;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}
}
//Funcin que imprime el nmero de columnas que va a tener el tablero para que sea ms facil elegir semillas
void imprimirColumnas(int columnas) {
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " " << i + 1;
}
else {
if (i < 9) {
cout << " " << i + 1;
}
else {
cout << " " << i + 1;
}
}
}
cout << "\n";
for (int i = 0; i < columnas; i++) {
if (i == 0) {
cout << " |";
}
else {
cout << " |";
}
}
cout << "\n";
}
//Imprimimos el tablero
void imprimirTablero(int *tablero, int filas, int columnas, int vidas) {
cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
cout << "VIDAS :";
for (int i = 0; i < vidas; i++){
cout << "<3";
}
cout << "\n";
imprimirColumnas(columnas);
for (int i = 0; i < filas; i++) {
if (i < 9) {
cout << i + 1 << " - ";
}
else {
cout << i + 1 << " - ";
}
for (int k = 0; k < columnas; k++) {
//Damos color en funcin del nmero imprimido
int bloque = tablero[i * filas + k];
switch (bloque) {
case 2:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo
break;
case 4:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo
break;
case 8:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado
break;
case 16:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul
break;
default:
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco
}
if (bloque < 10) cout << "| " << bloque << " |";
else cout << "| " << bloque << "|";
}
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
cout << "\n";
}
}
//Dispatches the merge/compaction pass that matches the requested move.
//Note the deliberate inversion: a 'W' (up) key shifts tiles toward row 0,
//which is implemented by the "abajo" routine, and so on.
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    if (movimiento == 'W'){
        compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'S'){
        compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'D'){
        compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'A'){
        compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Compacts the tiles toward the edge implied by the move: every non-zero tile
//slides over the zeros next to it. 'W'/'S' work along the thread's column,
//'D'/'A' along its row. The nested loops perform a bubble-style compaction so
//all zeros end up on the trailing side.
//NOTE(review): many threads of the same row/column run this concurrently on
//the same cells without synchronisation — confirm the races are acceptable.
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    if (movimiento == 'W'){
        //Slide tiles toward row 0 within this column.
        for (int i = filas - 1; i > 0; i--){
            for (int j = i; j > 0; j--){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
                    tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
    else if (movimiento == 'S'){
        //Slide tiles toward the last row within this column.
        for (int i = 0; i < filas - 1; i++){
            for (int j = i; j < filas - 1; j++){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
                    tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
    else if (movimiento == 'D'){
        //Slide tiles toward the last column within this row.
        //NOTE(review): the third clause compares a *cell value* with the column
        //count; given the preceding "== 0" test it is redundant — looks like a
        //leftover from an index check. Confirm intent before removing.
        for (int i = 0; i < columnas - 1; i++){
            for (int j = i; j < columnas - 1; j++){
                if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0 && tablero[fila * columnas + (j + 1)] != columnas){
                    tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j];
                    tablero[fila * columnas + j] = 0;
                }
            }
        }
    }
    else if (movimiento == 'A'){
        //Slide tiles toward column 0 within this row.
        for (int i = columnas - 1; i > 0; i--){
            for (int j = i; j > 0; j--){
                if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){
                    tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j];
                    tablero[fila * columnas + j] = 0;
                }
            }
        }
    }
}
//Merge check toward the cell above: after compacting zeros, doubles this cell
//and clears the one directly above it when both hold the same value.
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require fila > 0 — the original evaluated
    //tablero[(fila - 1) * columnas + columna] even at row 0, an
    //out-of-bounds read before the board.
    if (fila > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila - 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Merge check toward the cell below: after compacting zeros, doubles this cell
//and clears the one directly below it when both hold the same value.
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require fila + 1 < filas — the original read one row past the
    //end of the board for the last row.
    if (fila + 1 < filas && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila + 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Merge check toward the cell on the right: after compacting zeros, doubles
//this cell and clears its right neighbour when both hold the same value.
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require columna + 1 < columnas — the original read into the
    //next row (or past the board) for the last column.
    if (columna + 1 < columnas && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna + 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Merge check toward the cell on the left: after compacting zeros, doubles
//this cell and clears its left neighbour when both hold the same value.
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require columna > 0 — the original read into the previous row
    //(or before the board) for column 0.
    if (columna > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna - 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Kernel: one thread per board cell applies the compaction/merge pass for the
//requested move. Expected launch: 2D grid of TILE_WIDTH x TILE_WIDTH blocks
//covering at least filas x columnas threads.
//NOTE(review): threads of the same row/column mutate shared cells without
//synchronisation; behaviour matches the original design.
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
    //Board coordinates of this thread.
    int columnaHilo = blockIdx.x*TILE_WIDTH + threadIdx.x;
    int filaHilo = blockIdx.y*TILE_WIDTH + threadIdx.y;
    //Bug fix: guard threads that fall outside the board so an over-sized grid
    //never touches out-of-range memory. The barrier stays outside the branch
    //so every thread in the block reaches it.
    if (filaHilo < filas && columnaHilo < columnas){
        compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento);
    }
    __syncthreads();
}
//Persists the current game to "partida.txt": rows, columns and difficulty on
//their own lines, then every board cell separated by a single space.
void guardarPartida(int *tablero, int filas, int columnas, int dificultad) {
    ofstream doc("partida.txt");
    doc << filas << "\n" << columnas << "\n" << dificultad << "\n";
    int totalCeldas = filas * columnas;
    for (int celda = 0; celda < totalCeldas; celda++)
        doc << tablero[celda] << " ";
    doc.close();
    system("cls");
    cout << "Guardado correctamente.\n\n";
}
//Loads the game written by guardarPartida() from "partida.txt" (rows, columns,
//difficulty, then the board cells separated by spaces) and resumes in manual
//mode with 5 lives (the save format does not store lives).
void cargarPartida() {
    const string fichero = "partida.txt";
    ifstream leer;
    leer.open(fichero.c_str());
    int f = 0;
    int c = 0;
    int d = 1;   //bug fix: `d` was left uninitialised when the file was missing
    int v = 5;
    if (!leer.fail()) {
        //Bug fix: the original hand-rolled parser converted only the first
        //character of each board token ((int)fila[0] - 48), so any tile value
        //with two or more digits (16, 32, ...) loaded as its first digit.
        //Formatted stream extraction parses full multi-digit integers.
        leer >> f >> c >> d;
    }
    if (f <= 0 || c <= 0) {
        //Corrupt or missing save: bail out instead of allocating new int[0]
        //and handing garbage dimensions to the game loop.
        cout << "No se ha podido cargar la partida.\n";
        return;
    }
    int *tablero = new int[f * c];
    for (int i = 0; i < f * c; i++) {
        //Pad with empty cells if the file is shorter than f*c values.
        if (!(leer >> tablero[i])) tablero[i] = 0;
    }
    leer.close();
    modoManual(tablero, f, c, d, v);
}
//Manual game loop: show the board, read a WASD move ('Z' exits), run the move
//on the GPU, let comprobarLleno reseed or end the round, then offer to save.
void modoManual(int *tablero, int filas, int columnas, int dificultad, int vidas){
    system("cls");
    char movimiento = ' ';
    bool salida = false;
    while (movimiento != 'Z' && salida == false){
        imprimirTablero(tablero, filas, columnas, vidas);
        cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
        cin >> movimiento;
        while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
            cout << "Tecla no valida, introduzca una valida:\n";
            cin >> movimiento;
        }
        //CUDA
        int *tablero_gpu;
        //Allocate device memory and copy the board to the GPU.
        //NOTE(review): hip* return codes are never checked.
        hipMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
        hipMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), hipMemcpyHostToDevice);
        //Launch configuration.
        //NOTE(review): integer division truncates, so boards smaller than
        //TILE_WIDTH (8) get a zero-sized grid and no thread ever runs; both
        //dimensions also use `filas`, while the kernel maps x to columns —
        //presumably this should be ceil(columnas/TILE) x ceil(filas/TILE).
        //Confirm before changing (the kernel must bounds-check first).
        dim3 DimGrid(filas / TILE_WIDTH, filas / TILE_WIDTH);
        dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
        juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
        hipMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, hipMemcpyDeviceToHost);
        system("cls");
        //If the board still has room for seeds, sow them; otherwise end round.
        comprobarLleno(tablero, filas, columnas, dificultad, salida, vidas);
        hipFree(tablero_gpu);
    }
    system("cls");
    cout << "Deseas guardar la partida? (S/N)\n";
    char guardar = 'x';
    cin >> guardar;
    while (guardar != 'S' && guardar != 'N') {
        system("cls");
        cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
        cin >> guardar;
    }
    if (guardar == 'S') {
        guardarPartida(tablero, filas, columnas, dificultad);
    }
    else {
        cout << "Saliendo sin guardar...\n \n";
        exit(-1);
    }
}
void modoAutomatico(int *tablero, int filas, int columnas, int dificultad, int vidas){
system("cls");
bool salida = false;
while (salida == false){
imprimirTablero(tablero, filas, columnas, vidas);
char movimientos[4] = { 'W', 'S', 'D', 'A' };
int mov = rand() % 4;
char movimiento = movimientos[mov];
cout << "El siguiente movimiento a realizar es: \n" << movimiento;
cout << "\n ";
//CUDA
int *tablero_gpu;
//Reservamos memoria y copiamos tablero en GPU
hipMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
hipMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), hipMemcpyHostToDevice);
//Creamos los hilos
dim3 DimGrid(filas / TILE_WIDTH, filas / TILE_WIDTH);
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
hipMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, hipMemcpyDeviceToHost);
system("cls");
//Comprobamos si est lleno el tablero y en caso contrario genera las semillas y sigue jugando
comprobarLleno(tablero, filas, columnas, dificultad, salida, vidas);
hipFree(tablero_gpu);
}
system("cls");
cout << "Deseas guardar la partida? (S/N)\n";
char guardar = 'x';
cin >> guardar;
while (guardar != 'S' && guardar != 'N') {
system("cls");
cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
cin >> guardar;
}
if (guardar == 'S') {
guardarPartida(tablero, filas, columnas, dificultad);
}
else {
cout << "Saliendo sin guardar...\n \n";
exit(-1);
}
} | ddc3aa237604e6b436fe29d6d4b585f9f5e004f9.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <conio.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <windows.h>
#define TILE_WIDTH 8
using namespace std;
//Funciones que van a utilizarse a lo largo del programa
//CPU
void generarTablero(int *tablero, int filas, int columnas, int dificultad, int vidas);
void imprimirTablero(int *tablero, int filas, int columnas, int vidas);
void imprimirColumnas(int columnas);
int comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida, int vidas);
void generarSemillas(int *tablero, int filas, int columnas, int dificultad);
void guardarPartida(int *tablero, int filas, int columnas, int dificultad);
void cargarPartida();
void modoManual(int *tablero, int filas, int columnas, int dificultad, int vidas);
void modoAutomatico(int *tablero, int filas, int columnas, int dificultad, int vidas);
void iniciar_partida(int vidas);
//GPU
__global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento);
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento);
//Entry point: offers to load a saved game or start a new one; for a new game
//it reads and validates the board size (minimum 4x4, capped so filas*columnas
//never exceeds the device's max threads per block), the difficulty and the
//game mode, then hands control to the chosen game loop.
int main(void){
    //Query device limits so the board fits in a single block's thread count.
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    //Board configuration.
    int *tablero;
    int filas = 0;
    int columnas = 0;
    int dificultad = 0;
    char modo_juego;
    int vidas = 2;
    //Resume a saved game or start fresh?
    cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
    char partida = 'X';
    cin >> partida;
    while (partida != 'C' && partida != 'N') {
        cout << "Introduce un valor valido para iniciar el juego\n";
        cin >> partida;
    }
    if (partida == 'N'){
        //Read the board dimensions.
        cout << "Seleccione el numero de filas con las que desea jugar: \n";
        cin >> filas;
        cout << "Seleccione el numero de columnas con las que desea jugar: \n";
        cin >> columnas;
        //Minimum board is 4x4.
        while (filas < 4) {
            cout << "El numero de filas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
            cin >> filas;
        }
        while (columnas < 4) {
            cout << "El numero de columnas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
            cin >> columnas;
        }
        //Keep the whole board within one block's thread budget.
        while (prop.maxThreadsPerBlock < (filas * columnas)) {
            cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
            cout << "Seleccione el numero de filas con las que desea jugar: \n";
            cin >> filas;
            cout << "Seleccione el numero de columnas con las que desea jugar: \n";
            cin >> columnas;
        }
        cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n"
            "2. Dificil, se lanzaran 8 semillas de 2 y 4 \n";
        cin >> dificultad;
        while (!(dificultad == 1 || dificultad == 2)){
            cout << "Dificultad no válida \n";
            cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n";
            cin >> dificultad;
        }
        cout << "Elija modo de juego: \n A. Automático \n M. Manual \n";
        cin >> modo_juego;
        while (!(modo_juego == 'M' || modo_juego == 'A')){
            cout << "Modo de juego no válido \n";
            cout << "Selecccione A para jugar en modo automático o M para manual \n";
            cin >> modo_juego;
        }
        //Allocate the board and sow the starting seeds.
        //NOTE(review): `tablero` is never delete[]d — leaked until exit.
        tablero = new int[filas * columnas];
        generarTablero(tablero, filas, columnas, dificultad, vidas);
        if (modo_juego == 'M')
            modoManual(tablero, filas, columnas, dificultad, vidas);
        else if (modo_juego == 'A')
            modoAutomatico(tablero, filas, columnas, dificultad, vidas);
    }
    else {
        cargarPartida();
    }
    system("PAUSE");
}
//Initialises the board: aborts when no lives remain, otherwise clears every
//cell and sows the initial seeds according to the difficulty.
void generarTablero(int *tablero, int filas, int columnas, int dificultad, int vidas){
    if (vidas == 0){
        //Out of lives: report and terminate the program.
        cout << "NO QUEDAN VIDAS";
        system("PAUSE");
        exit(0);
    }
    //Seed the RNG once per new board.
    srand(time(0));
    int celdas = filas * columnas;
    for (int celda = 0; celda < celdas; celda++)
        tablero[celda] = 0;
    generarSemillas(tablero, filas, columnas, dificultad);
}
//Checks whether the board still has room for the next batch of seeds
//(15 on difficulty 1, 8 on difficulty 2). If so, sows them; otherwise the
//round ends: a life is lost and the player may restart or quit.
//Returns the locally updated life count.
//NOTE(review): `vidas` is passed by value, so the decrement reaches the
//caller only through the return value — both current callers ignore it.
int comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida, int vidas){
    int tamaño = filas * columnas;
    int contador, posicion = 0;
    if (dificultad == 1){
        contador = 15;
        //Stop as soon as 15 free cells have been found.
        while (contador > 0 && posicion < tamaño){
            if (tablero[posicion] == 0) contador--;
            posicion++;
        }
        if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad);
        else{
            vidas--;
            cout << "Juego terminado\n";
            cout << "VIDAS restantes:" << vidas;
            cout << "\n";
            if (vidas != 0){
                cout << "¿Desea seguir jugando? (S/N)";
                cout << "\n";
                char seguir;
                cin >> seguir;
                //Bug fix: the original loop condition
                //(seguir != 'S' || seguir != 'N') is a tautology and the body
                //never re-read input, so any answer other than 'S' spun
                //forever. Keep reading until a valid answer arrives, then act.
                while (seguir != 'S' && seguir != 'N'){
                    cin >> seguir;
                }
                if (seguir == 'S'){
                    iniciar_partida(vidas);
                }
                else{
                    salida = true;
                }
            }
            else{
                salida = true;
            }
        }
    }
    if (dificultad == 2){
        contador = 8;
        //Stop as soon as 8 free cells have been found.
        while (contador > 0 && posicion < tamaño){
            if (tablero[posicion] == 0) contador--;
            posicion++;
        }
        if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad);
        else{
            vidas--;
            cout << "Juego terminado\n";
            cout << "VIDAS restantes: " << vidas;
            cout << "\n";
            if (vidas != 0){
                cout << "¿Desea seguir jugando? (S/N)";
                cout << "\n";
                char seguir;
                cin >> seguir;
                //Same fix as above: re-read until 'S' or 'N'.
                while (seguir != 'S' && seguir != 'N'){
                    cin >> seguir;
                }
                if (seguir == 'S'){
                    iniciar_partida(vidas);
                }
                else{
                    salida = true;
                }
            }
            else{
                salida = true;
            }
        }
    }
    return vidas;
}
//Restarts a game keeping the caller's remaining lives. The flow duplicates
//main(): load-or-new prompt, board size validation against the device limit,
//difficulty and mode selection, then the chosen game loop.
//NOTE(review): near-exact copy of main() — consider factoring the shared
//setup into one helper.
void iniciar_partida(int vidas){
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    //Board configuration.
    int *tablero;
    int filas = 0;
    int columnas = 0;
    int dificultad = 0;
    char modo_juego;
    //Resume a saved game or start fresh?
    cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n";
    char partida = 'X';
    cin >> partida;
    while (partida != 'C' && partida != 'N') {
        cout << "Introduce un valor valido para iniciar el juego\n";
        cin >> partida;
    }
    if (partida == 'N'){
        //Read the board dimensions.
        cout << "Seleccione el numero de filas con las que desea jugar: \n";
        cin >> filas;
        cout << "Seleccione el numero de columnas con las que desea jugar: \n";
        cin >> columnas;
        //Minimum board is 4x4.
        while (filas < 4) {
            cout << "El numero de filas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
            cin >> filas;
        }
        while (columnas < 4) {
            cout << "El numero de columnas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n";
            cin >> columnas;
        }
        //Keep the whole board within one block's thread budget.
        while (prop.maxThreadsPerBlock < (filas * columnas)) {
            cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n";
            cout << "Seleccione el numero de filas con las que desea jugar: \n";
            cin >> filas;
            cout << "Seleccione el numero de columnas con las que desea jugar: \n";
            cin >> columnas;
        }
        cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n"
            "2. Dificil, se lanzaran 8 semillas de 2 y 4 \n";
        cin >> dificultad;
        while (!(dificultad == 1 || dificultad == 2)){
            cout << "Dificultad no válida \n";
            cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n";
            cin >> dificultad;
        }
        cout << "Elija modo de juego: \n A. Automático \n M. Manual \n";
        cin >> modo_juego;
        while (!(modo_juego == 'M' || modo_juego == 'A')){
            cout << "Modo de juego no válido \n";
            cout << "Selecccione A para jugar en modo automático o M para manual \n";
            cin >> modo_juego;
        }
        //Allocate the board and sow the starting seeds.
        tablero = new int[filas * columnas];
        generarTablero(tablero, filas, columnas, dificultad, vidas);
        if (modo_juego == 'M')
            modoManual(tablero, filas, columnas, dificultad, vidas);
        else if (modo_juego == 'A')
            modoAutomatico(tablero, filas, columnas, dificultad, vidas);
    }
    else {
        cargarPartida();
    }
    system("PAUSE");
}
//Genera los números para jugar en el tablero
void generarSemillas(int *tablero, int filas, int columnas, int dificultad){
if (dificultad == 1){
int semillas = 0;
int valores[3] = { 2, 4, 8 };
while (semillas < 15){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 3;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}
if (dificultad == 2){
int semillas = 0;
int valores[3] = { 2, 4 };
while (semillas < 8){
int posicion = rand() % (filas*columnas + 1);
int valor = rand() % 2;
if (tablero[posicion] == 0){
tablero[posicion] = valores[valor];
semillas++;
}
}
}
}
//Prints the column header (1..columnas) and a row of "|" marks so the player
//can line up moves with board columns. The branches vary only in the padding
//string so one- and two-digit column numbers stay aligned with the cells.
void imprimirColumnas(int columnas) {
    for (int i = 0; i < columnas; i++) {
        if (i == 0) {
            //First column: extra left padding before the header starts.
            cout << " " << i + 1;
        }
        else {
            if (i < 9) {
                //Single-digit column number.
                cout << " " << i + 1;
            }
            else {
                //Two-digit column number needs one less padding space.
                cout << " " << i + 1;
            }
        }
    }
    cout << "\n";
    //Second row: a "|" marker under each column header.
    for (int i = 0; i < columnas; i++) {
        if (i == 0) {
            cout << " |";
        }
        else {
            cout << " |";
        }
    }
    cout << "\n";
}
//Prints the board with the remaining lives and a per-value console colour.
//NOTE(review): colouring relies on the Windows console API
//(SetConsoleTextAttribute), so this file is Windows-only as written.
void imprimirTablero(int *tablero, int filas, int columnas, int vidas) {
    cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n";
    cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n";
    cout << "VIDAS :";
    for (int i = 0; i < vidas; i++){
        cout << "<3";
    }
    cout << "\n";
    imprimirColumnas(columnas);
    for (int i = 0; i < filas; i++) {
        if (i < 9) {
            cout << i + 1 << " - ";
        }
        else {
            cout << i + 1 << " - ";
        }
        for (int k = 0; k < columnas; k++) {
            //Bug fix: the board is stored row-major with `columnas` cells per
            //row (every other routine indexes it as fila * columnas + columna),
            //so the cell index is i * columnas + k. The original used
            //i * filas + k, which scrambles the display (and can read out of
            //bounds) whenever filas != columnas.
            int bloque = tablero[i * columnas + k];
            //Colour chosen by the tile value.
            switch (bloque) {
            case 2:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Yellow
                break;
            case 4:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Red
                break;
            case 8:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Purple
                break;
            case 16:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Blue
                break;
            default:
                SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //White
            }
            if (bloque < 10) cout << "| " << bloque << " |";
            else cout << "| " << bloque << "|";
        }
        SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7);
        cout << "\n";
    }
}
//Dispatches the merge/compaction pass that matches the requested move.
//Note the deliberate inversion: a 'W' (up) key shifts tiles toward row 0,
//which is implemented by the "abajo" routine, and so on.
__device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    if (movimiento == 'W'){
        compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'S'){
        compruebaArriba(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'D'){
        compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento);
    }
    else if (movimiento == 'A'){
        compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Compacts the tiles toward the edge implied by the move: every non-zero tile
//slides over the zeros next to it. 'W'/'S' work along the thread's column,
//'D'/'A' along its row. The nested loops perform a bubble-style compaction so
//all zeros end up on the trailing side.
//NOTE(review): many threads of the same row/column run this concurrently on
//the same cells without synchronisation — confirm the races are acceptable.
__device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    if (movimiento == 'W'){
        //Slide tiles toward row 0 within this column.
        for (int i = filas - 1; i > 0; i--){
            for (int j = i; j > 0; j--){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){
                    tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
    else if (movimiento == 'S'){
        //Slide tiles toward the last row within this column.
        for (int i = 0; i < filas - 1; i++){
            for (int j = i; j < filas - 1; j++){
                if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){
                    tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna];
                    tablero[(j * columnas) + columna] = 0;
                }
            }
        }
    }
    else if (movimiento == 'D'){
        //Slide tiles toward the last column within this row.
        //NOTE(review): the third clause compares a *cell value* with the column
        //count; given the preceding "== 0" test it is redundant — looks like a
        //leftover from an index check. Confirm intent before removing.
        for (int i = 0; i < columnas - 1; i++){
            for (int j = i; j < columnas - 1; j++){
                if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0 && tablero[fila * columnas + (j + 1)] != columnas){
                    tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j];
                    tablero[fila * columnas + j] = 0;
                }
            }
        }
    }
    else if (movimiento == 'A'){
        //Slide tiles toward column 0 within this row.
        for (int i = columnas - 1; i > 0; i--){
            for (int j = i; j > 0; j--){
                if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){
                    tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j];
                    tablero[fila * columnas + j] = 0;
                }
            }
        }
    }
}
//Merge check toward the cell above: after compacting zeros, doubles this cell
//and clears the one directly above it when both hold the same value.
__device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require fila > 0 — the original evaluated
    //tablero[(fila - 1) * columnas + columna] even at row 0, an
    //out-of-bounds read before the board.
    if (fila > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila - 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Merge check toward the cell below: after compacting zeros, doubles this cell
//and clears the one directly below it when both hold the same value.
__device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require fila + 1 < filas — the original read one row past the
    //end of the board for the last row.
    if (fila + 1 < filas && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[((fila + 1) * columnas) + columna] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Merge check toward the cell on the right: after compacting zeros, doubles
//this cell and clears its right neighbour when both hold the same value.
__device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require columna + 1 < columnas — the original read into the
    //next row (or past the board) for the last column.
    if (columna + 1 < columnas && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna + 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Merge check toward the cell on the left: after compacting zeros, doubles
//this cell and clears its left neighbour when both hold the same value.
__device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){
    //Bounds guard: threads mapped outside the board must not touch memory.
    if (fila >= filas || columna >= columnas) return;
    moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    //Bug fix: require columna > 0 — the original read into the previous row
    //(or before the board) for column 0.
    if (columna > 0 && tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){
        tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2;
        tablero[(fila * columnas) + (columna - 1)] = 0;
        moverCeros(tablero, fila, columna, filas, columnas, movimiento);
    }
}
//Kernel: one thread per board cell applies the compaction/merge pass for the
//requested move. Expected launch: 2D grid of TILE_WIDTH x TILE_WIDTH blocks
//covering at least filas x columnas threads.
//NOTE(review): threads of the same row/column mutate shared cells without
//synchronisation; behaviour matches the original design.
__global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){
    //Board coordinates of this thread.
    int columnaHilo = blockIdx.x*TILE_WIDTH + threadIdx.x;
    int filaHilo = blockIdx.y*TILE_WIDTH + threadIdx.y;
    //Bug fix: guard threads that fall outside the board so an over-sized grid
    //never touches out-of-range memory. The barrier stays outside the branch
    //so every thread in the block reaches it.
    if (filaHilo < filas && columnaHilo < columnas){
        compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento);
    }
    __syncthreads();
}
//Persists the current game to "partida.txt": rows, columns and difficulty on
//their own lines, then every board cell separated by a single space. This is
//the format cargarPartida() reads back.
void guardarPartida(int *tablero, int filas, int columnas, int dificultad) {
    ofstream doc;
    doc.open("partida.txt");
    doc << filas << "\n";
    doc << columnas << "\n";
    doc << dificultad << "\n";
    for (int i = 0; i < filas * columnas; i++) {
        doc << tablero[i] << " ";
    }
    doc.close();
    system("cls");
    cout << "Guardado correctamente.\n\n";
}
//Loads the game written by guardarPartida() from "partida.txt" (rows, columns,
//difficulty, then the board cells separated by spaces) and resumes in manual
//mode with 5 lives (the save format does not store lives).
void cargarPartida() {
    const string fichero = "partida.txt";
    ifstream leer;
    leer.open(fichero.c_str());
    int f = 0;
    int c = 0;
    int d = 1;   //bug fix: `d` was left uninitialised when the file was missing
    int v = 5;
    if (!leer.fail()) {
        //Bug fix: the original hand-rolled parser converted only the first
        //character of each board token ((int)fila[0] - 48), so any tile value
        //with two or more digits (16, 32, ...) loaded as its first digit.
        //Formatted stream extraction parses full multi-digit integers.
        leer >> f >> c >> d;
    }
    if (f <= 0 || c <= 0) {
        //Corrupt or missing save: bail out instead of allocating new int[0]
        //and handing garbage dimensions to the game loop.
        cout << "No se ha podido cargar la partida.\n";
        return;
    }
    int *tablero = new int[f * c];
    for (int i = 0; i < f * c; i++) {
        //Pad with empty cells if the file is shorter than f*c values.
        if (!(leer >> tablero[i])) tablero[i] = 0;
    }
    leer.close();
    modoManual(tablero, f, c, d, v);
}
//Manual game loop: show the board, read a WASD move ('Z' exits), run the move
//on the GPU, let comprobarLleno reseed or end the round, then offer to save.
void modoManual(int *tablero, int filas, int columnas, int dificultad, int vidas){
    system("cls");
    char movimiento = ' ';
    bool salida = false;
    while (movimiento != 'Z' && salida == false){
        imprimirTablero(tablero, filas, columnas, vidas);
        cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n";
        cin >> movimiento;
        while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') {
            cout << "Tecla no valida, introduzca una valida:\n";
            cin >> movimiento;
        }
        //CUDA
        int *tablero_gpu;
        //Allocate device memory and copy the board to the GPU.
        //NOTE(review): cuda* return codes are never checked.
        cudaMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
        cudaMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), cudaMemcpyHostToDevice);
        //Launch configuration.
        //NOTE(review): integer division truncates, so boards smaller than
        //TILE_WIDTH (8) get a zero-sized grid and no thread ever runs; both
        //dimensions also use `filas`, while the kernel maps x to columns —
        //presumably this should be ceil(columnas/TILE) x ceil(filas/TILE).
        //Confirm before changing (the kernel must bounds-check first).
        dim3 DimGrid(filas / TILE_WIDTH, filas / TILE_WIDTH);
        dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
        juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
        cudaMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, cudaMemcpyDeviceToHost);
        system("cls");
        //If the board still has room for seeds, sow them; otherwise end round.
        comprobarLleno(tablero, filas, columnas, dificultad, salida, vidas);
        cudaFree(tablero_gpu);
    }
    system("cls");
    cout << "Deseas guardar la partida? (S/N)\n";
    char guardar = 'x';
    cin >> guardar;
    while (guardar != 'S' && guardar != 'N') {
        system("cls");
        cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
        cin >> guardar;
    }
    if (guardar == 'S') {
        guardarPartida(tablero, filas, columnas, dificultad);
    }
    else {
        cout << "Saliendo sin guardar...\n \n";
        exit(-1);
    }
}
void modoAutomatico(int *tablero, int filas, int columnas, int dificultad, int vidas){
system("cls");
bool salida = false;
while (salida == false){
imprimirTablero(tablero, filas, columnas, vidas);
char movimientos[4] = { 'W', 'S', 'D', 'A' };
int mov = rand() % 4;
char movimiento = movimientos[mov];
cout << "El siguiente movimiento a realizar es: \n" << movimiento;
cout << "\n ";
//CUDA
int *tablero_gpu;
//Reservamos memoria y copiamos tablero en GPU
cudaMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int));
cudaMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), cudaMemcpyHostToDevice);
//Creamos los hilos
dim3 DimGrid(filas / TILE_WIDTH, filas / TILE_WIDTH);
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento);
cudaMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, cudaMemcpyDeviceToHost);
system("cls");
//Comprobamos si está lleno el tablero y en caso contrario genera las semillas y sigue jugando
comprobarLleno(tablero, filas, columnas, dificultad, salida, vidas);
cudaFree(tablero_gpu);
}
system("cls");
cout << "Deseas guardar la partida? (S/N)\n";
char guardar = 'x';
cin >> guardar;
while (guardar != 'S' && guardar != 'N') {
system("cls");
cout << "Valor no valido, quieres guardar la partida? (S/N): \n";
cin >> guardar;
}
if (guardar == 'S') {
guardarPartida(tablero, filas, columnas, dificultad);
}
else {
cout << "Saliendo sin guardar...\n \n";
exit(-1);
}
} |
ebbbc9d6475a2bd03a3df5ea114642f8356c5f73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Row-wise inclusive prefix sum: for each of the b rows of inp (length n)
// writes its running cumulative sum to out. One block processes one row at a
// time (grid-stride over rows) in tiles of BlockSize*4 elements: values are
// loaded four at a time and pre-summed into buffer4, the per-group-of-4
// totals are scanned in shared memory with a work-efficient up-sweep /
// down-sweep (indices into `buffer` are padded every 2^paddingLevel entries,
// presumably to reduce shared-memory bank conflicts — TODO confirm), and a
// compensated (Kahan-style) pair runningsum/runningsum2 carries the total
// across tiles to limit float error accumulation.
__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
    const int BlockSize=2048;
    const int paddingLevel=5;
    __shared__ float buffer4[BlockSize*4];
    __shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)];
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        float runningsum=0,runningsum2=0;
        for (int j=0;j<n;j+=BlockSize*4){
            //int n2=min(n-j,BlockSize);
            /*for (int k=threadIdx.x;k<n2;k+=blockDim.x){
            buffer[k+(k>>paddingLevel)]=inp[i*n+j+k];
            }*/
            // n24_i: valid elements in this tile; n24: rounded up to a multiple of 4.
            int n24_i=min(n-j,BlockSize*4);
            int n24=(n24_i+3)&~3;
            int n2=n24>>2;
            /*for (int k=threadIdx.x;k<n2;k+=blockDim.x){
            buffer[k+(k>>paddingLevel)]=inp[i*n+j+k];
            }*/
            // Load 4 elements per thread and build their local inclusive sums;
            // the group total (v4) seeds the shared-memory scan in `buffer`.
            for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
                if (k+3<n24_i){
                    float v1=inp[i*n+j+k];
                    float v2=inp[i*n+j+k+1];
                    v2+=v1;
                    float v3=inp[i*n+j+k+2];
                    float v4=inp[i*n+j+k+3];
                    v4+=v3;
                    v3+=v2;
                    v4+=v2;
                    buffer4[k]=v1;
                    buffer4[k+1]=v2;
                    buffer4[k+2]=v3;
                    buffer4[k+3]=v4;
                    buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
                }else{
                    // Ragged tail: sequential partial sums, padded out to n24.
                    float v=0;
                    for (int k2=k;k2<n24_i;k2++){
                        v+=inp[i*n+j+k2];
                        buffer4[k2]=v;
                    }
                    for (int k2=n24_i;k2<n24;k2++){
                        buffer4[k2]=v;
                    }
                    buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
                }
            }
            // Up-sweep (reduce) phase over the n2 group totals.
            int u=0;
            for (;(2<<u)<=n2;u++){
                __syncthreads();
                for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
                    int i1=(((k<<1)+2)<<u)-1;
                    int i2=(((k<<1)+1)<<u)-1;
                    i1+=i1>>paddingLevel;
                    i2+=i2>>paddingLevel;
                    buffer[i1]+=buffer[i2];
                }
            }
            u--;
            // Down-sweep phase: propagate partial sums to the remaining slots.
            for (;u>=0;u--){
                __syncthreads();
                for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
                    int i1=(((k<<1)+3)<<u)-1;
                    int i2=(((k<<1)+2)<<u)-1;
                    i1+=i1>>paddingLevel;
                    i2+=i2>>paddingLevel;
                    buffer[i1]+=buffer[i2];
                }
            }
            __syncthreads();
            /*for (int k=threadIdx.x;k<n2;k+=blockDim.x){
            out[i*n+j+k]=buffer[k+(k>>paddingLevel)]+runningsum;
            }*/
            // Add the scanned prefix of the preceding groups to each group of 4.
            for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
                if (k!=0){
                    int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
                    buffer4[k]+=buffer[k2];
                    buffer4[k+1]+=buffer[k2];
                    buffer4[k+2]+=buffer[k2];
                    buffer4[k+3]+=buffer[k2];
                }
            }
            __syncthreads();
            // Emit this tile's results offset by the running sum of prior tiles.
            for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
                out[i*n+j+k]=buffer4[k]+runningsum;
            }
            //float t=buffer4[n24-1]+runningsum2;
            // Compensated accumulation of the tile total into the running sum.
            float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
            float r2=runningsum+t;
            runningsum2=t-(r2-runningsum);
            runningsum=r2;
            __syncthreads();
        }
    }
}
// Inverse-CDF lookup: `dataset` holds, per batch row, a non-decreasing cumsum
// of length n (as produced by cumsumKernel). Each of the m query values is
// scaled by the row total (dataset[n-1]) and a branch-free power-of-two
// descent finds the smallest index r such that dataset[r] >= q (starting from
// r = n-1 and subtracting progressively smaller powers of two while the
// element before the step still satisfies the bound). The index is written to
// `result`, giving sampling proportional to the original probabilities.
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
    // Smallest power of two >= n: the initial search stride.
    int base=1;
    while (base<n)
        base<<=1;
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        for (int j=blockIdx.y*blockDim.x+threadIdx.x;j<m;j+=blockDim.x*gridDim.y){
            float q=query[i*m+j]*dataset[i*n+n-1];
            int r=n-1;
            for (int k=base;k>=1;k>>=1)
                if (r>=k && dataset[i*n+r-k]>=q)
                    r-=k;
            result[i*m+j]=r;
        }
    }
}
// Farthest point sampling: for each of the b point clouds (n xyz points in
// `dataset`) selects m indices into `idxs`, starting from point 0 and
// repeatedly adding the point whose minimum squared distance to the chosen
// set is largest. `temp` is per-block scratch (gridDim.x * n floats) holding
// each point's current distance-to-set; the first BufferSize points of the
// cloud are cached in shared memory to cut global reads. A shared-memory
// tree reduction finds the arg-max each iteration — the reduction indexing
// assumes blockDim.x is a power of two (the launcher uses 512).
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
    if (m<=0)
        return;
    const int BlockSize=512;
    __shared__ float dists[BlockSize];
    __shared__ int dists_i[BlockSize];
    const int BufferSize=3072;
    __shared__ float buf[BufferSize*3];
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        // Seed the sample with point 0.
        int old=0;
        if (threadIdx.x==0)
            idxs[i*m+0]=old;
        // Reset every point's distance-to-set to "infinity".
        for (int j=threadIdx.x;j<n;j+=blockDim.x){
            temp[blockIdx.x*n+j]=1e38;
        }
        // Cache the first min(BufferSize, n) points in shared memory.
        for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
            buf[j]=dataset[i*n*3+j];
        }
        __syncthreads();
        for (int j=1;j<m;j++){
            int besti=0;
            float best=-1;
            // Coordinates of the most recently selected point.
            float x1=dataset[i*n*3+old*3+0];
            float y1=dataset[i*n*3+old*3+1];
            float z1=dataset[i*n*3+old*3+2];
            // Update each point's min distance and track this thread's max.
            for (int k=threadIdx.x;k<n;k+=blockDim.x){
                float td=temp[blockIdx.x*n+k];
                float x2,y2,z2;
                if (k<BufferSize){
                    x2=buf[k*3+0];
                    y2=buf[k*3+1];
                    z2=buf[k*3+2];
                }else{
                    x2=dataset[i*n*3+k*3+0];
                    y2=dataset[i*n*3+k*3+1];
                    z2=dataset[i*n*3+k*3+2];
                }
                float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
                float d2=min(d,td);
                if (d2!=td)
                    temp[blockIdx.x*n+k]=d2;
                if (d2>best){
                    best=d2;
                    besti=k;
                }
            }
            dists[threadIdx.x]=best;
            dists_i[threadIdx.x]=besti;
            // Block-wide arg-max reduction over (dists, dists_i).
            for (int u=0;(1<<u)<blockDim.x;u++){
                __syncthreads();
                if (threadIdx.x<(blockDim.x>>(u+1))){
                    int i1=(threadIdx.x*2)<<u;
                    int i2=(threadIdx.x*2+1)<<u;
                    if (dists[i1]<dists[i2]){
                        dists[i1]=dists[i2];
                        dists_i[i1]=dists_i[i2];
                    }
                }
            }
            __syncthreads();
            // The farthest point becomes the next sample.
            old=dists_i[0];
            if (threadIdx.x==0)
                idxs[i*m+j]=old;
        }
    }
}
// Host-side launch wrappers. All kernels use a fixed 32-block x 512-thread
// configuration; grid-stride loops inside the kernels absorb any b/n/m.

// Per-batch inclusive prefix sums of inp -> out.
void cumsumLauncher(int b,int n,const float * inp,float * out){
    const dim3 grid(32);
    const dim3 block(512);
    hipLaunchKernelGGL((cumsumKernel), grid, block, 0, 0, b, n, inp, out);
}
//require b*n working space
// Categorical sampling: build per-batch CDFs in `temp`, then invert them with
// a binary search for every uniform draw in inp_r.
void probSampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
    const dim3 block(512);
    hipLaunchKernelGGL((cumsumKernel), dim3(32), block, 0, 0, b, n, inp_p, temp);
    hipLaunchKernelGGL((binarysearchKernel), dim3(32, 8, 1), block, 0, 0, b, n, m, temp, inp_r, out);
}
//require 32*n working space
// Greedy farthest point sampling of m indices per batch.
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
    const dim3 grid(32);
    const dim3 block(512);
    hipLaunchKernelGGL((farthestpointsamplingKernel), grid, block, 0, 0, b, n, m, inp, temp, out);
}
ebbbc9d6475a2bd03a3df5ea114642f8356c5f73.cu | __global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){
// Per-batch inclusive prefix sum: out[i*n+k] = inp[i*n+0] + ... + inp[i*n+k].
// Each block scans whole rows in tiles of BlockSize*4 elements; a compensated
// (Kahan-style) running sum carries the total across tiles so long rows do
// not lose precision.
const int BlockSize=2048;
// Index k of `buffer` is stored at slot k + (k>>paddingLevel), i.e. one extra
// slot is skipped every 2^paddingLevel entries — presumably to avoid
// shared-memory bank conflicts in the scan sweeps; TODO confirm.
const int paddingLevel=5;
__shared__ float buffer4[BlockSize*4]; // per-element inclusive sums within each 4-wide quad
__shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)]; // one (padded) total per quad
for (int i=blockIdx.x;i<b;i+=gridDim.x){
// runningsum = total of all previous tiles; runningsum2 = its Kahan
// compensation term.
float runningsum=0,runningsum2=0;
for (int j=0;j<n;j+=BlockSize*4){
//int n2=min(n-j,BlockSize);
/*for (int k=threadIdx.x;k<n2;k+=blockDim.x){
buffer[k+(k>>paddingLevel)]=inp[i*n+j+k];
}*/
// n24_i: valid elements in this tile; n24: rounded up to a multiple of 4;
// n2: number of 4-wide quads.
int n24_i=min(n-j,BlockSize*4);
int n24=(n24_i+3)&~3;
int n2=n24>>2;
/*for (int k=threadIdx.x;k<n2;k+=blockDim.x){
buffer[k+(k>>paddingLevel)]=inp[i*n+j+k];
}*/
// Phase 1: scan each quad of 4 inputs sequentially into buffer4 and stash
// the quad total into the padded `buffer`.
for (int k=threadIdx.x*4;k<n24_i;k+=blockDim.x*4){
if (k+3<n24_i){
float v1=inp[i*n+j+k];
float v2=inp[i*n+j+k+1];
v2+=v1;
float v3=inp[i*n+j+k+2];
float v4=inp[i*n+j+k+3];
v4+=v3;
v3+=v2;
v4+=v2;
buffer4[k]=v1;
buffer4[k+1]=v2;
buffer4[k+2]=v3;
buffer4[k+3]=v4;
buffer[(k>>2)+(k>>(2+paddingLevel))]=v4;
}else{
// Ragged final quad: plain sequential scan; the tail is padded with the
// last partial sum so the quad total stays well defined.
float v=0;
for (int k2=k;k2<n24_i;k2++){
v+=inp[i*n+j+k2];
buffer4[k2]=v;
}
for (int k2=n24_i;k2<n24;k2++){
buffer4[k2]=v;
}
buffer[(k>>2)+(k>>(2+paddingLevel))]=v;
}
}
// Phase 2: up-sweep (pairwise reduction) over the quad totals.
int u=0;
for (;(2<<u)<=n2;u++){
__syncthreads();
for (int k=threadIdx.x;k<int(n2>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+2)<<u)-1;
int i2=(((k<<1)+1)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
// Phase 3: down-sweep, completing an inclusive scan of the quad totals.
u--;
for (;u>=0;u--){
__syncthreads();
for (int k=threadIdx.x;k<int((n2-(1<<u))>>(u+1));k+=blockDim.x){
int i1=(((k<<1)+3)<<u)-1;
int i2=(((k<<1)+2)<<u)-1;
i1+=i1>>paddingLevel;
i2+=i2>>paddingLevel;
buffer[i1]+=buffer[i2];
}
}
__syncthreads();
/*for (int k=threadIdx.x;k<n2;k+=blockDim.x){
out[i*n+j+k]=buffer[k+(k>>paddingLevel)]+runningsum;
}*/
// Phase 4: add each quad's exclusive offset (scanned total of all preceding
// quads) back into the in-quad partial sums.
for (int k=threadIdx.x*4;k<n24;k+=blockDim.x*4){
if (k!=0){
int k2=((k>>2)-1)+(((k>>2)-1)>>paddingLevel);
buffer4[k]+=buffer[k2];
buffer4[k+1]+=buffer[k2];
buffer4[k+2]+=buffer[k2];
buffer4[k+3]+=buffer[k2];
}
}
__syncthreads();
// Emit this tile, shifted by the running total of earlier tiles.
for (int k=threadIdx.x;k<n24_i;k+=blockDim.x){
out[i*n+j+k]=buffer4[k]+runningsum;
}
//float t=buffer4[n24-1]+runningsum2;
// Kahan-compensated update of the running total with this tile's sum
// (the last entry of the scanned quad totals).
float t=buffer[(n2-1)+((n2-1)>>paddingLevel)]+runningsum2;
float r2=runningsum+t;
runningsum2=t-(r2-runningsum);
runningsum=r2;
__syncthreads();
}
}
}
// Invert a per-batch CDF: for every query value, descend a binary search over
// the cumulative `dataset` row and emit the selected index. Queries are given
// in [0,1) and rescaled by the row total (dataset[i*n+n-1]). One thread
// handles one (batch, query) pair; grid-stride loops cover any b/m.
__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){
    // Smallest power of two >= n; the descent tests one bit per step.
    int step = 1;
    while (step < n) {
        step <<= 1;
    }
    for (int batch = blockIdx.x; batch < b; batch += gridDim.x) {
        const float *row = dataset + batch * n;
        for (int idx = blockIdx.y * blockDim.x + threadIdx.x; idx < m; idx += blockDim.x * gridDim.y) {
            // Rescale the draw into the row's cumulative-sum domain.
            float target = query[batch * m + idx] * row[n - 1];
            int pos = n - 1;
            for (int bit = step; bit >= 1; bit >>= 1) {
                if (pos >= bit && row[pos - bit] >= target) {
                    pos -= bit;
                }
            }
            result[batch * m + idx] = pos;
        }
    }
}
// Greedy farthest point sampling: for each batch, pick m of the n 3-D points
// such that every new pick maximizes its squared distance to the set already
// picked (the first pick is always point 0).
// Launch expectations: blockDim.x == 512 and a power of two (the reduction
// tree below relies on it); one block cooperates on one batch at a time.
// `temp` is a [gridDim.x, n] scratch buffer holding each point's running
// minimum squared distance to the picked set.
__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
  if (m<=0)
    return;
  const int BlockSize=512;
  __shared__ float dists[BlockSize];   // per-thread best distance, then max-reduction tree
  __shared__ int dists_i[BlockSize];   // index of that best point
  const int BufferSize=3072;
  __shared__ float buf[BufferSize*3];  // shared-memory cache of the first BufferSize points
  for (int i=blockIdx.x;i<b;i+=gridDim.x){
    int old=0;                         // index of the most recently picked point
    if (threadIdx.x==0)
      idxs[i*m+0]=old;
    // Reset the running min-distance array to "infinity".
    for (int j=threadIdx.x;j<n;j+=blockDim.x){
      temp[blockIdx.x*n+j]=1e38f;      // float literal: avoid a double constant
    }
    // Stage the first min(BufferSize,n) points into shared memory.
    for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
      buf[j]=dataset[i*n*3+j];
    }
    __syncthreads();
    for (int j=1;j<m;j++){
      int besti=0;
      float best=-1;
      float x1=dataset[i*n*3+old*3+0];
      float y1=dataset[i*n*3+old*3+1];
      float z1=dataset[i*n*3+old*3+2];
      // Fold the newest pick into each point's distance-to-set and track the
      // farthest remaining point seen by this thread.
      for (int k=threadIdx.x;k<n;k+=blockDim.x){
        float td=temp[blockIdx.x*n+k];
        float x2,y2,z2;
        if (k<BufferSize){
          x2=buf[k*3+0];
          y2=buf[k*3+1];
          z2=buf[k*3+2];
        }else{
          x2=dataset[i*n*3+k*3+0];
          y2=dataset[i*n*3+k*3+1];
          z2=dataset[i*n*3+k*3+2];
        }
        float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
        float d2=min(d,td);
        if (d2!=td)
          temp[blockIdx.x*n+k]=d2;
        if (d2>best){
          best=d2;
          besti=k;
        }
      }
      dists[threadIdx.x]=best;
      dists_i[threadIdx.x]=besti;
      // Block-wide argmax reduction (blockDim.x must be a power of two).
      for (int u=0;(1<<u)<blockDim.x;u++){
        __syncthreads();
        if (threadIdx.x<(blockDim.x>>(u+1))){
          int i1=(threadIdx.x*2)<<u;
          int i2=(threadIdx.x*2+1)<<u;
          if (dists[i1]<dists[i2]){
            dists[i1]=dists[i2];
            dists_i[i1]=dists_i[i2];
          }
        }
      }
      __syncthreads();
      old=dists_i[0];
      if (threadIdx.x==0)
        idxs[i*m+j]=old;
      // Bug fix: barrier added so no thread can start the next iteration and
      // overwrite dists/dists_i before every thread has read dists_i[0] above.
      __syncthreads();
    }
  }
}
// Host-side launch wrappers. All kernels use a fixed 32-block x 512-thread
// configuration; grid-stride loops inside the kernels absorb any b/n/m.

// Per-batch inclusive prefix sums of inp -> out.
void cumsumLauncher(int b,int n,const float * inp,float * out){
    const dim3 grid(32);
    const dim3 block(512);
    cumsumKernel<<<grid, block>>>(b, n, inp, out);
}
//require b*n working space
// Categorical sampling: build per-batch CDFs in `temp`, then invert them with
// a binary search for every uniform draw in inp_r.
void probSampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){
    const dim3 block(512);
    cumsumKernel<<<dim3(32), block>>>(b, n, inp_p, temp);
    binarysearchKernel<<<dim3(32, 8, 1), block>>>(b, n, m, temp, inp_r, out);
}
//require 32*n working space
// Greedy farthest point sampling of m indices per batch.
void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
    const dim3 grid(32);
    const dim3 block(512);
    farthestpointsamplingKernel<<<grid, block>>>(b, n, m, inp, temp, out);
}
|
aab47f56895e4fcf84aed37e0a125e9b3a7abee2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "SineWaveSimulation.h"
#include <algorithm>
#include <helper_cuda.h>
// Animated sine-wave height field: writes a height in [-0.5, 0.5] for every
// texel of a width x height grid at the given animation time.
// Grid-stride loop, so any launch configuration covers the whole surface.
__global__ void sinewave(float *heightMap, unsigned int width, unsigned int height, float time)
{
    const float freq = 4.0f;
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    // Promote before multiplying so width*height cannot wrap in 32-bit
    // unsigned arithmetic for very large surfaces.
    const size_t numVerts = (size_t)width * height;

    // Iterate through the entire array in a way that is
    // independent of the grid configuration
    for (size_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numVerts; tid += stride) {
        // Calculate the x, y coordinates
        const size_t y = tid / width;
        const size_t x = tid - y * width;
        // Normalize x, y to [-1,1]
        const float u = ((2.0f * x) / width) - 1.0f;
        const float v = ((2.0f * y) / height) - 1.0f;

        // Calculate the new height value
        const float w = 0.5f * sinf(u * freq + time) * cosf(v * freq + time);

        // Store this new height value
        heightMap[tid] = w;
    }
}
// Record the requested grid dimensions; the height buffer itself is supplied
// later via initSimulation().
SineWaveSimulation::SineWaveSimulation(size_t width, size_t height)
: m_heightMap(nullptr), m_width(width), m_height(height)
{
}
// Choose a launch configuration for the sinewave kernel on `device`:
// one warp per block, and enough blocks to fill the GPU per the occupancy
// calculator, clamped to what the surface actually needs.
void SineWaveSimulation::initCudaLaunchConfig(int device)
{
hipDeviceProp_t prop = {};
checkCudaErrors(hipSetDevice(device));
checkCudaErrors(hipGetDeviceProperties(&prop, device));
// We don't need large block sizes, since there's not much inter-thread communication
m_threads = prop.warpSize;
// Use the occupancy calculator and fill the gpu as best as we can
checkCudaErrors(hipOccupancyMaxActiveBlocksPerMultiprocessor(&m_blocks, sinewave, prop.warpSize, 0));
m_blocks *= prop.multiProcessorCount;
// Clamp the block count to the minimum needed to cover this height/width
// (the kernel's grid-stride loop tolerates any value).
m_blocks = ::min(m_blocks, (int)((m_width * m_height + m_threads - 1) / m_threads));
}
// Locate the CUDA/HIP device whose UUID matches the Vulkan physical device
// (vkDeviceUUID, UUID_SIZE bytes) and make it current.
// Returns the device ordinal on success; exits the process when no device
// exists or none is interop-capable; returns -1 if no UUID matched.
int SineWaveSimulation::initCuda(uint8_t *vkDeviceUUID, size_t UUID_SIZE)
{
    int current_device = 0;
    int device_count = 0;
    int devices_prohibited = 0;

    hipDeviceProp_t deviceProp;
    checkCudaErrors(hipGetDeviceCount(&device_count));

    if (device_count == 0) {
        fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }

    // Find the GPU which is selected by Vulkan
    while (current_device < device_count) {
        // Checked now, for consistency with the other runtime calls here;
        // previously the return code of this probe was silently ignored.
        checkCudaErrors(hipGetDeviceProperties(&deviceProp, current_device));

        if ((deviceProp.computeMode != hipComputeModeProhibited)) {
            // Compare the cuda device UUID with vulkan UUID
            int ret = memcmp((void*)&deviceProp.uuid, vkDeviceUUID, UUID_SIZE);
            if (ret == 0)
            {
                checkCudaErrors(hipSetDevice(current_device));
                checkCudaErrors(hipGetDeviceProperties(&deviceProp, current_device));
                printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
                       current_device, deviceProp.name, deviceProp.major,
                       deviceProp.minor);
                return current_device;
            }
        } else {
            devices_prohibited++;
        }
        current_device++;
    }

    if (devices_prohibited == device_count) {
        fprintf(stderr,
                "CUDA error:"
                " No Vulkan-CUDA Interop capable GPU found.\n");
        exit(EXIT_FAILURE);
    }

    return -1;
}
// The destructor does not free the height map: the buffer is caller-managed
// (handed to us in initSimulation), so we only drop our reference to it.
SineWaveSimulation::~SineWaveSimulation()
{
m_heightMap = NULL;
}
// Adopt (but do not own) the caller-managed buffer the kernel writes into.
void SineWaveSimulation::initSimulation(float *heights)
{
m_heightMap = heights;
}
// Advance the simulation one frame: launch a sinewave pass on `stream` using
// the configuration chosen in initCudaLaunchConfig().
// NOTE(review): m_width/m_height are size_t but the kernel takes unsigned
// int, so dimensions >= 2^32 would be truncated here — confirm acceptable.
void SineWaveSimulation::stepSimulation(float time, hipStream_t stream)
{
hipLaunchKernelGGL(( sinewave) , dim3(m_blocks), dim3(m_threads), 0, stream , m_heightMap, m_width, m_height, time);
getLastCudaError("Failed to launch CUDA simulation");
}
| aab47f56895e4fcf84aed37e0a125e9b3a7abee2.cu | /*
* Copyright 2019-2020 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include "SineWaveSimulation.h"
#include <algorithm>
#include <helper_cuda.h>
// Animated sine-wave height field: writes a height in [-0.5, 0.5] for every
// texel of a width x height grid at the given animation time.
// Grid-stride loop, so any launch configuration covers the whole surface.
__global__ void sinewave(float *heightMap, unsigned int width, unsigned int height, float time)
{
    const float freq = 4.0f;
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    // Promote before multiplying so width*height cannot wrap in 32-bit
    // unsigned arithmetic for very large surfaces.
    const size_t numVerts = (size_t)width * height;

    // Iterate through the entire array in a way that is
    // independent of the grid configuration
    for (size_t tid = blockIdx.x * blockDim.x + threadIdx.x; tid < numVerts; tid += stride) {
        // Calculate the x, y coordinates
        const size_t y = tid / width;
        const size_t x = tid - y * width;
        // Normalize x, y to [-1,1]
        const float u = ((2.0f * x) / width) - 1.0f;
        const float v = ((2.0f * y) / height) - 1.0f;

        // Calculate the new height value
        const float w = 0.5f * sinf(u * freq + time) * cosf(v * freq + time);

        // Store this new height value
        heightMap[tid] = w;
    }
}
// Record the requested grid dimensions; the height buffer itself is supplied
// later via initSimulation().
SineWaveSimulation::SineWaveSimulation(size_t width, size_t height)
: m_heightMap(nullptr), m_width(width), m_height(height)
{
}
// Choose a launch configuration for the sinewave kernel on `device`:
// one warp per block, and enough blocks to fill the GPU per the occupancy
// calculator, clamped to what the surface actually needs.
void SineWaveSimulation::initCudaLaunchConfig(int device)
{
cudaDeviceProp prop = {};
checkCudaErrors(cudaSetDevice(device));
checkCudaErrors(cudaGetDeviceProperties(&prop, device));
// We don't need large block sizes, since there's not much inter-thread communication
m_threads = prop.warpSize;
// Use the occupancy calculator and fill the gpu as best as we can
checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&m_blocks, sinewave, prop.warpSize, 0));
m_blocks *= prop.multiProcessorCount;
// Clamp the block count to the minimum needed to cover this height/width
// (the kernel's grid-stride loop tolerates any value).
m_blocks = std::min(m_blocks, (int)((m_width * m_height + m_threads - 1) / m_threads));
}
// Locate the CUDA device whose UUID matches the Vulkan physical device
// (vkDeviceUUID, UUID_SIZE bytes) and make it current.
// Returns the device ordinal on success; exits the process when no device
// exists or none is interop-capable; returns -1 if no UUID matched.
int SineWaveSimulation::initCuda(uint8_t *vkDeviceUUID, size_t UUID_SIZE)
{
    int current_device = 0;
    int device_count = 0;
    int devices_prohibited = 0;

    cudaDeviceProp deviceProp;
    checkCudaErrors(cudaGetDeviceCount(&device_count));

    if (device_count == 0) {
        fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }

    // Find the GPU which is selected by Vulkan
    while (current_device < device_count) {
        // Checked now, for consistency with the other runtime calls here;
        // previously the return code of this probe was silently ignored.
        checkCudaErrors(cudaGetDeviceProperties(&deviceProp, current_device));

        if ((deviceProp.computeMode != cudaComputeModeProhibited)) {
            // Compare the cuda device UUID with vulkan UUID
            int ret = memcmp((void*)&deviceProp.uuid, vkDeviceUUID, UUID_SIZE);
            if (ret == 0)
            {
                checkCudaErrors(cudaSetDevice(current_device));
                checkCudaErrors(cudaGetDeviceProperties(&deviceProp, current_device));
                printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
                       current_device, deviceProp.name, deviceProp.major,
                       deviceProp.minor);
                return current_device;
            }
        } else {
            devices_prohibited++;
        }
        current_device++;
    }

    if (devices_prohibited == device_count) {
        fprintf(stderr,
                "CUDA error:"
                " No Vulkan-CUDA Interop capable GPU found.\n");
        exit(EXIT_FAILURE);
    }

    return -1;
}
// The destructor does not free the height map: the buffer is caller-managed
// (handed to us in initSimulation), so we only drop our reference to it.
SineWaveSimulation::~SineWaveSimulation()
{
m_heightMap = NULL;
}
// Adopt (but do not own) the caller-managed buffer the kernel writes into.
void SineWaveSimulation::initSimulation(float *heights)
{
m_heightMap = heights;
}
// Advance the simulation one frame: launch a sinewave pass on `stream` using
// the configuration chosen in initCudaLaunchConfig().
// NOTE(review): m_width/m_height are size_t but the kernel takes unsigned
// int, so dimensions >= 2^32 would be truncated here — confirm acceptable.
void SineWaveSimulation::stepSimulation(float time, cudaStream_t stream)
{
sinewave <<< m_blocks, m_threads, 0, stream >>> (m_heightMap, m_width, m_height, time);
getLastCudaError("Failed to launch CUDA simulation");
}
|
c9b518bfe97cdf51ae2b8e8b002d0b969ce8e373.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_cuda.h>
#include <math.h>
#include <GL/glew.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// CUDA standard includes
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include "bodysystem.h"
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
// Host-side setters for the softening term used by the force kernels.
// Two overloads, one per precision, each copying into its own
// __constant__ symbol; the runtime's error code is passed back to the caller.
hipError_t setSofteningSquared(float softeningSq)
{
    return hipMemcpyToSymbol(softeningSquared, &softeningSq,
                             sizeof(float), 0, hipMemcpyHostToDevice);
}

hipError_t setSofteningSquared(double softeningSq)
{
    return hipMemcpyToSymbol(softeningSquared_fp64, &softeningSq,
                             sizeof(double), 0, hipMemcpyHostToDevice);
}
// Typed view of dynamically-allocated shared memory. The extern array must
// have one type across all template instantiations, hence the int backing
// store reinterpret-cast to T*.
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// Precision dispatch for reciprocal square root: the generic path uses
// rsqrt (double), the specialization uses rsqrtf (float).
template<typename T>
__device__ T rsqrt_T(T x)
{
return rsqrt(x);
}
template<>
__device__ float rsqrt_T<float>(float x)
{
return rsqrtf(x);
}
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
// Reads the constant-memory softening term matching the precision in use
// (softeningSquared for the generic/float path, softeningSquared_fp64 for
// double).
template <typename T>
__device__ T getSofteningSquared()
{
return softeningSquared;
}
template <>
__device__ double getSofteningSquared<double>()
{
return softeningSquared_fp64;
}
// Per-device simulation state for (possibly) multi-GPU integration.
template <typename T>
struct DeviceData
{
T *dPos[2]; // mapped host pointers (double-buffered positions)
T *dVel; // velocities
hipEvent_t event; // recorded after this device's kernel launch
unsigned int offset; // first body owned by this device
unsigned int numBodies; // number of bodies this device integrates
};
// Pairwise gravity term: returns ai plus the acceleration on body bi due to
// body bj (bj.w carries body j's mass; the softening term prevents the
// 1/r^2 blow-up at small separations).
template <typename T>
__device__ typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
typename vec4<T>::Type bi,
typename vec4<T>::Type bj)
{
typename vec3<T>::Type r;
// r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
distSqr += getSofteningSquared<T>();
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
T invDist = rsqrt_T(distSqr);
T invDistCube = invDist * invDist * invDist;
// s = m_j * invDistCube [1 FLOP]
T s = bj.w * invDistCube;
// a_i = a_i + s * r_ij [6 FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
return ai;
}
// This is the "tile_calculation" function from the GPUG3 article.
// Accumulates into `accel` the pull on body `iPos` from the blockDim.x bodies
// currently staged in shared memory by the caller.
template <typename T>
__device__ typename vec3<T>::Type
gravitation(typename vec4<T>::Type iPos,
            typename vec3<T>::Type accel)
{
    typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();

    // The CUDA 1.1 compiler cannot determine that i is not going to
    // overflow in the loop below. Therefore if int is used on 64-bit linux
    // or windows (or long instead of long long on win64), the compiler
    // generates suboptimal code. Therefore we use long long on win64 and
    // long on everything else. (Workaround for Bug ID 347697)
    // Bug fix: the predefined macro is _WIN64; the previous "_Win64" spelling
    // is never defined, so the 64-bit-Windows branch was unreachable.
#ifdef _WIN64
    unsigned long long j = 0;
#else
    unsigned long j = 0;
#endif

    // Here we unroll the loop to reduce bookkeeping instruction overhead
    // 32x unrolling seems to provide best performance

    // Note that having an unsigned int loop counter and an unsigned
    // long index helps the compiler generate efficient code on 64-bit
    // OSes. The compiler can't assume the 64-bit index won't overflow
    // so it incurs extra integer operations. This is a standard issue
    // in porting 32-bit code to 64-bit OSes.
#pragma unroll 32
    for (unsigned int counter = 0; counter < blockDim.x; counter++)
    {
        accel = bodyBodyInteraction<T>(accel, iPos, SX(j++));
    }

    return accel;
}
// WRAP is used to force each block to start working on a different
// chunk (and wrap around back to the beginning of the array) so that
// not all multiprocessors try to read the same memory locations at
// once.
#define WRAP(x,m) (((x)<(m))?(x):((x)-(m))) // Mod without divide, works on values from 0 up to 2m
#if 0
template <typename T, bool multithreadBodies>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
typename vec4<T>::Type *positions,
int numBodies)
{
typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
int p = blockDim.x;
int q = blockDim.y;
int n = numBodies;
int numTiles = (n + p*q - 1) / (p * q);
for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++)
{
int index = multithreadBodies ?
WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) :
WRAP(blockIdx.x + tile, gridDim.x-1);
index = index * p + threadIdx.x;
if (index < numBodies)
sharedPos[threadIdx.x+blockDim.x*threadIdx.y] = positions[index];
else
sharedPos[threadIdx.x+blockDim.x*threadIdx.y].w = 0;
__syncthreads();
// This is the "tile_calculation" function from the GPUG3 article.
acc = gravitation<T>(bodyPos, acc);
__syncthreads();
}
// When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
// underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
// be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
// threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
// of p threads each. Each thread processes 1/q of the forces that affect each body, and then
// 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
// threads for that body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
// threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads per body is greater
// than one, so that when it is not we don't have to execute the more complex code required!
if (multithreadBodies)
{
SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
__syncthreads();
// Save the result in global memory for the integration step
if (threadIdx.y == 0)
{
for (int i = 1; i < blockDim.y; i++)
{
acc.x += SX_SUM(threadIdx.x,i).x;
acc.y += SX_SUM(threadIdx.x,i).y;
acc.z += SX_SUM(threadIdx.x,i).z;
}
}
}
return acc;
}
#endif
// Total gravitational acceleration on bodyPos from all numBodies bodies.
// The positions array is walked tile-by-tile: each tile of p bodies is staged
// into shared memory, then gravitation() folds in its contribution.
// NOTE(review): numTiles = n / (p*q) truncates, so bodies beyond the last
// full tile are skipped unless n is a multiple of p*q — confirm callers
// guarantee this.
template <typename T, bool multithreadBodies>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
typename vec4<T>::Type *positions,
int numBodies)
{
typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
int p = blockDim.x;
int q = blockDim.y;
int n = numBodies;
int numTiles = n / (p * q);
// #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 200
// if (threadIdx.x == 0 && blockIdx.x == 0) {
// printf("computeBodyAccel: numTiles=%d, n = %d, p = %d, q = %d\n", numTiles, n, p, q);
// }
// #endif
for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++)
{
// Stage one tile into shared memory; WRAP staggers each block's starting
// tile so blocks don't all read the same locations at once.
sharedPos[threadIdx.x+blockDim.x*threadIdx.y] =
multithreadBodies ?
positions[WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) * p + threadIdx.x] :
positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x];
__syncthreads();
// This is the "tile_calculation" function from the GPUG3 article.
acc = gravitation<T>(bodyPos, acc);
__syncthreads();
}
// When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
// underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
// be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
// threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
// of p threads each. Each thread processes 1/q of the forces that affect each body, and then
// 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
// threads for that body. To enable this, use the "--p=" and "--q=" command line options to
// this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
// threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
// We use a bool template parameter to specify when the number of threads per body is greater
// than one, so that when it is not we don't have to execute the more complex code required!
if (multithreadBodies)
{
SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
__syncthreads();
// Save the result in global memory for the integration step
if (threadIdx.y == 0)
{
for (int i = 1; i < blockDim.y; i++)
{
acc.x += SX_SUM(threadIdx.x,i).x;
acc.y += SX_SUM(threadIdx.x,i).y;
acc.z += SX_SUM(threadIdx.x,i).z;
}
}
}
return acc;
}
// One integration step for this device's slice of bodies. Each thread owns
// body (deviceOffset + index): it computes the total acceleration against all
// totalNumBodies bodies, then applies a damped Euler update to velocity and
// position (double-buffered: reads oldPos, writes newPos).
// NOTE(review): threads with index >= deviceNumBodies return before
// computeBodyAccel, whose tile loop calls __syncthreads(); that is only safe
// when deviceNumBodies is a multiple of the block size — confirm callers
// guarantee this.
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type *newPos,
typename vec4<T>::Type *oldPos,
typename vec4<T>::Type *vel,
unsigned int deviceOffset, unsigned int deviceNumBodies,
float deltaTime, float damping, int totalNumBodies)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= deviceNumBodies)
{
return;
}
typename vec4<T>::Type position = oldPos[deviceOffset + index];
typename vec3<T>::Type accel = computeBodyAccel<T, multithreadBodies>(position, oldPos, totalNumBodies);
// With q threads per body, only the row-0 thread holds the fully reduced
// acceleration and performs the state update.
if (!multithreadBodies || (threadIdx.y == 0))
{
// acceleration = force \ mass;
// new velocity = old velocity + acceleration * deltaTime
// note we factor out the body's mass from the equation, here and in bodyBodyInteraction
// (because they cancel out). Thus here force == acceleration
typename vec4<T>::Type velocity = vel[deviceOffset + index];
velocity.x += accel.x * deltaTime;
velocity.y += accel.y * deltaTime;
velocity.z += accel.z * deltaTime;
velocity.x *= damping;
velocity.y *= damping;
velocity.z *= damping;
// new position = old position + velocity * deltaTime
position.x += velocity.x * deltaTime;
position.y += velocity.y * deltaTime;
position.z += velocity.z * deltaTime;
// store new position and velocity
newPos[deviceOffset + index] = position;
vel[deviceOffset + index] = velocity;
}
}
// Advance the whole n-body system one step of size deltaTime.
// Maps the OpenGL position buffers when bUsePBO is set, then for every device
// picks a (p x q) thread layout and launches integrateBodies on that device's
// slice of bodies; with multiple devices, per-device events serialize
// completion before returning.
// NOTE(review): p and q carry over from one device to the next after the
// shrink loop below — confirm that is intended rather than per-device values.
template <typename T>
void integrateNbodySystem(DeviceData<T> *deviceData,
                          cudaGraphicsResource **pgres,
                          unsigned int currentRead,
                          float deltaTime,
                          float damping,
                          unsigned int numBodies,
                          unsigned int numDevices,
                          int p,
                          int q,
                          bool bUsePBO)
{
    if (bUsePBO)
    {
        // Map the GL buffers: read-only for the current positions,
        // write-discard for the buffer about to be overwritten.
        checkCudaErrors(hipGraphicsResourceSetMapFlags(pgres[currentRead], hipGraphicsMapFlagsReadOnly));
        checkCudaErrors(hipGraphicsResourceSetMapFlags(pgres[1-currentRead], hipGraphicsMapFlagsWriteDiscard));
        checkCudaErrors(hipGraphicsMapResources(2, pgres, 0));
        size_t bytes;
        checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[currentRead]), &bytes, pgres[currentRead]));
        checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[1-currentRead]), &bytes, pgres[1-currentRead]));
    }

    hipDeviceProp_t props;

    for (unsigned int dev = 0; dev != numDevices; dev++)
    {
        if (numDevices > 1)
        {
            // Checked now, for consistency with every other runtime call here.
            checkCudaErrors(hipSetDevice(dev));
        }

        checkCudaErrors(hipGetDeviceProperties(&props, dev));

        // Shrink p (and grow q) until there are enough blocks to occupy all
        // multiprocessors on this device.
        while ((deviceData[dev].numBodies > 0) && p > 1 &&
               (deviceData[dev].numBodies / p < (unsigned)props.multiProcessorCount))
        {
            p /= 2;
            q *= 2;
        }

        dim3 threads(p,q,1);
        dim3 grid((deviceData[dev].numBodies + (p-1))/p, 1, 1);

        // execute the kernel:
        // When the numBodies / thread block size is < # multiprocessors
        // (16 on G80), the GPU is underutilized. For example, with 256 threads per
        // block and 1024 bodies, there will only be 4 thread blocks, so the
        // GPU will only be 25% utilized. To improve this, we use multiple threads
        // per body. We still can use blocks of 256 threads, but they are arranged
        // in q rows of p threads each. Each thread processes 1/q of the forces
        // that affect each body, and then 1/q of the threads (those with
        // threadIdx.y==0) add up the partial sums from the other threads for that
        // body. To enable this, use the "--p=" and "--q=" command line options to
        // this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4
        // threads per body and 256 threads per block. There will be n/p = 16
        // blocks, so a G80 GPU will be 100% utilized.

        // We use a bool template parameter to specify when the number of threads
        // per body is greater than one, so that when it is not we don't have to
        // execute the more complex code required!
        int sharedMemSize = p * q * 4 * sizeof(T); // 4 floats for pos

        if (grid.x > 0 && threads.y == 1)
        {
            hipLaunchKernelGGL(( integrateBodies<T, false>), dim3(grid), dim3(threads), sharedMemSize , 0,
                (typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead],
                (typename vec4<T>::Type *)deviceData[dev].dPos[currentRead],
                (typename vec4<T>::Type *)deviceData[dev].dVel,
                deviceData[dev].offset, deviceData[dev].numBodies,
                deltaTime, damping, numBodies);
        }
        else if (grid.x > 0)
        {
            hipLaunchKernelGGL(( integrateBodies<T, true>), dim3(grid), dim3(threads), sharedMemSize , 0,
                (typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead],
                (typename vec4<T>::Type *)deviceData[dev].dPos[currentRead],
                (typename vec4<T>::Type *)deviceData[dev].dVel,
                deviceData[dev].offset, deviceData[dev].numBodies,
                deltaTime, damping, numBodies);
        }

        if (numDevices > 1)
        {
            checkCudaErrors(hipEventRecord(deviceData[dev].event));
            // MJH: Hack on older driver versions to force kernel launches to flush!
            hipStreamQuery(0);
        }

        // check if kernel invocation generated an error
        getLastCudaError("Kernel execution failed");
    }

    if (numDevices > 1)
    {
        for (unsigned int dev = 0; dev < numDevices; dev++)
        {
            checkCudaErrors(hipEventSynchronize(deviceData[dev].event));
        }
    }

    if (bUsePBO)
    {
        checkCudaErrors(hipGraphicsUnmapResources(2, pgres, 0));
    }
}
// Explicit instantiations so the float and double variants of the integrator
// are emitted into this translation unit for external callers.
template void integrateNbodySystem<float>(DeviceData<float> *deviceData,
cudaGraphicsResource **pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p, int q,
bool bUsePBO);
template void integrateNbodySystem<double>(DeviceData<double> *deviceData,
cudaGraphicsResource **pgres,
unsigned int currentRead,
float deltaTime,
float damping,
unsigned int numBodies,
unsigned int numDevices,
int p, int q,
bool bUsePBO);
| c9b518bfe97cdf51ae2b8e8b002d0b969ce8e373.cu | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_cuda.h>
#include <math.h>
#include <GL/glew.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
// CUDA standard includes
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include "bodysystem.h"
__constant__ float softeningSquared;
__constant__ double softeningSquared_fp64;
// Host-side setters for the softening term used by the force kernels.
// Two overloads, one per precision, each copying into its own
// __constant__ symbol; the runtime's error code is passed back to the caller.
cudaError_t setSofteningSquared(float softeningSq)
{
    return cudaMemcpyToSymbol(softeningSquared, &softeningSq,
                              sizeof(float), 0, cudaMemcpyHostToDevice);
}

cudaError_t setSofteningSquared(double softeningSq)
{
    return cudaMemcpyToSymbol(softeningSquared_fp64, &softeningSq,
                              sizeof(double), 0, cudaMemcpyHostToDevice);
}
// Typed view of dynamically-allocated shared memory. The extern array must
// have one type across all template instantiations, hence the int backing
// store reinterpret-cast to T*.
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// Precision dispatch for reciprocal square root: the generic path uses
// rsqrt (double), the specialization uses rsqrtf (float).
template<typename T>
__device__ T rsqrt_T(T x)
{
return rsqrt(x);
}
template<>
__device__ float rsqrt_T<float>(float x)
{
return rsqrtf(x);
}
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
// This macro is only used when multithreadBodies is true (below)
#define SX_SUM(i,j) sharedPos[i+blockDim.x*j]
// Reads the constant-memory softening term matching the precision in use
// (softeningSquared for the generic/float path, softeningSquared_fp64 for
// double).
template <typename T>
__device__ T getSofteningSquared()
{
return softeningSquared;
}
template <>
__device__ double getSofteningSquared<double>()
{
return softeningSquared_fp64;
}
// Per-device simulation state for (possibly) multi-GPU integration.
template <typename T>
struct DeviceData
{
T *dPos[2]; // mapped host pointers (double-buffered positions)
T *dVel; // velocities
cudaEvent_t event; // recorded after this device's kernel launch
unsigned int offset; // first body owned by this device
unsigned int numBodies; // number of bodies this device integrates
};
// Pairwise gravity term: returns ai plus the acceleration on body bi due to
// body bj (bj.w carries body j's mass; the softening term prevents the
// 1/r^2 blow-up at small separations).
template <typename T>
__device__ typename vec3<T>::Type
bodyBodyInteraction(typename vec3<T>::Type ai,
typename vec4<T>::Type bi,
typename vec4<T>::Type bj)
{
typename vec3<T>::Type r;
// r_ij [3 FLOPS]
r.x = bj.x - bi.x;
r.y = bj.y - bi.y;
r.z = bj.z - bi.z;
// distSqr = dot(r_ij, r_ij) + EPS^2 [6 FLOPS]
T distSqr = r.x * r.x + r.y * r.y + r.z * r.z;
distSqr += getSofteningSquared<T>();
// invDistCube =1/distSqr^(3/2) [4 FLOPS (2 mul, 1 sqrt, 1 inv)]
T invDist = rsqrt_T(distSqr);
T invDistCube = invDist * invDist * invDist;
// s = m_j * invDistCube [1 FLOP]
T s = bj.w * invDistCube;
// a_i = a_i + s * r_ij [6 FLOPS]
ai.x += r.x * s;
ai.y += r.y * s;
ai.z += r.z * s;
return ai;
}
// This is the "tile_calculation" function from the GPUG3 article.
// Accumulates into `accel` the pull on body `iPos` from the blockDim.x bodies
// currently staged in shared memory by the caller.
template <typename T>
__device__ typename vec3<T>::Type
gravitation(typename vec4<T>::Type iPos,
            typename vec3<T>::Type accel)
{
    typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();

    // The CUDA 1.1 compiler cannot determine that i is not going to
    // overflow in the loop below. Therefore if int is used on 64-bit linux
    // or windows (or long instead of long long on win64), the compiler
    // generates suboptimal code. Therefore we use long long on win64 and
    // long on everything else. (Workaround for Bug ID 347697)
    // Bug fix: the predefined macro is _WIN64; the previous "_Win64" spelling
    // is never defined, so the 64-bit-Windows branch was unreachable.
#ifdef _WIN64
    unsigned long long j = 0;
#else
    unsigned long j = 0;
#endif

    // Here we unroll the loop to reduce bookkeeping instruction overhead
    // 32x unrolling seems to provide best performance

    // Note that having an unsigned int loop counter and an unsigned
    // long index helps the compiler generate efficient code on 64-bit
    // OSes. The compiler can't assume the 64-bit index won't overflow
    // so it incurs extra integer operations. This is a standard issue
    // in porting 32-bit code to 64-bit OSes.
#pragma unroll 32
    for (unsigned int counter = 0; counter < blockDim.x; counter++)
    {
        accel = bodyBodyInteraction<T>(accel, iPos, SX(j++));
    }

    return accel;
}
// WRAP is used to force each block to start working on a different
// chunk (and wrap around back to the beginning of the array) so that
// not all multiprocessors try to read the same memory locations at
// once.
#define WRAP(x,m) (((x)<(m))?(x):((x)-(m))) // Mod without divide, works on values from 0 up to 2m
#if 0
// NOTE(review): this variant is compiled out by the surrounding
// `#if 0` / `#endif`. Unlike the active variant below, it bounds-checks
// the shared-memory load (zeroing the mass .w for out-of-range bodies),
// so it tolerates numBodies that is not a multiple of p*q. It is kept
// here only as reference; changes should go to the active copy.
template <typename T, bool multithreadBodies>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
                 typename vec4<T>::Type *positions,
                 int numBodies)
{
    typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
    typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
    int p = blockDim.x;
    int q = blockDim.y;
    int n = numBodies;
    // Ceil-divide: every body is covered even when n % (p*q) != 0.
    int numTiles = (n + p*q - 1) / (p * q);
    for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++)
    {
        // NOTE(review): the single-thread-per-body path wraps modulo
        // gridDim.x-1 here while the multithread path (and the active
        // variant) uses gridDim.x — looks inconsistent; verify if revived.
        int index = multithreadBodies ?
                    WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) :
                    WRAP(blockIdx.x + tile, gridDim.x-1);
        index = index * p + threadIdx.x;
        if (index < numBodies)
            sharedPos[threadIdx.x+blockDim.x*threadIdx.y] = positions[index];
        else
            // Mass of zero makes the padded entry contribute nothing.
            sharedPos[threadIdx.x+blockDim.x*threadIdx.y].w = 0;
        __syncthreads();
        // This is the "tile_calculation" function from the GPUG3 article.
        acc = gravitation<T>(bodyPos, acc);
        __syncthreads();
    }
    // When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
    // underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
    // be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
    // threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
    // of p threads each. Each thread processes 1/q of the forces that affect each body, and then
    // 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
    // threads for that body. To enable this, use the "--p=" and "--q=" command line options to
    // this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
    // threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
    // We use a bool template parameter to specify when the number of threads per body is greater
    // than one, so that when it is not we don't have to execute the more complex code required!
    if (multithreadBodies)
    {
        SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
        SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
        SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
        __syncthreads();
        // Save the result in global memory for the integration step
        // Only threadIdx.y == 0 accumulates the full sum; other rows'
        // `acc` remains a partial result.
        if (threadIdx.y == 0)
        {
            for (int i = 1; i < blockDim.y; i++)
            {
                acc.x += SX_SUM(threadIdx.x,i).x;
                acc.y += SX_SUM(threadIdx.x,i).y;
                acc.z += SX_SUM(threadIdx.x,i).z;
            }
        }
    }
    return acc;
}
#endif
// Computes the total gravitational acceleration on `bodyPos` from all
// `numBodies` positions, tiled through shared memory (one p*q tile per
// iteration). Requires dynamic shared memory for at least p*q vec4
// elements (2*p*q when multithreadBodies, for the SX_SUM partial sums).
// NOTE(review): numTiles truncates (n / (p*q)) and the tile load below
// has no bounds check, so this variant assumes numBodies is a multiple
// of p*q — confirm callers pad the body arrays accordingly.
template <typename T, bool multithreadBodies>
__device__ typename vec3<T>::Type
computeBodyAccel(typename vec4<T>::Type bodyPos,
                 typename vec4<T>::Type *positions,
                 int numBodies)
{
    typename vec4<T>::Type *sharedPos = SharedMemory<typename vec4<T>::Type>();
    typename vec3<T>::Type acc = {0.0f, 0.0f, 0.0f};
    int p = blockDim.x;
    int q = blockDim.y;
    int n = numBodies;
    int numTiles = n / (p * q);
    // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 200
    // if (threadIdx.x == 0 && blockIdx.x == 0) {
    //   printf("computeBodyAccel: numTiles=%d, n = %d, p = %d, q = %d\n", numTiles, n, p, q);
    // }
    // #endif
    // Start each block at a different tile (blockIdx.y offset + WRAP) so
    // blocks don't all hit the same global-memory locations at once.
    for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++)
    {
        sharedPos[threadIdx.x+blockDim.x*threadIdx.y] =
            multithreadBodies ?
            positions[WRAP(blockIdx.x + q * tile + threadIdx.y, gridDim.x) * p + threadIdx.x] :
            positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x];
        __syncthreads();
        // This is the "tile_calculation" function from the GPUG3 article.
        acc = gravitation<T>(bodyPos, acc);
        __syncthreads();
    }
    // When the numBodies / thread block size is < # multiprocessors (16 on G80), the GPU is
    // underutilized. For example, with a 256 threads per block and 1024 bodies, there will only
    // be 4 thread blocks, so the GPU will only be 25% utilized. To improve this, we use multiple
    // threads per body. We still can use blocks of 256 threads, but they are arranged in q rows
    // of p threads each. Each thread processes 1/q of the forces that affect each body, and then
    // 1/q of the threads (those with threadIdx.y==0) add up the partial sums from the other
    // threads for that body. To enable this, use the "--p=" and "--q=" command line options to
    // this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4 threads per body and 256
    // threads per block. There will be n/p = 16 blocks, so a G80 GPU will be 100% utilized.
    // We use a bool template parameter to specify when the number of threads per body is greater
    // than one, so that when it is not we don't have to execute the more complex code required!
    if (multithreadBodies)
    {
        SX_SUM(threadIdx.x, threadIdx.y).x = acc.x;
        SX_SUM(threadIdx.x, threadIdx.y).y = acc.y;
        SX_SUM(threadIdx.x, threadIdx.y).z = acc.z;
        __syncthreads();
        // Save the result in global memory for the integration step
        // Only threadIdx.y == 0 ends up with the complete sum; callers
        // must consume the result from that row only (integrateBodies does).
        if (threadIdx.y == 0)
        {
            for (int i = 1; i < blockDim.y; i++)
            {
                acc.x += SX_SUM(threadIdx.x,i).x;
                acc.y += SX_SUM(threadIdx.x,i).y;
                acc.z += SX_SUM(threadIdx.x,i).z;
            }
        }
    }
    return acc;
}
// Leapfrog-style integration step: for each body, accumulate the
// gravitational acceleration over all bodies, update velocity (with
// damping), advance the position, and write both back to global memory.
// `deviceOffset`/`deviceNumBodies` select this device's slice of the
// arrays; `totalNumBodies` is the full system size used for the force sum.
template<typename T, bool multithreadBodies>
__global__ void
integrateBodies(typename vec4<T>::Type *newPos,
                typename vec4<T>::Type *oldPos,
                typename vec4<T>::Type *vel,
                unsigned int deviceOffset, unsigned int deviceNumBodies,
                float deltaTime, float damping, int totalNumBodies)
{
    const int body = blockIdx.x * blockDim.x + threadIdx.x;
    if (body >= deviceNumBodies)
    {
        return;
    }

    typename vec4<T>::Type pos = oldPos[deviceOffset + body];

    // Sum the acceleration contributions from every body in the system.
    typename vec3<T>::Type accel =
        computeBodyAccel<T, multithreadBodies>(pos, oldPos, totalNumBodies);

    // When several threads cooperate on one body, only row y == 0 holds
    // the complete force sum and performs the integration.
    if (multithreadBodies && (threadIdx.y != 0))
    {
        return;
    }

    // acceleration = force \ mass; the body's mass is factored out here
    // and in bodyBodyInteraction (it cancels), so force == acceleration.
    typename vec4<T>::Type v = vel[deviceOffset + body];

    // new velocity = damping * (old velocity + acceleration * dt)
    v.x += accel.x * deltaTime;
    v.y += accel.y * deltaTime;
    v.z += accel.z * deltaTime;

    v.x *= damping;
    v.y *= damping;
    v.z *= damping;

    // new position = old position + velocity * dt
    pos.x += v.x * deltaTime;
    pos.y += v.y * deltaTime;
    pos.z += v.z * deltaTime;

    // Publish the updated state for the next step.
    newPos[deviceOffset + body] = pos;
    vel[deviceOffset + body] = v;
}
// Host-side driver for one n-body integration step, possibly across
// multiple GPUs. Maps the position PBOs when requested, shrinks p (and
// grows q) until there are enough blocks to occupy every SM, launches
// integrateBodies on each device, and synchronizes across devices.
// `p` x `q` is the thread-block shape; shared memory holds one vec4
// position tile (p*q*4*sizeof(T) bytes).
template <typename T>
void integrateNbodySystem(DeviceData<T> *deviceData,
                          cudaGraphicsResource **pgres,
                          unsigned int currentRead,
                          float deltaTime,
                          float damping,
                          unsigned int numBodies,
                          unsigned int numDevices,
                          int p,
                          int q,
                          bool bUsePBO)
{
    if (bUsePBO)
    {
        // Map the OpenGL position buffers: the current buffer is read,
        // the other is fully overwritten (WriteDiscard avoids a copy-in).
        checkCudaErrors(cudaGraphicsResourceSetMapFlags(pgres[currentRead], cudaGraphicsMapFlagsReadOnly));
        checkCudaErrors(cudaGraphicsResourceSetMapFlags(pgres[1-currentRead], cudaGraphicsMapFlagsWriteDiscard));
        checkCudaErrors(cudaGraphicsMapResources(2, pgres, 0));
        size_t bytes;
        checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[currentRead]), &bytes, pgres[currentRead]));
        checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&(deviceData[0].dPos[1-currentRead]), &bytes, pgres[1-currentRead]));
    }
    cudaDeviceProp props;
    for (unsigned int dev = 0; dev != numDevices; dev++)
    {
        if (numDevices > 1)
        {
            // FIX: the cudaSetDevice return value was previously ignored;
            // a failure here would silently run every subsequent call on
            // the wrong device. Wrap it like every other CUDA call.
            checkCudaErrors(cudaSetDevice(dev));
        }
        checkCudaErrors(cudaGetDeviceProperties(&props, dev));
        // If there are fewer blocks than multiprocessors, trade block
        // width for multithread-per-body depth to keep the GPU busy.
        while ((deviceData[dev].numBodies > 0) && p > 1 &&
               (deviceData[dev].numBodies / p < (unsigned)props.multiProcessorCount))
        {
            p /= 2;
            q *= 2;
        }
        dim3 threads(p,q,1);
        dim3 grid((deviceData[dev].numBodies + (p-1))/p, 1, 1);
        // execute the kernel:
        // When the numBodies / thread block size is < # multiprocessors
        // (16 on G80), the GPU is underutilized. For example, with 256 threads per
        // block and 1024 bodies, there will only be 4 thread blocks, so the
        // GPU will only be 25% utilized. To improve this, we use multiple threads
        // per body. We still can use blocks of 256 threads, but they are arranged
        // in q rows of p threads each. Each thread processes 1/q of the forces
        // that affect each body, and then 1/q of the threads (those with
        // threadIdx.y==0) add up the partial sums from the other threads for that
        // body. To enable this, use the "--p=" and "--q=" command line options to
        // this example. e.g.: "nbody.exe --n=1024 --p=64 --q=4" will use 4
        // threads per body and 256 threads per block. There will be n/p = 16
        // blocks, so a G80 GPU will be 100% utilized.
        // We use a bool template parameter to specify when the number of threads
        // per body is greater than one, so that when it is not we don't have to
        // execute the more complex code required!
        int sharedMemSize = p * q * 4 * sizeof(T); // 4 floats for pos
        if (grid.x > 0 && threads.y == 1)
        {
            integrateBodies<T, false><<< grid, threads, sharedMemSize >>>
            ((typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead],
             (typename vec4<T>::Type *)deviceData[dev].dPos[currentRead],
             (typename vec4<T>::Type *)deviceData[dev].dVel,
             deviceData[dev].offset, deviceData[dev].numBodies,
             deltaTime, damping, numBodies);
        }
        else if (grid.x > 0)
        {
            integrateBodies<T, true><<< grid, threads, sharedMemSize >>>
            ((typename vec4<T>::Type *)deviceData[dev].dPos[1-currentRead],
             (typename vec4<T>::Type *)deviceData[dev].dPos[currentRead],
             (typename vec4<T>::Type *)deviceData[dev].dVel,
             deviceData[dev].offset, deviceData[dev].numBodies,
             deltaTime, damping, numBodies);
        }
        if (numDevices > 1)
        {
            checkCudaErrors(cudaEventRecord(deviceData[dev].event));
            // MJH: Hack on older driver versions to force kernel launches to flush!
            cudaStreamQuery(0);
        }
        // check if kernel invocation generated an error
        getLastCudaError("Kernel execution failed");
    }
    if (numDevices > 1)
    {
        // Wait for every device's step to complete before returning.
        for (unsigned int dev = 0; dev < numDevices; dev++)
        {
            checkCudaErrors(cudaEventSynchronize(deviceData[dev].event));
        }
    }
    if (bUsePBO)
    {
        checkCudaErrors(cudaGraphicsUnmapResources(2, pgres, 0));
    }
}
// Explicit specializations needed to generate code
// Single-precision instantiation (fp32 positions/velocities).
template void integrateNbodySystem<float>(DeviceData<float> *deviceData,
                                          cudaGraphicsResource **pgres,
                                          unsigned int currentRead,
                                          float deltaTime,
                                          float damping,
                                          unsigned int numBodies,
                                          unsigned int numDevices,
                                          int p, int q,
                                          bool bUsePBO);
// Double-precision instantiation (fp64 positions/velocities).
template void integrateNbodySystem<double>(DeviceData<double> *deviceData,
                                           cudaGraphicsResource **pgres,
                                           unsigned int currentRead,
                                           float deltaTime,
                                           float damping,
                                           unsigned int numBodies,
                                           unsigned int numDevices,
                                           int p, int q,
                                           bool bUsePBO);
|
fc60821a7f5bed08ef16ab7b94205f345c7984e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Element-wise sine: b[i] = sin(a[i]) for one element per block.
// Indexed purely by blockIdx.x with no bounds check, so it must be
// launched with exactly one block per valid element (main launches
// <<<n, 1>>> with n <= array size). NOTE(review): the name `sin`
// shadows the math-library function inside device code here.
__global__ void sin(float *a, float *b)
{
    int i = blockIdx.x;
    b[i] = sinf(a[i]);
}
// Reads up to 20 radian values from stdin, computes their sines on the
// GPU (one block per element), and prints the results.
int main()
{
    float a[20], b[20];
    int n, i;
    printf("Enter size");
    // FIX: the size and element reads were unchecked; n > 20 overflowed
    // the fixed 20-element a[]/b[] buffers (and launched out-of-bounds
    // blocks). Validate the input before using it.
    if (scanf("%d", &n) != 1 || n < 1 || n > 20)
    {
        fprintf(stderr, "\nSize must be an integer between 1 and 20\n");
        return 1;
    }
    printf("\nEnter rad array \n");
    for(i = 0; i < n; i++)
    {
        if (scanf("%f", &a[i]) != 1)
        {
            fprintf(stderr, "\nInvalid value at index %d\n", i);
            return 1;
        }
    }
    float *d_a, *d_b;
    int size = sizeof(float) * 20;
    hipMalloc((void**)&d_a, size);
    hipMalloc((void**)&d_b, size);
    hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
    // One block per element; the kernel indexes by blockIdx.x only.
    hipLaunchKernelGGL(( sin), dim3(n),dim3(1), 0, 0, d_a, d_b);
    hipMemcpy(&b, d_b, size, hipMemcpyDeviceToHost);
    printf("\nResultant arr \n");
    for(i = 0; i < n; i++)
        printf("%f ", b[i]);
    printf("\n");
    hipFree(d_a);
    hipFree(d_b);
    return 0;
}
| fc60821a7f5bed08ef16ab7b94205f345c7984e5.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Element-wise sine: b[i] = sin(a[i]), one element per block.
// No bounds check — relies on being launched with exactly one block per
// valid element (main uses <<<n, 1>>> with n <= array size).
// NOTE(review): the name `sin` shadows the math-library sin in device code.
__global__ void sin(float *a, float *b)
{
    int i = blockIdx.x;
    b[i] = sinf(a[i]);
}
// Reads up to 20 radian values from stdin, computes their sines on the
// GPU (one block per element), and prints the results.
int main()
{
    float a[20], b[20];
    int n, i;
    printf("Enter size");
    // FIX: scanf results were unchecked and n was unbounded; n > 20
    // overflowed the fixed 20-element a[]/b[] stack buffers. Validate
    // the size before any array access.
    if (scanf("%d", &n) != 1 || n < 1 || n > 20)
    {
        fprintf(stderr, "\nSize must be an integer between 1 and 20\n");
        return 1;
    }
    printf("\nEnter rad array \n");
    for(i = 0; i < n; i++)
    {
        if (scanf("%f", &a[i]) != 1)
        {
            fprintf(stderr, "\nInvalid value at index %d\n", i);
            return 1;
        }
    }
    float *d_a, *d_b;
    int size = sizeof(float) * 20;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    // One block per element; the kernel indexes by blockIdx.x only.
    sin<<<n,1>>>(d_a, d_b);
    cudaMemcpy(&b, d_b, size, cudaMemcpyDeviceToHost);
    printf("\nResultant arr \n");
    for(i = 0; i < n; i++)
        printf("%f ", b[i]);
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
9f86cdf9fd9e27cc42c69eb8bddd6ef1e4c87e2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NUM_THREADS 32
#define NUM_BLOCKS 1024
// Retire the diagonal of the n x n distance matrix: dist(i, i) is set
// to +inf so the min-search kernels never match a cluster with itself.
// `pitch_dist` is the row stride in elements as used here. blockIdx is
// unused — a block of NUM_THREADS threads strides the whole diagonal
// (extra blocks would merely repeat the same idempotent writes).
__global__ void convert_kernel(float * dist, size_t pitch_dist, size_t n)
{
    size_t i = threadIdx.x;
    while (i < n) {
        dist[i * pitch_dist + i] = CUDART_INF_F;
        i += NUM_THREADS;
    }
}
// Phase 1 of the global minimum search: each block scans one row of the
// lower triangle of `dist` (columns 0..row) and writes that row's
// minimum value and its column into min_val[row] / min_col[row].
// Cluster liveness is encoded in `count`: a negative count marks a row
// that was already merged (see find_min2_kernel, which negates it).
// One block per row, NUM_THREADS threads per block.
__global__ void find_min1_kernel(const float * dist, const size_t pitch_dist,
    const size_t n, const float * count, float * min_val, size_t * min_col,
    const size_t row_offset)
{
    // Determine which row this block will handle
    const size_t row = row_offset + blockIdx.x;
    // If the row has already been merged, skip the work
    if((threadIdx.x == 0) && (row < n) && (count[row] < 0.f)) {
        min_val[row] = CUDART_INF_F;
        min_col[row] = 0;
    }
    // This predicate is uniform across the block (row is per-block), so
    // returning here does not split the block around the barriers below.
    if((row >= n) || (count[row] <= 0.f))
        return;
    __shared__ float vals[NUM_THREADS];
    __shared__ size_t cols[NUM_THREADS];
    // Initialize with identity
    // NOTE(review): cols[] is left uninitialized; if a lane never finds a
    // value below +inf its cols entry is garbage, but such lanes also never
    // win the reduction unless the whole row is +inf — verify that case.
    vals[threadIdx.x] = CUDART_INF_F;
    // Find the minimum
    for(size_t col = threadIdx.x; col <= row; col += NUM_THREADS) {
        float t = dist[row * pitch_dist + col];
        if(t < vals[threadIdx.x]) {
            vals[threadIdx.x] = t;
            cols[threadIdx.x] = col;
        }
    }
    __syncthreads();
    // Reduce
    // Standard shared-memory tree reduction (argmin) over the per-lane minima.
    for(size_t stride = NUM_THREADS >> 1; stride > 0; stride >>= 1) {
        if((threadIdx.x < stride)
           && (vals[threadIdx.x] > vals[threadIdx.x + stride]))
        {
            vals[threadIdx.x] = vals[threadIdx.x + stride];
            cols[threadIdx.x] = cols[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Write the result
    if(threadIdx.x == 0) {
        min_val[row] = vals[0];
        min_col[row] = cols[0];
    }
}
// Phase 2 of the global minimum search: reduce the per-row minima from
// find_min1_kernel down to the single closest pair, then record the
// merge for iteration `iter`: sub[iter] = absorbed cluster (column),
// sup[iter] = surviving cluster (row), val[iter] = their distance.
// The absorbed cluster's count is negated to mark it dead; its size is
// folded into the survivor's count. Launch with ONE block of
// NUM_THREADS threads (a single-block reduction).
__global__ void find_min2_kernel(const float * min_val, const size_t * min_col,
    float * count, int * sub, int * sup, float * val, const size_t n,
    const size_t iter)
{
    __shared__ float vals[NUM_THREADS];
    __shared__ size_t cols[NUM_THREADS];
    // Initialize with identity
    vals[threadIdx.x] = CUDART_INF_F;
    // Find the minimum
    // Each lane strides the row-minima array; cols holds the winning row.
    for(size_t row = threadIdx.x; row < n; row += NUM_THREADS) {
        float t = min_val[row];
        if(t < vals[threadIdx.x]) {
            vals[threadIdx.x] = t;
            cols[threadIdx.x] = row;
        }
    }
    __syncthreads();
    // Reduce
    for(size_t stride = NUM_THREADS >> 1; stride > 0; stride >>= 1) {
        if(threadIdx.x < stride) {
            if(vals[threadIdx.x] > vals[threadIdx.x + stride]) {
                vals[threadIdx.x] = vals[threadIdx.x + stride];
                cols[threadIdx.x] = cols[threadIdx.x + stride];
            }
        }
        __syncthreads();
    }
    // Write out
    if(threadIdx.x == 0) {
        // Winning value is vals[0]
        // Winning row is cols[0]
        // Winning column is min_col[cols[0]]
        int row_winner = cols[0];
        int col_winner = min_col[cols[0]];
        val[iter] = vals[0];
        sub[iter] = col_winner;
        sup[iter] = row_winner;
        // Survivor absorbs the loser's member count; the loser's count is
        // negated so later kernels can tell it has been merged.
        count[row_winner] += count[col_winner];
        count[col_winner] *= -1.f;
    }
}
// Single-linkage update for one matrix column after merging cluster
// sub[iter] (absorbed) into sup[iter] (survivor): the new distance from
// the survivor to `col` is the minimum of the two old distances. The
// absorbed cluster's row/column, and the survivor's self-distances, are
// retired to +inf. lambda/beta/count/val are unused by this scheme but
// kept for the shared kernel signature.
__global__ void single_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if (col >= n)
        return;
    const int merged = sub[iter];   // cluster being absorbed
    const int keeper = sup[iter];   // cluster that survives the merge
    float d_merged = dist[merged * pitch_dist + col];
    float d_keeper = dist[keeper * pitch_dist + col];
    d_keeper = min(d_keeper, d_merged);
    if (col == merged || col == keeper)
        d_keeper = CUDART_INF_F;
    d_merged = CUDART_INF_F;
    // Write both triangles to keep the matrix symmetric.
    dist[merged * pitch_dist + col] = d_merged;
    dist[col * pitch_dist + merged] = d_merged;
    dist[keeper * pitch_dist + col] = d_keeper;
    dist[col * pitch_dist + keeper] = d_keeper;
}
// Complete-linkage update for one matrix column after merging cluster
// sub[iter] (absorbed) into sup[iter] (survivor): the new distance is
// the MAXIMUM of the two old distances. The absorbed cluster's row and
// column, and the survivor's self-distances, are retired to +inf.
// lambda/beta/count/val are unused here; the signature is shared with
// the other linkage kernels. One thread per column.
__global__ void complete_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // If it matters
    if(col < n) {
        int
        col_winner = sub[iter], row_winner = sup[iter];
        float
        top_val = dist[col_winner * pitch_dist + col],
        bot_val = dist[row_winner * pitch_dist + col];
        bot_val = fmaxf(bot_val, top_val);
        if((col == col_winner) || (col == row_winner))
            bot_val = CUDART_INF_F;
        top_val = CUDART_INF_F;
        // Write out (both triangles, keeping the matrix symmetric)
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// WPGMA update for one matrix column after merging cluster sub[iter]
// (absorbed) into sup[iter] (survivor): the new distance is the plain
// average of the two old distances. Merged/self entries are retired to
// +inf; both triangles are written to keep the matrix symmetric.
// One thread per column; lambda/beta/count/val unused (shared signature).
__global__ void wpgma_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        // FIX: 2.0f instead of the double literal 2.0 — the old code
        // promoted the average to fp64 and back for no gain (division by
        // 2 is exact in either precision, so the result is unchanged).
        bot_val = (bot_val + top_val) / 2.0f;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// UPGMA (size-weighted average) update for one matrix column after
// merging cluster sub[iter] (size np) into sup[iter] (combined size nr):
// new distance = (d(sub,col)*np + d(sup,col)*nq) / nr. The absorbed
// cluster's count was negated by find_min2_kernel, hence the sign flip.
// Merged/self entries are retired to +inf; both triangles are written.
__global__ void average_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        float nr = count[row_winner];
        // FIX: plain float negation instead of `-1.0 * count[...]`, which
        // promoted the value through fp64; negation is exact, so the
        // result is unchanged.
        float np = -count[col_winner];
        float nq = nr - np;
        bot_val = (top_val * np + bot_val * nq) / nr;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Median (WPGMC) update for one matrix column after merging sub[iter]
// into sup[iter]: new distance = (d1 + d2)/2 - d(sub,sup)/4, where
// val[iter] is the distance between the merged pair. Merged/self
// entries are retired to +inf; both triangles are written.
__global__ void median_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        // FIX: float literals (2.0f, 4.0f) instead of double 2.0/4.0 —
        // the old code promoted the whole expression to fp64. Dividing
        // by powers of two is exact, so the result is unchanged.
        bot_val = (bot_val + top_val) / 2.0f - val[iter] / 4.0f;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// McQuitty (WPGMA) update for one matrix column after merging sub[iter]
// into sup[iter]: new distance is the plain average of the two old
// distances. (Formula matches wpgma_kernel; both names are kept for the
// host-side method table.) Merged/self entries retire to +inf.
__global__ void mcquitty_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        // FIX: 2.0f instead of double 2.0 — avoids an fp64 round trip;
        // division by 2 is exact, so the result is unchanged.
        bot_val = (bot_val + top_val) / 2.0f;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Centroid (UPGMC) update for one matrix column after merging sub[iter]
// (size np) into sup[iter] (combined size nr, with nq = nr - np):
// new distance = (d1*np + d2*nq)/nr - np*nq*d(sub,sup)/nr^2, where
// val[iter] is the merged pair's distance. The absorbed cluster's count
// was negated by find_min2_kernel, hence np = -count[col_winner].
// Merged/self entries retire to +inf; both triangles are written.
__global__ void centroid_kernel(float * dist, size_t pitch_dist,
    size_t n, const int * sub, const int * sup, const float * count,
    const float * val, size_t iter, size_t col_offset,
    float lambda, float beta)
{
    size_t
    col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) { // don't run off the end of the arrays
        int
        col_winner = sub[iter], row_winner = sup[iter];
        float
        top_val = dist[col_winner * pitch_dist + col],
        bot_val = dist[row_winner * pitch_dist + col],
        nr = count[row_winner], np = -count[col_winner],
        nq = nr - np;
        bot_val = (top_val * np + bot_val * nq)/nr
                  - (np * nq * val[iter])/(nr * nr);
        // Earlier formulations kept for reference:
        //  bot_val = (nr * (bot_val * np + top_val * nq) - np * nq * val[iter])
        //   / (nr * nr);
        /*
        float nr = count[row_winner];
        float np = -1.0 * count[col_winner];
        float nq = nr - np;
        bot_val = (top_val * np + bot_val * nq) / nr;
        */
        if(col == col_winner || col == row_winner)
            bot_val = CUDART_INF_F;
        top_val = CUDART_INF_F;
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Flexible-beta (size-weighted) update for one matrix column after
// merging sub[iter] (size np) into sup[iter] (combined size nr):
// new distance = ((1-lambda)*np*d1 + (1-lambda)*nq*d2)/nr + beta*d(sub,sup).
// Merged/self entries retire to +inf; both triangles are written.
__global__ void flexible_group_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        float nr = count[row_winner];
        // FIX: float negation instead of `-1.0 *` (exact, no fp64 trip).
        float np = -count[col_winner];
        float nq = nr - np;
        // FIX: (1.0f - lambda) instead of the double literal 1.0, which
        // promoted the whole update through fp64. May differ from the old
        // result by at most an ulp while keeping the math in fp32.
        bot_val = (bot_val * (1.0f - lambda) * np + top_val * (1.0f - lambda) * nq) / nr + beta * val[iter];
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Flexible-beta (unweighted) update for one matrix column after merging
// sub[iter] into sup[iter]:
// new distance = ((1-lambda)*d1 + (1-lambda)*d2)/2 + beta*d(sub,sup).
// Merged/self entries retire to +inf; both triangles are written.
__global__ void flexible_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        // FIX: float literals (1.0f, 2.0f) instead of double 1.0/2.0 —
        // the old code evaluated the whole update in fp64. May differ
        // from the old result by at most an ulp.
        bot_val = (bot_val * (1.0f - lambda) + top_val * (1.0f - lambda)) / 2.0f + beta * val[iter];
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Ward's-method (minimum variance) update for one matrix column after
// merging sub[iter] (size np) into sup[iter] (combined size nr, with
// nq = nr - np); nk is the size of the column's own cluster:
// new distance = ((np+nk)*d1 + (nq+nk)*d2 - nk*d(sub,sup)) / (nr+nk),
// a Lance-Williams-style update. nr == -nk means cluster `col` itself
// has been merged (counts go negative after a merge), so its distances
// are retired to +inf along with the merged pair's own entries.
__global__ void ward_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t
    col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col >= n)
        return;
    int
    col_winner = sub[iter], row_winner = sup[iter];
    float
    top_val = dist[col_winner * pitch_dist + col],
    bot_val = dist[row_winner * pitch_dist + col],
    nr = count[row_winner], np = -count[col_winner],
    nq = nr - np, nk = count[col];
    if((nr == -nk) || (col == col_winner) || (col == row_winner)) {
        bot_val = CUDART_INF_F;
    } else {
        bot_val = (bot_val * (np + nk) + top_val * (nq + nk) - val[iter] * nk);
        bot_val /= (nr + nk);
        // inf - inf style arithmetic can yield NaN/inf; clamp to the
        // canonical +inf sentinel used by the min-search kernels.
        if(isinf(bot_val)) {
            bot_val = CUDART_INF_F;
        }
    }
    top_val = CUDART_INF_F;
    // Write both triangles to keep the matrix symmetric.
    dist[col_winner * pitch_dist + col] = top_val;
    dist[col * pitch_dist + col_winner] = top_val;
    dist[row_winner * pitch_dist + col] = bot_val;
    dist[col * pitch_dist + row_winner] = bot_val;
}
| 9f86cdf9fd9e27cc42c69eb8bddd6ef1e4c87e2a.cu | #define NUM_THREADS 32
#define NUM_BLOCKS 1024
// Initialize the diagonal of the n x n distance matrix to +inf so a
// cluster is never matched with itself by the min-search kernels.
// `pitch_dist` is the row stride in elements as used here. blockIdx is
// unused: a single block of NUM_THREADS threads strides the diagonal
// (extra blocks would only repeat the same idempotent writes).
__global__ void convert_kernel(float * dist, size_t pitch_dist, size_t n)
{
    for(size_t index = threadIdx.x; index < n; index += NUM_THREADS) {
        dist[index * pitch_dist + index] = CUDART_INF_F;
    }
}
// Phase 1 of the global minimum search: each block scans one row of the
// lower triangle of `dist` (columns 0..row) and records that row's
// minimum and its column in min_val[row] / min_col[row]. A negative
// count marks an already-merged row (find_min2_kernel negates it).
// One block per row, NUM_THREADS threads per block.
__global__ void find_min1_kernel(const float * dist, const size_t pitch_dist,
    const size_t n, const float * count, float * min_val, size_t * min_col,
    const size_t row_offset)
{
    // Determine which row this block will handle
    const size_t row = row_offset + blockIdx.x;
    // If the row has already been merged, skip the work
    if((threadIdx.x == 0) && (row < n) && (count[row] < 0.f)) {
        min_val[row] = CUDART_INF_F;
        min_col[row] = 0;
    }
    // Uniform per-block predicate (row depends only on blockIdx), so the
    // early return does not split the block around the barriers below.
    if((row >= n) || (count[row] <= 0.f))
        return;
    __shared__ float vals[NUM_THREADS];
    __shared__ size_t cols[NUM_THREADS];
    // Initialize with identity
    // NOTE(review): cols[] is uninitialized; a lane that never beats +inf
    // carries a garbage column, which can only win if the entire row is
    // +inf — verify that edge case upstream.
    vals[threadIdx.x] = CUDART_INF_F;
    // Find the minimum
    for(size_t col = threadIdx.x; col <= row; col += NUM_THREADS) {
        float t = dist[row * pitch_dist + col];
        if(t < vals[threadIdx.x]) {
            vals[threadIdx.x] = t;
            cols[threadIdx.x] = col;
        }
    }
    __syncthreads();
    // Reduce
    // Shared-memory tree reduction (argmin) over the per-lane minima.
    for(size_t stride = NUM_THREADS >> 1; stride > 0; stride >>= 1) {
        if((threadIdx.x < stride)
           && (vals[threadIdx.x] > vals[threadIdx.x + stride]))
        {
            vals[threadIdx.x] = vals[threadIdx.x + stride];
            cols[threadIdx.x] = cols[threadIdx.x + stride];
        }
        __syncthreads();
    }
    // Write the result
    if(threadIdx.x == 0) {
        min_val[row] = vals[0];
        min_col[row] = cols[0];
    }
}
// Phase 2 of the global minimum search: reduce the per-row minima from
// find_min1_kernel to the single closest pair and record the merge for
// iteration `iter`: sub[iter] = absorbed cluster, sup[iter] = survivor,
// val[iter] = their distance. The absorbed cluster's count is negated
// (dead marker) and its size folded into the survivor's count.
// Launch with ONE block of NUM_THREADS threads.
__global__ void find_min2_kernel(const float * min_val, const size_t * min_col,
    float * count, int * sub, int * sup, float * val, const size_t n,
    const size_t iter)
{
    __shared__ float vals[NUM_THREADS];
    __shared__ size_t cols[NUM_THREADS];
    // Initialize with identity
    vals[threadIdx.x] = CUDART_INF_F;
    // Find the minimum
    // Each lane strides the row-minima array; cols holds the winning row.
    for(size_t row = threadIdx.x; row < n; row += NUM_THREADS) {
        float t = min_val[row];
        if(t < vals[threadIdx.x]) {
            vals[threadIdx.x] = t;
            cols[threadIdx.x] = row;
        }
    }
    __syncthreads();
    // Reduce
    for(size_t stride = NUM_THREADS >> 1; stride > 0; stride >>= 1) {
        if(threadIdx.x < stride) {
            if(vals[threadIdx.x] > vals[threadIdx.x + stride]) {
                vals[threadIdx.x] = vals[threadIdx.x + stride];
                cols[threadIdx.x] = cols[threadIdx.x + stride];
            }
        }
        __syncthreads();
    }
    // Write out
    if(threadIdx.x == 0) {
        // Winning value is vals[0]
        // Winning row is cols[0]
        // Winning column is min_col[cols[0]]
        int row_winner = cols[0];
        int col_winner = min_col[cols[0]];
        val[iter] = vals[0];
        sub[iter] = col_winner;
        sup[iter] = row_winner;
        // Survivor absorbs the loser's member count; the loser's count is
        // negated so later kernels can tell it has been merged.
        count[row_winner] += count[col_winner];
        count[col_winner] *= -1.f;
    }
}
// Single-linkage update for one matrix column after merging cluster
// sub[iter] (absorbed) into sup[iter] (survivor): the new distance is
// the MINIMUM of the two old distances. The absorbed cluster's row and
// column, and the survivor's self-distances, are retired to +inf.
// lambda/beta/count/val are unused here (shared kernel signature).
// One thread per column.
__global__ void single_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // If it matters
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        bot_val = min(bot_val, top_val);
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out (both triangles, keeping the matrix symmetric)
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Complete-linkage update for one matrix column after merging cluster
// sub[iter] (absorbed) into sup[iter] (survivor): the new distance is
// the MAXIMUM of the two old distances. Merged/self entries retire to
// +inf; both triangles are written. lambda/beta/count/val unused here
// (shared kernel signature). One thread per column.
__global__ void complete_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // If it matters
    if(col < n) {
        int
        col_winner = sub[iter], row_winner = sup[iter];
        float
        top_val = dist[col_winner * pitch_dist + col],
        bot_val = dist[row_winner * pitch_dist + col];
        bot_val = fmaxf(bot_val, top_val);
        if((col == col_winner) || (col == row_winner))
            bot_val = CUDART_INF_F;
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// WPGMA update for one matrix column after merging cluster sub[iter]
// (absorbed) into sup[iter] (survivor): new distance is the plain
// average of the two old distances. Merged/self entries retire to +inf;
// both triangles are written. One thread per column.
__global__ void wpgma_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        // FIX: 2.0f instead of the double literal 2.0 — avoids promoting
        // the average through fp64; division by 2 is exact, so the
        // result is unchanged.
        bot_val = (bot_val + top_val) / 2.0f;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// UPGMA (size-weighted average) update for one matrix column after
// merging cluster sub[iter] (size np) into sup[iter] (combined size nr):
// new distance = (d(sub,col)*np + d(sup,col)*nq) / nr. The absorbed
// cluster's count was negated by find_min2_kernel, hence the sign flip.
// Merged/self entries retire to +inf; both triangles are written.
__global__ void average_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        float nr = count[row_winner];
        // FIX: plain float negation instead of `-1.0 * count[...]` —
        // negation is exact, and this avoids an fp64 round trip.
        float np = -count[col_winner];
        float nq = nr - np;
        bot_val = (top_val * np + bot_val * nq) / nr;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Median (WPGMC) update for one matrix column after merging sub[iter]
// into sup[iter]: new distance = (d1 + d2)/2 - d(sub,sup)/4, where
// val[iter] is the merged pair's distance. Merged/self entries retire
// to +inf; both triangles are written.
__global__ void median_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        // FIX: float literals (2.0f, 4.0f) instead of double 2.0/4.0 —
        // divisions by powers of two are exact, so the result is
        // unchanged while the math stays in fp32.
        bot_val = (bot_val + top_val) / 2.0f - val[iter] / 4.0f;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// McQuitty (WPGMA) update for one matrix column after merging sub[iter]
// into sup[iter]: new distance is the plain average of the two old
// distances (formula matches wpgma_kernel; both names are kept for the
// host-side method table). Merged/self entries retire to +inf.
__global__ void mcquitty_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];
        float bot_val = dist[row_winner * pitch_dist + col];
        // FIX: 2.0f instead of double 2.0 — avoids an fp64 round trip;
        // division by 2 is exact, so the result is unchanged.
        bot_val = (bot_val + top_val) / 2.0f;
        if(col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
// Centroid (UPGMC) update for one matrix column after merging sub[iter]
// (size np) into sup[iter] (combined size nr, with nq = nr - np):
// new distance = (d1*np + d2*nq)/nr - np*nq*d(sub,sup)/nr^2, where
// val[iter] is the merged pair's distance. The absorbed cluster's count
// was negated by find_min2_kernel, hence np = -count[col_winner].
// Merged/self entries retire to +inf; both triangles are written.
__global__ void centroid_kernel(float * dist, size_t pitch_dist,
    size_t n, const int * sub, const int * sup, const float * count,
    const float * val, size_t iter, size_t col_offset,
    float lambda, float beta)
{
    size_t
    col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    if(col < n) { // don't run off the end of the arrays
        int
        col_winner = sub[iter], row_winner = sup[iter];
        float
        top_val = dist[col_winner * pitch_dist + col],
        bot_val = dist[row_winner * pitch_dist + col],
        nr = count[row_winner], np = -count[col_winner],
        nq = nr - np;
        bot_val = (top_val * np + bot_val * nq)/nr
                  - (np * nq * val[iter])/(nr * nr);
        // Earlier formulations kept for reference:
        //  bot_val = (nr * (bot_val * np + top_val * nq) - np * nq * val[iter])
        //   / (nr * nr);
        /*
        float nr = count[row_winner];
        float np = -1.0 * count[col_winner];
        float nq = nr - np;
        bot_val = (top_val * np + bot_val * nq) / nr;
        */
        if(col == col_winner || col == row_winner)
            bot_val = CUDART_INF_F;
        top_val = CUDART_INF_F;
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
__global__ void flexible_group_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    // One column of a size-weighted "flexible" Lance-Williams update:
    //     d(r,k) = (1-lambda) * (np*d(p,k) + nq*d(q,k)) / nr + beta * d(p,q)
    // with p = sub[iter] (absorbed), q = sup[iter] (survivor), d(p,q) = val[iter].
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // Guard the grid tail.
    if (col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];  // d(p, k)
        float bot_val = dist[row_winner * pitch_dist + col];  // d(q, k)
        float nr = count[row_winner];
        // NOTE(review): count[col_winner] appears to hold the negated size of
        // the absorbed cluster (same convention as centroid_kernel) — confirm.
        float np = -1.0f * count[col_winner];
        float nq = nr - np;
        // 1.0f literals: the original double literals (1.0) promoted every
        // product in this expression to double precision on each thread.
        bot_val = (bot_val * (1.0f - lambda) * np + top_val * (1.0f - lambda) * nq) / nr + beta * val[iter];
        // Merged cluster has no finite self/absorbed distance.
        if (col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out symmetrically: retire p, publish under q.
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
__global__ void flexible_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    // One column of the unweighted "flexible" Lance-Williams update:
    //     d(r,k) = (1-lambda) * (d(p,k) + d(q,k)) / 2 + beta * d(p,q)
    // with p = sub[iter] (absorbed), q = sup[iter] (survivor), d(p,q) = val[iter].
    // `count` is unused here (uniform kernel signature).
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // Guard the grid tail.
    if (col < n) {
        int col_winner = sub[iter];
        int row_winner = sup[iter];
        float top_val = dist[col_winner * pitch_dist + col];  // d(p, k)
        float bot_val = dist[row_winner * pitch_dist + col];  // d(q, k)
        // 1.0f/2.0f literals: the original double literals promoted the whole
        // expression to double precision on each thread for no benefit.
        bot_val = (bot_val * (1.0f - lambda) + top_val * (1.0f - lambda)) / 2.0f + beta * val[iter];
        // Merged cluster has no finite self/absorbed distance.
        if (col == col_winner || col == row_winner) {
            bot_val = CUDART_INF_F;
        }
        top_val = CUDART_INF_F;
        // Write out symmetrically: retire p, publish under q.
        dist[col_winner * pitch_dist + col] = top_val;
        dist[col * pitch_dist + col_winner] = top_val;
        dist[row_winner * pitch_dist + col] = bot_val;
        dist[col * pitch_dist + row_winner] = bot_val;
    }
}
__global__ void ward_kernel(float * dist, const size_t pitch_dist,
    const size_t n, const int * sub, const int * sup, const float * count,
    const float * val, const size_t iter, const size_t col_offset,
    const float lambda, const float beta)
{
    // One column of a Ward-criterion Lance-Williams update for the merge
    // recorded at `iter`. `dist` is a pitched n x n matrix (pitch in floats);
    // val[iter] is the distance between the merged pair. `lambda` and `beta`
    // are unused (uniform kernel signature).
    const size_t col = col_offset + NUM_THREADS * blockIdx.x + threadIdx.x;
    // Guard the grid tail.
    if (col >= n)
        return;

    const int winner_col = sub[iter];  // p: absorbed cluster
    const int winner_row = sup[iter];  // q: surviving cluster

    float d_pk = dist[winner_col * pitch_dist + col];
    float d_qk = dist[winner_row * pitch_dist + col];

    const float nr = count[winner_row];
    // NOTE(review): count[winner_col] appears to hold the negated size of the
    // absorbed cluster — confirm against the host code maintaining `count`.
    const float np = -count[winner_col];
    const float nq = nr - np;
    const float nk = count[col];

    float merged;
    if ((nr == -nk) || (col == winner_col) || (col == winner_row)) {
        // Retired/self columns carry no finite distance.
        merged = CUDART_INF_F;
    } else {
        // Ward update: ((np+nk)*d(q,k) + (nq+nk)*d(p,k) - nk*d(p,q)) / (nr+nk).
        merged = (d_qk * (np + nk) + d_pk * (nq + nk) - val[iter] * nk);
        merged /= (nr + nk);
        // Normalize any overflow to the canonical +inf sentinel.
        if (isinf(merged)) {
            merged = CUDART_INF_F;
        }
    }

    // Symmetric write-back: retire row/column p, publish the merged distance
    // under q.
    dist[winner_col * pitch_dist + col] = CUDART_INF_F;
    dist[col * pitch_dist + winner_col] = CUDART_INF_F;
    dist[winner_row * pitch_dist + col] = merged;
    dist[col * pitch_dist + winner_row] = merged;
}
|
5ff7653077c28eb8525287db396f2f1405707332.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* FILENAME: test_image_minarearect.cu
*
* AUTHORS: Chen Fting START DATE: Tuesday August 17th 2021
*
* LAST MODIFIED: Thursday, August 19th 2021, 2:20:57 pm
*
* CONTACT: fting.chen@smartmore.com
*******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <hip/hip_runtime.h>
#include <cudaop/cudaop.h>
#include <macro.h>
#include <utils.h>
#include <catch2/catch.hpp>
#include <opencv2/opencv.hpp>
TEST_CASE("MinAreaRect", "[minarearect]") {
    // Build 2000 random 2-D points, print OpenCV's minAreaRect corners as the
    // reference, then run the GPU MinAreaRect 10 times (timed) and print its
    // four corners for manual comparison (the REQUIRE block is still disabled).
    int height = 2000, width = 2;
    std::vector<float> input_data(height * width);
    smartmore::RandomFloatVector(input_data);
    std::vector<cv::Point2f> points;
    for (int i = 0; i < height; i++) {
        cv::Point2f point;
        point.x = input_data[i * 2];
        point.y = input_data[i * 2 + 1];
        points.push_back(point);
    }
    // Scratch-buffer element count required by MinAreaRect — presumably
    // 11 floats per 512-wide reduction block over all point pairs; TODO confirm.
    int size = (height * (height - 1) / 2 + 511) / 512 * 11;
    cv::RotatedRect box = cv::minAreaRect(points);
    cv::Mat expect;
    cv::boxPoints(box, expect);
    std::cout << expect << std::endl;
    std::vector<float> actual(8);
    void *input_device = nullptr, *output_device = nullptr, *g_data = nullptr;
    CUDA_CHECK(hipMalloc(&input_device, input_data.size() * sizeof(float)));
    CUDA_CHECK(hipMalloc(&output_device, 8 * sizeof(float)));
    CUDA_CHECK(hipMalloc(&g_data, size * sizeof(float)));
    CUDA_CHECK(hipMemcpy(input_device, input_data.data(), input_data.size() * sizeof(float), hipMemcpyHostToDevice));
    using namespace smartmore::cudaop;
    for (int i = 0; i < 10; i++) {
        smartmore::Clock clk("MinAreaRect");
        MinAreaRect<DataType::kFloat32>(input_device, output_device, g_data, height);
    }
    CUDA_CHECK(hipMemcpy(&actual[0], output_device, 8 * sizeof(float), hipMemcpyDeviceToHost));
    for (int i = 0; i < 4; i++) {
        std::cout << actual[i * 2] << "," << actual[i * 2 + 1] << ";" << std::endl;
    }
    // for (int i = 0; i < actual.size(); i++)
    // {
    //     REQUIRE(fabs(actual[i] - exptct[i]) <= 0.0001f);
    // }
    CUDA_CHECK_AND_FREE(input_device);
    CUDA_CHECK_AND_FREE(output_device);
    // Fix: g_data was allocated above but never released — device memory leak.
    CUDA_CHECK_AND_FREE(g_data);
} | 5ff7653077c28eb8525287db396f2f1405707332.cu | /*******************************************************************************
* FILENAME: test_image_minarearect.cu
*
* AUTHORS: Chen Fting START DATE: Tuesday August 17th 2021
*
* LAST MODIFIED: Thursday, August 19th 2021, 2:20:57 pm
*
* CONTACT: fting.chen@smartmore.com
*******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <cuda_runtime.h>
#include <cudaop/cudaop.h>
#include <macro.h>
#include <utils.h>
#include <catch2/catch.hpp>
#include <opencv2/opencv.hpp>
TEST_CASE("MinAreaRect", "[minarearect]") {
    // Build 2000 random 2-D points, print OpenCV's minAreaRect corners as the
    // reference, then run the GPU MinAreaRect 10 times (timed) and print its
    // four corners for manual comparison (the REQUIRE block is still disabled).
    int height = 2000, width = 2;
    std::vector<float> input_data(height * width);
    smartmore::RandomFloatVector(input_data);
    std::vector<cv::Point2f> points;
    for (int i = 0; i < height; i++) {
        cv::Point2f point;
        point.x = input_data[i * 2];
        point.y = input_data[i * 2 + 1];
        points.push_back(point);
    }
    // Scratch-buffer element count required by MinAreaRect — presumably
    // 11 floats per 512-wide reduction block over all point pairs; TODO confirm.
    int size = (height * (height - 1) / 2 + 511) / 512 * 11;
    cv::RotatedRect box = cv::minAreaRect(points);
    cv::Mat expect;
    cv::boxPoints(box, expect);
    std::cout << expect << std::endl;
    std::vector<float> actual(8);
    void *input_device = nullptr, *output_device = nullptr, *g_data = nullptr;
    CUDA_CHECK(cudaMalloc(&input_device, input_data.size() * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&output_device, 8 * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&g_data, size * sizeof(float)));
    CUDA_CHECK(cudaMemcpy(input_device, input_data.data(), input_data.size() * sizeof(float), cudaMemcpyHostToDevice));
    using namespace smartmore::cudaop;
    for (int i = 0; i < 10; i++) {
        smartmore::Clock clk("MinAreaRect");
        MinAreaRect<DataType::kFloat32>(input_device, output_device, g_data, height);
    }
    CUDA_CHECK(cudaMemcpy(&actual[0], output_device, 8 * sizeof(float), cudaMemcpyDeviceToHost));
    for (int i = 0; i < 4; i++) {
        std::cout << actual[i * 2] << "," << actual[i * 2 + 1] << ";" << std::endl;
    }
    // for (int i = 0; i < actual.size(); i++)
    // {
    //     REQUIRE(fabs(actual[i] - exptct[i]) <= 0.0001f);
    // }
    CUDA_CHECK_AND_FREE(input_device);
    CUDA_CHECK_AND_FREE(output_device);
    // Fix: g_data was allocated above but never released — device memory leak.
    CUDA_CHECK_AND_FREE(g_data);
} |
cd3d2dbbbb2ac965bcd92f5e2d04f6d254528330.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "random/rng.h"
#include "stats/mean.h"
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
// Parameters for one parameterized mean-computation test case.
template <typename T>
struct MeanInputs {
    T tolerance, mean;      // comparison tolerance; expected column mean
    int rows, cols;         // shape of the generated input matrix
    bool sample, rowMajor;  // flags forwarded to mean(); rowMajor selects layout
    unsigned long long int seed;  // RNG seed so the generated data is reproducible
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MeanInputs<T> &dims) {
return os;
}
// Fixture: generates a rows x cols N(mean, 1) sample on the device and runs
// mean() over it; TEST_P bodies then compare mean_act against params.mean.
template <typename T>
class MeanTest : public ::testing::TestWithParam<MeanInputs<T>> {
 protected:
  void SetUp() override {
    params = ::testing::TestWithParam<MeanInputs<T>>::GetParam();
    Random::Rng<T> r(params.seed);
    int rows = params.rows, cols = params.cols;
    int len = rows * cols;
    allocate(data, len);
    allocate(mean_act, cols);
    r.normal(data, len, params.mean, (T)1.0);
    meanSGtest(data);
  }

  void meanSGtest(T *data) {
    int rows = params.rows, cols = params.cols;
    // Fix: the stream used to be a local that was created and never destroyed
    // (leaked once per test case). It is now a member released in TearDown.
    CUDA_CHECK(hipStreamCreate(&stream));
    mean(mean_act, data, cols, rows, params.sample, params.rowMajor, stream);
    // Ensure the async mean() has finished before the test body reads mean_act.
    CUDA_CHECK(hipStreamSynchronize(stream));
  }

  void TearDown() override {
    CUDA_CHECK(hipFree(data));
    CUDA_CHECK(hipFree(mean_act));
    CUDA_CHECK(hipStreamDestroy(stream));
  }

 protected:
  MeanInputs<T> params;
  T *data, *mean_act;
  hipStream_t stream;  // created in meanSGtest, destroyed in TearDown
};
// Test matrix: {tolerance, expected mean, rows, cols, sample, rowMajor, seed}.
// Covers column counts 32..256, both layouts, and positive/negative means.
const std::vector<MeanInputs<float>> inputsf = {
    {0.1f, 1.f, 1024, 32, true, false, 1234ULL},
    {0.1f, 1.f, 1024, 64, true, false, 1234ULL},
    {0.1f, 1.f, 1024, 128, true, false, 1234ULL},
    {0.1f, 1.f, 1024, 256, true, false, 1234ULL},
    {0.1f, -1.f, 1024, 32, false, false, 1234ULL},
    {0.1f, -1.f, 1024, 64, false, false, 1234ULL},
    {0.1f, -1.f, 1024, 128, false, false, 1234ULL},
    {0.1f, -1.f, 1024, 256, false, false, 1234ULL},
    {0.1f, 1.f, 1024, 32, true, true, 1234ULL},
    {0.1f, 1.f, 1024, 64, true, true, 1234ULL},
    {0.12f, 1.f, 1024, 128, true, true, 1234ULL},
    {0.1f, 1.f, 1024, 256, true, true, 1234ULL},
    {0.1f, -1.f, 1024, 32, false, true, 1234ULL},
    {0.1f, -1.f, 1024, 64, false, true, 1234ULL},
    {0.12f, -1.f, 1024, 128, false, true, 1234ULL},
    {0.1f, -1.f, 1024, 256, false, true, 1234ULL}};

// Same configurations as inputsf, in double precision.
const std::vector<MeanInputs<double>> inputsd = {
    {0.1, 1.0, 1024, 32, true, false, 1234ULL},
    {0.1, 1.0, 1024, 64, true, false, 1234ULL},
    {0.1, 1.0, 1024, 128, true, false, 1234ULL},
    {0.1, 1.0, 1024, 256, true, false, 1234ULL},
    {0.1, -1.0, 1024, 32, false, false, 1234ULL},
    {0.1, -1.0, 1024, 64, false, false, 1234ULL},
    {0.1, -1.0, 1024, 128, false, false, 1234ULL},
    {0.1, -1.0, 1024, 256, false, false, 1234ULL},
    {0.1, 1.0, 1024, 32, true, true, 1234ULL},
    {0.1, 1.0, 1024, 64, true, true, 1234ULL},
    {0.1, 1.0, 1024, 128, true, true, 1234ULL},
    {0.1, 1.0, 1024, 256, true, true, 1234ULL},
    {0.1, -1.0, 1024, 32, false, true, 1234ULL},
    {0.1, -1.0, 1024, 64, false, true, 1234ULL},
    {0.1, -1.0, 1024, 128, false, true, 1234ULL},
    {0.1, -1.0, 1024, 256, false, true, 1234ULL}};
// Float instantiation: assert every column mean of the generated sample
// matches the expected mean within the per-case tolerance.
typedef MeanTest<float> MeanTestF;
TEST_P(MeanTestF, Result) {
    ASSERT_TRUE(devArrMatch(params.mean, mean_act, params.cols,
                            CompareApprox<float>(params.tolerance)));
}

// Double instantiation of the same check.
typedef MeanTest<double> MeanTestD;
TEST_P(MeanTestD, Result) {
    ASSERT_TRUE(devArrMatch(params.mean, mean_act, params.cols,
                            CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_CASE_P(MeanTests, MeanTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MeanTests, MeanTestD, ::testing::ValuesIn(inputsd));
} // end namespace Stats
} // end namespace MLCommon
| cd3d2dbbbb2ac965bcd92f5e2d04f6d254528330.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "random/rng.h"
#include "stats/mean.h"
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T>
struct MeanInputs {
T tolerance, mean;
int rows, cols;
bool sample, rowMajor;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MeanInputs<T> &dims) {
return os;
}
// Fixture: generates a rows x cols N(mean, 1) sample on the device and runs
// mean() over it; TEST_P bodies then compare mean_act against params.mean.
template <typename T>
class MeanTest : public ::testing::TestWithParam<MeanInputs<T>> {
 protected:
  void SetUp() override {
    params = ::testing::TestWithParam<MeanInputs<T>>::GetParam();
    Random::Rng<T> r(params.seed);
    int rows = params.rows, cols = params.cols;
    int len = rows * cols;
    allocate(data, len);
    allocate(mean_act, cols);
    r.normal(data, len, params.mean, (T)1.0);
    meanSGtest(data);
  }

  void meanSGtest(T *data) {
    int rows = params.rows, cols = params.cols;
    // Fix: the stream used to be a local that was created and never destroyed
    // (leaked once per test case). It is now a member released in TearDown.
    CUDA_CHECK(cudaStreamCreate(&stream));
    mean(mean_act, data, cols, rows, params.sample, params.rowMajor, stream);
    // Ensure the async mean() has finished before the test body reads mean_act.
    CUDA_CHECK(cudaStreamSynchronize(stream));
  }

  void TearDown() override {
    CUDA_CHECK(cudaFree(data));
    CUDA_CHECK(cudaFree(mean_act));
    CUDA_CHECK(cudaStreamDestroy(stream));
  }

 protected:
  MeanInputs<T> params;
  T *data, *mean_act;
  cudaStream_t stream;  // created in meanSGtest, destroyed in TearDown
};
const std::vector<MeanInputs<float>> inputsf = {
{0.1f, 1.f, 1024, 32, true, false, 1234ULL},
{0.1f, 1.f, 1024, 64, true, false, 1234ULL},
{0.1f, 1.f, 1024, 128, true, false, 1234ULL},
{0.1f, 1.f, 1024, 256, true, false, 1234ULL},
{0.1f, -1.f, 1024, 32, false, false, 1234ULL},
{0.1f, -1.f, 1024, 64, false, false, 1234ULL},
{0.1f, -1.f, 1024, 128, false, false, 1234ULL},
{0.1f, -1.f, 1024, 256, false, false, 1234ULL},
{0.1f, 1.f, 1024, 32, true, true, 1234ULL},
{0.1f, 1.f, 1024, 64, true, true, 1234ULL},
{0.12f, 1.f, 1024, 128, true, true, 1234ULL},
{0.1f, 1.f, 1024, 256, true, true, 1234ULL},
{0.1f, -1.f, 1024, 32, false, true, 1234ULL},
{0.1f, -1.f, 1024, 64, false, true, 1234ULL},
{0.12f, -1.f, 1024, 128, false, true, 1234ULL},
{0.1f, -1.f, 1024, 256, false, true, 1234ULL}};
const std::vector<MeanInputs<double>> inputsd = {
{0.1, 1.0, 1024, 32, true, false, 1234ULL},
{0.1, 1.0, 1024, 64, true, false, 1234ULL},
{0.1, 1.0, 1024, 128, true, false, 1234ULL},
{0.1, 1.0, 1024, 256, true, false, 1234ULL},
{0.1, -1.0, 1024, 32, false, false, 1234ULL},
{0.1, -1.0, 1024, 64, false, false, 1234ULL},
{0.1, -1.0, 1024, 128, false, false, 1234ULL},
{0.1, -1.0, 1024, 256, false, false, 1234ULL},
{0.1, 1.0, 1024, 32, true, true, 1234ULL},
{0.1, 1.0, 1024, 64, true, true, 1234ULL},
{0.1, 1.0, 1024, 128, true, true, 1234ULL},
{0.1, 1.0, 1024, 256, true, true, 1234ULL},
{0.1, -1.0, 1024, 32, false, true, 1234ULL},
{0.1, -1.0, 1024, 64, false, true, 1234ULL},
{0.1, -1.0, 1024, 128, false, true, 1234ULL},
{0.1, -1.0, 1024, 256, false, true, 1234ULL}};
typedef MeanTest<float> MeanTestF;
TEST_P(MeanTestF, Result) {
ASSERT_TRUE(devArrMatch(params.mean, mean_act, params.cols,
CompareApprox<float>(params.tolerance)));
}
typedef MeanTest<double> MeanTestD;
TEST_P(MeanTestD, Result) {
ASSERT_TRUE(devArrMatch(params.mean, mean_act, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanTests, MeanTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MeanTests, MeanTestD, ::testing::ValuesIn(inputsd));
} // end namespace Stats
} // end namespace MLCommon
|
bc8cd4103f655125c2ec7743b05e66b083d7094d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
// Element-wise leaky ReLU forward pass: out = in for in >= 0, otherwise
// out = 0.01 * in. One thread per element; `elements` guards the grid tail.
__global__ void calcLeakyReluForwardGPU(float *in, float *out, int elements)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        float v = in[id];
        if ( v < 0 ){
            // Fix: leaky ReLU scales negative inputs by the slope (0.01f * v);
            // the previous code replaced them with the constant 0.01, which is
            // not a leaky ReLU (not even a function of the input). NOTE(review):
            // the commented-out CPU reference had the same defect — update it
            // to match.
            v *= 0.01f;
        }
        out[id] = v;
    }
}
// Leaky ReLU backward pass: accumulates the upstream gradient into dz_in,
// then accumulates the input gradient into dz, scaled by the slope (0.01)
// where the forward input was negative.
__global__ void calcLeakyReluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        dz_in[id] += dz_next_layer[id];
        // Fix: the gradient for in < 0 is slope * upstream gradient
        // (0.01f * dz_in), not the constant 0.01 — the old code added a fixed
        // bias to the gradient regardless of the upstream signal. NOTE(review):
        // the commented-out CPU reference had the same defect.
        dz[id] += (in[id] < 0) ? (0.01f * dz_in[id]) : (dz_in[id]);
    }
}
void leakyReluForwardGPU(float *in, float *out, int N)
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcLeakyReluForwardGPU), dim3(grid), dim3(BLOCK), 0, 0, in, out, N);
}
void leakyReluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
hipLaunchKernelGGL(( calcLeakyReluBackwardGPU), dim3(grid), dim3(BLOCK), 0, 0, dz_next_layer, dz_in, dz, in, N );
}
} // namespace gpu
| bc8cd4103f655125c2ec7743b05e66b083d7094d.cu | #include <stdio.h>
#include "CudaObject.h"
namespace gpu_cuda {
// Element-wise leaky ReLU forward pass: out = in for in >= 0, otherwise
// out = 0.01 * in. One thread per element; `elements` guards the grid tail.
__global__ void calcLeakyReluForwardGPU(float *in, float *out, int elements)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        float v = in[id];
        if ( v < 0 ){
            // Fix: leaky ReLU scales negative inputs by the slope (0.01f * v);
            // the previous code replaced them with the constant 0.01, which is
            // not a leaky ReLU (not even a function of the input). NOTE(review):
            // the commented-out CPU reference had the same defect — update it
            // to match.
            v *= 0.01f;
        }
        out[id] = v;
    }
}
// Leaky ReLU backward pass: accumulates the upstream gradient into dz_in,
// then accumulates the input gradient into dz, scaled by the slope (0.01)
// where the forward input was negative.
__global__ void calcLeakyReluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        dz_in[id] += dz_next_layer[id];
        // Fix: the gradient for in < 0 is slope * upstream gradient
        // (0.01f * dz_in), not the constant 0.01 — the old code added a fixed
        // bias to the gradient regardless of the upstream signal. NOTE(review):
        // the commented-out CPU reference had the same defect.
        dz[id] += (in[id] < 0) ? (0.01f * dz_in[id]) : (dz_in[id]);
    }
}
void leakyReluForwardGPU(float *in, float *out, int N)
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcLeakyReluForwardGPU<<<grid, BLOCK>>>(in, out, N);
}
void leakyReluBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int N )
{
CudaObject cuda = CudaObject();
dim3 grid = cuda.cudaGridSize(N);
calcLeakyReluBackwardGPU<<<grid, BLOCK>>>( dz_next_layer, dz_in, dz, in, N );
}
} // namespace gpu
|
26505d9cccc239ad3abbb158b25ca2a9643efd61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <op_boilerplate.h>
#include <pointercast.h>
#include <helpers/TAD.h>
#include <types/float16.h>
#include <loops/grid_shaped.h>
#include <helpers/DebugHelper.h>
#include <ops/meta_ops.h>
#include <loops/legacy_ops.h>
#define GRID_WIDTH 19 // number of pointers within single grid row
template <typename T>
__device__ inline static void metaPredicateShapeGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB,
Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
__shared__ Nd4jPointer params[2];
__shared__ T *paramsPtr;
if (threadIdx.x == 0) {
if (opTypeA == 0) {
params[0] = (Nd4jPointer *) &scalarA;
}
else params[0] = (Nd4jPointer *) extraA;
if (opTypeB == 0) {
params[1] = (Nd4jPointer *) &scalarB;
}
else params[1] = (Nd4jPointer *) extraB;
paramsPtr = (T *) params;
}
__syncthreads();
if (opTypeA == 2) {
if (opTypeB == 0) {
// DISPATCH_METAOP(functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda, PARAMS(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr), InvertedMetaOp, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
// functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda<simdOps::InvertedMetaOp<T, simdOps::Copy<T>, simdOps::Multiply<T>>>(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
}
}
}
template<typename T, typename OpClass>
__device__ static inline void invertedMetaPairwiseShapedGeneric(const int opTypeA, const int opTypeB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
__shared__ Nd4jPointer params[2];
__shared__ T *paramsPtr;
if (threadIdx.x == 0) {
if (opTypeA == 0) {
params[0] = (Nd4jPointer *) &scalarA;
}
else params[0] = (Nd4jPointer *) extraA;
if (opTypeB == 0) {
params[1] = (Nd4jPointer *) &scalarB;
}
else params[1] = (Nd4jPointer *) extraB;
paramsPtr = (T *) params;
}
__syncthreads();
functions::grid::GRIDShaped<T>::template transformCuda<OpClass>(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
};
template<typename T, typename OpClass>
__device__ static inline void invertedMetaPairwiseShapedGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
__shared__ Nd4jPointer params[2];
__shared__ T *paramsPtr;
if (threadIdx.x == 0) {
if (opTypeA == 0) {
params[0] = (Nd4jPointer *) &scalarA;
}
else params[0] = (Nd4jPointer *) extraA;
if (opTypeB == 0) {
params[1] = (Nd4jPointer *) &scalarB;
}
else params[1] = (Nd4jPointer *) extraB;
paramsPtr = (T *) params;
}
__syncthreads();
functions::grid::GRIDShaped<T>::template transformCuda<OpClass>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
};
template<typename T>
__device__ static inline void invertedMetaPairwiseShapedNumericGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
__shared__ Nd4jPointer params[2];
__shared__ T *paramsPtr;
if (threadIdx.x == 0) {
if (opTypeA == 0) {
params[0] = (Nd4jPointer *) &scalarA;
}
else params[0] = (Nd4jPointer *) extraA;
if (opTypeB == 0) {
params[1] = (Nd4jPointer *) &scalarB;
}
else params[1] = (Nd4jPointer *) extraB;
paramsPtr = (T *) params;
}
__syncthreads();
functions::grid::GRIDShaped<T>::transformCuda(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
};
extern "C" __global__ void invertedMetaPairwiseShapedNumericFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
invertedMetaPairwiseShapedNumericGeneric<float>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
}
extern "C" __global__ void invertedMetaPairwiseShapedNumericDouble(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
invertedMetaPairwiseShapedNumericGeneric<double>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
}
extern "C" __global__ void invertedMetaPairwiseShapedNumericHalf(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB) {
invertedMetaPairwiseShapedNumericGeneric<float16>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
}
#ifndef __CLION_IDE__
// kernels set for pairwise + scalar based on shape
//DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, float, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS))
//DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, double, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS))
//DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, float16, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS))
#endif
namespace functions {
namespace grid {
__device__ void _ind2subC(int rank, Nd4jLong *shape, Nd4jLong idx, Nd4jLong *coords) {
shape::ind2subC(rank, shape, idx, coords);
}
__device__ Nd4jLong _getOffset(Nd4jLong offset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *coords, int rank) {
return shape::getOffset(offset, shape, stride, coords, rank);
}
__device__ Nd4jLong* _shapeOf(Nd4jLong *shape) {
return shape::shapeOf(shape);
}
__device__ Nd4jLong* _stride(Nd4jLong *shape) {
return shape::stride(shape);
}
__device__ int _rank(Nd4jLong* shape) {
return shape::rank(shape);
}
/**
* This method is able to execute various ops that takes 2 operands (x, y) + extras
* @tparam T
*/
template <typename T>
__device__ T _execute_2OE(const int opType, const int opNum, T x, T y, T *extras) {
T z;
switch(opType) {
case 2: {
EXECUTE_NOE((x, y, extras), OPS_A(PAIRWISE_TRANSFORM_OPS));
};
break;
default: {
PRINT_FIRST("Unknown opType provided: [%i]\n", opType);
}
break;
}
return z;
}
/**
* This method is able to execute various ops that takes 1 operand (x) + extras
* @tparam T
*/
template <typename T>
__device__ T _execute_1OE(const int opType, const int opNum, T x, T *extras) {
T z;
switch(opType) {
case 0: {
EXECUTE_NOE((x, extras), OPS_A(SCALAR_OPS));
}
break;
default: {
PRINT_FIRST("Unknown opType provided: [%i]\n", opType);
}
break;
}
return z;
}
template <typename T>
__device__ T _invertedOpExecutorA(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, T x, T y, T *extras) {
// this code is basically InvertedMetaOp, reorganized to suit per-type execution
Nd4jPointer *wrap = reinterpret_cast<Nd4jPointer *> (extras);
T *paramsA = reinterpret_cast<T *> (wrap[0]);
T *paramsB = reinterpret_cast<T *> (wrap[1]);
T intermediate;
// Executing first op, opA
intermediate = _execute_2OE<T>(opTypeA, opNumA, x, y, paramsA);
// Executing second op, opB
intermediate = _execute_1OE<T>(opTypeB, opNumB, intermediate, paramsB);
// just returning result now
return intermediate;
}
// Shape-aware pairwise+scalar meta-op: for every element i, computes
// result = opB(opA(dx, y)) via the runtime dispatcher _invertedOpExecutorA.
// The in-place branch (dx == result) writes through x's offsets; the
// out-of-place branch computes result's own offsets (shapes/strides may differ).
template<typename T>
__device__ void GRIDShaped<T>::transformCuda(int opTypeA, int opNumA, int opTypeB, int opNumB, T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Shape metadata is identical for all threads; decode it once per block.
    __shared__ int xRank;
    __shared__ int yRank;
    __shared__ int resultRank;
    __shared__ Nd4jLong n;
    __shared__ Nd4jLong *xShape;
    __shared__ Nd4jLong *yShape;
    __shared__ Nd4jLong *zShape;
    __shared__ Nd4jLong *xStride;
    __shared__ Nd4jLong *yStride;
    __shared__ Nd4jLong *zStride;

    if (threadIdx.x == 0) {
        xRank = _rank(xShapeBuffer);
        yRank = _rank(yShapeBuffer);
        resultRank = _rank(resultShapeBuffer);
        n = shape::length(xShapeBuffer);

        xShape = _shapeOf(xShapeBuffer);
        yShape = _shapeOf(yShapeBuffer);
        // z metadata is only needed when writing to a distinct output buffer.
        if (dx != result) {
            zShape = _shapeOf(resultShapeBuffer);
            zStride = _stride(resultShapeBuffer);
        }
        xStride = _stride(xShapeBuffer);
        yStride = _stride(yShapeBuffer);
    }
    __syncthreads();

    if (dx == result) {
        // In-place: result shares x's layout, so reuse x's offset.
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];

        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);

            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            result[xOffset] = _invertedOpExecutorA(opTypeA, opNumA, opTypeB, opNumB, dx[xOffset], y[yOffset], extraParams); //OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    } else {
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];
        Nd4jLong resultCoord[MAX_RANK];

        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);
            _ind2subC(resultRank, zShape, i, resultCoord);

            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            auto resultOffset = _getOffset(0, zShape, zStride, resultCoord, resultRank);
            // Fix: this wrote result[0], making every thread race on element 0
            // and leaving the rest of the output untouched. The templated
            // overload of this method correctly writes result[resultOffset],
            // and resultOffset was already computed (and unused) here.
            result[resultOffset] = _invertedOpExecutorA(opTypeA, opNumA, opTypeB, opNumB, dx[xOffset], y[yOffset], extraParams); //OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    }
}
// Compile-time variant of the shaped meta-op: OpType is resolved statically
// instead of dispatched at runtime. Same structure as the numeric overload
// above: per-block shape decode, grid-stride loop, in-place vs out-of-place
// offset handling.
template<typename T>
template<typename OpType>
__device__ void GRIDShaped<T>::transformCuda(T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Shape metadata is identical for all threads; decode it once per block.
    __shared__ int xRank;
    __shared__ int yRank;
    __shared__ int resultRank;
    __shared__ Nd4jLong n;
    __shared__ Nd4jLong *xShape;
    __shared__ Nd4jLong *yShape;
    __shared__ Nd4jLong *zShape;
    __shared__ Nd4jLong *xStride;
    __shared__ Nd4jLong *yStride;
    __shared__ Nd4jLong *zStride;

    if (threadIdx.x == 0) {
        xRank = _rank(xShapeBuffer);
        yRank = _rank(yShapeBuffer);
        resultRank = _rank(resultShapeBuffer);
        n = shape::length(xShapeBuffer);

        xShape = _shapeOf(xShapeBuffer);
        yShape = _shapeOf(yShapeBuffer);
        // z metadata is only needed when writing to a distinct output buffer.
        if (dx != result) {
            zShape = _shapeOf(resultShapeBuffer);
            zStride = _stride(resultShapeBuffer);
        }
        xStride = _stride(xShapeBuffer);
        yStride = _stride(yShapeBuffer);
    }
    __syncthreads();

    if (dx == result) {
        // In-place: result shares x's layout, so reuse x's offset.
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];

        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);

            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            result[xOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    } else {
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];
        Nd4jLong resultCoord[MAX_RANK];

        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);
            _ind2subC(resultRank, zShape, i, resultCoord);

            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            auto resultOffset = _getOffset(0, zShape, zStride, resultCoord, resultRank);
            result[resultOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    }
}
template <>
void GRIDShaped<float>::execMetaPredicateShaped(hipStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
    // Host-side launcher for the float shaped meta-op kernel on the given stream.
    // Fixed launch config: 128 blocks x 1024 threads, 2048 B dynamic shared memory.
    hipLaunchKernelGGL(( invertedMetaPairwiseShapedNumericFloat), dim3(128), dim3(1024), 2048, *stream,  opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    DEBUG_KERNEL(stream, opNumA);
}
template <>
void GRIDShaped<float16>::execMetaPredicateShaped(hipStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB) {
    // Host-side launcher for the half-precision shaped meta-op kernel on the given
    // stream. Launch config (128 blocks x 1024 threads, 2048 B dynamic shared
    // memory) mirrors the float/double specializations.
    hipLaunchKernelGGL(( invertedMetaPairwiseShapedNumericHalf), dim3(128), dim3(1024), 2048, *stream,  opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    // Consistency fix: the float and double specializations report opNumA to
    // DEBUG_KERNEL; this variant previously passed opNumB.
    DEBUG_KERNEL(stream, opNumA);
}
template <>
void GRIDShaped<double>::execMetaPredicateShaped(hipStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
    // Host-side launcher for the double shaped meta-op kernel on the given stream.
    // Fixed launch config: 128 blocks x 1024 threads, 2048 B dynamic shared memory.
    hipLaunchKernelGGL(( invertedMetaPairwiseShapedNumericDouble), dim3(128), dim3(1024), 2048, *stream,  opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    DEBUG_KERNEL(stream, opNumA);
}
}
} | 26505d9cccc239ad3abbb158b25ca2a9643efd61.cu |
#include <op_boilerplate.h>
#include <pointercast.h>
#include <helpers/TAD.h>
#include <types/float16.h>
#include <loops/grid_shaped.h>
#include <helpers/DebugHelper.h>
#include <ops/meta_ops.h>
#include <loops/legacy_ops.h>
#define GRID_WIDTH 19 // number of pointers within single grid row
template <typename T>
__device__ inline static void metaPredicateShapeGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB,
                                                        Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
    // Packs the per-op extra arguments for a fused (opA, opB) meta op into a
    // shared pointer pair: slot 0 for opA, slot 1 for opB. opType == 0 denotes a
    // scalar op, whose single scalar is passed by address instead of an extras array.
    // NOTE(review): the dispatch in the branch below is entirely commented out, so
    // this function currently performs only the shared-memory setup and has no
    // observable effect.
    __shared__ Nd4jPointer params[2];
    __shared__ T *paramsPtr;
    if (threadIdx.x == 0) {
        if (opTypeA == 0) {
            params[0] = (Nd4jPointer *) &scalarA;
        }
        else params[0] = (Nd4jPointer *) extraA;
        if (opTypeB == 0) {
            params[1] = (Nd4jPointer *) &scalarB;
        }
        else params[1] = (Nd4jPointer *) extraB;
        paramsPtr = (T *) params;
    }
    __syncthreads();
    if (opTypeA == 2) {
        if (opTypeB == 0) {
            // DISPATCH_METAOP(functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda, PARAMS(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr), InvertedMetaOp, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS));
            // functions::pairwise_transforms::PairWiseTransform<T>::template transformCuda<simdOps::InvertedMetaOp<T, simdOps::Copy<T>, simdOps::Multiply<T>>>(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
        }
    }
}
template<typename T, typename OpClass>
__device__ static inline void invertedMetaPairwiseShapedGeneric(const int opTypeA, const int opTypeB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
    // Compile-time-op variant: packs the (opA, opB) extras into a shared pointer
    // pair (slot 0 for opA, slot 1 for opB; opType == 0 means scalar op, passed by
    // address) and forwards to the templated GRIDShaped transform with OpClass.
    __shared__ Nd4jPointer params[2];
    __shared__ T *paramsPtr;
    if (threadIdx.x == 0) {
        if (opTypeA == 0) {
            params[0] = (Nd4jPointer *) &scalarA;
        }
        else params[0] = (Nd4jPointer *) extraA;
        if (opTypeB == 0) {
            params[1] = (Nd4jPointer *) &scalarB;
        }
        else params[1] = (Nd4jPointer *) extraB;
        paramsPtr = (T *) params;
    }
    __syncthreads();
    functions::grid::GRIDShaped<T>::template transformCuda<OpClass>(dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
};
template<typename T, typename OpClass>
__device__ static inline void invertedMetaPairwiseShapedGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
    // Overload that additionally carries the runtime op numbers (opNumA/opNumB)
    // through to the runtime-dispatched GRIDShaped transform. Extras packing is
    // identical to the overload above.
    __shared__ Nd4jPointer params[2];
    __shared__ T *paramsPtr;
    if (threadIdx.x == 0) {
        if (opTypeA == 0) {
            params[0] = (Nd4jPointer *) &scalarA;
        }
        else params[0] = (Nd4jPointer *) extraA;
        if (opTypeB == 0) {
            params[1] = (Nd4jPointer *) &scalarB;
        }
        else params[1] = (Nd4jPointer *) extraB;
        paramsPtr = (T *) params;
    }
    __syncthreads();
    functions::grid::GRIDShaped<T>::template transformCuda<OpClass>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
};
template<typename T>
__device__ static inline void invertedMetaPairwiseShapedNumericGeneric(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, T *dx, Nd4jLong *xShapeInfo, T *dy, Nd4jLong *yShapeInfo, T *dz, Nd4jLong *zShapeInfo, T *extraA, T *extraB, T scalarA, T scalarB) {
    // Fully runtime-dispatched variant: no OpClass template argument; both ops are
    // selected at runtime inside the non-templated GRIDShaped::transformCuda.
    // Extras packing (slot 0 = opA, slot 1 = opB; opType 0 = scalar by address) is
    // the same as in the templated generics above.
    __shared__ Nd4jPointer params[2];
    __shared__ T *paramsPtr;
    if (threadIdx.x == 0) {
        if (opTypeA == 0) {
            params[0] = (Nd4jPointer *) &scalarA;
        }
        else params[0] = (Nd4jPointer *) extraA;
        if (opTypeB == 0) {
            params[1] = (Nd4jPointer *) &scalarB;
        }
        else params[1] = (Nd4jPointer *) extraB;
        paramsPtr = (T *) params;
    }
    __syncthreads();
    functions::grid::GRIDShaped<T>::transformCuda(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, paramsPtr, nullptr, nullptr, nullptr);
};
// Thin per-type __global__ entry points forwarding to the runtime-dispatched
// generic above. extern "C" keeps the kernel symbol names unmangled.
extern "C" __global__ void invertedMetaPairwiseShapedNumericFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
    invertedMetaPairwiseShapedNumericGeneric<float>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
}
extern "C" __global__ void invertedMetaPairwiseShapedNumericDouble(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
    invertedMetaPairwiseShapedNumericGeneric<double>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
}
extern "C" __global__ void invertedMetaPairwiseShapedNumericHalf(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB) {
    invertedMetaPairwiseShapedNumericGeneric<float16>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
}
#ifndef __CLION_IDE__
// kernels set for pairwise + scalar based on shape
//DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, float, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS))
//DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, double, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS))
//DISPATCH_KERNEL_META(invertedMetaPairwiseShaped_Pairwise_Scalar_, invertedMetaPairwiseShapedGeneric, float16, metaOps::InvertedMetaOp, INPUT(const int opTypeA, const int opTypeB, Nd4jLong N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB), PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS))
#endif
namespace functions {
namespace grid {
// Thin __device__ wrappers forwarding to the shape:: helpers used by the grid
// transforms below (coordinate decomposition, offset computation, and accessors
// for the shape/stride/rank fields of an Nd4jLong shape buffer).
__device__ void _ind2subC(int rank, Nd4jLong *shape, Nd4jLong idx, Nd4jLong *coords) {
    shape::ind2subC(rank, shape, idx, coords);
}
__device__ Nd4jLong _getOffset(Nd4jLong offset, Nd4jLong *shape, Nd4jLong *stride, Nd4jLong *coords, int rank) {
    return shape::getOffset(offset, shape, stride, coords, rank);
}
__device__ Nd4jLong* _shapeOf(Nd4jLong *shape) {
    return shape::shapeOf(shape);
}
__device__ Nd4jLong* _stride(Nd4jLong *shape) {
    return shape::stride(shape);
}
__device__ int _rank(Nd4jLong* shape) {
    return shape::rank(shape);
}
/**
 * This method is able to execute various ops that takes 2 operands (x, y) + extras
 * @tparam T
 *
 * opType selects the op family (only 2 == pairwise transform is handled);
 * opNum selects the concrete op within that family via the EXECUTE_NOE macro,
 * which expands to a dispatch over PAIRWISE_TRANSFORM_OPS (presumably assigning
 * the op result to z -- macro defined in op_boilerplate.h, TODO confirm).
 * NOTE(review): for an unknown opType, z is returned uninitialized.
 */
template <typename T>
__device__ T _execute_2OE(const int opType, const int opNum, T x, T y, T *extras) {
    T z;
    switch(opType) {
        case 2: {
            EXECUTE_NOE((x, y, extras), OPS_A(PAIRWISE_TRANSFORM_OPS));
        };
        break;
        default: {
            PRINT_FIRST("Unknown opType provided: [%i]\n", opType);
        }
        break;
    }
    return z;
}
/**
 * This method is able to execute various ops that takes 1 operand (x) + extras
 * @tparam T
 *
 * opType selects the op family (only 0 == scalar op is handled); opNum selects
 * the concrete op within SCALAR_OPS via the EXECUTE_NOE macro (presumably
 * assigning the op result to z -- macro defined in op_boilerplate.h, TODO confirm).
 * NOTE(review): for an unknown opType, z is returned uninitialized.
 */
template <typename T>
__device__ T _execute_1OE(const int opType, const int opNum, T x, T *extras) {
    T z;
    switch(opType) {
        case 0: {
            EXECUTE_NOE((x, extras), OPS_A(SCALAR_OPS));
        }
        break;
        default: {
            PRINT_FIRST("Unknown opType provided: [%i]\n", opType);
        }
        break;
    }
    return z;
}
template <typename T>
__device__ T _invertedOpExecutorA(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, T x, T y, T *extras) {
    // this code is basically InvertedMetaOp, reorganized to suit per-type execution
    //
    // Computes opB(opA(x, y, paramsA), paramsB): first a two-operand op (opA),
    // then a one-operand op (opB) applied to the intermediate.
    // `extras` is the shared Nd4jPointer pair built by the generics above:
    // wrap[0] = extras/scalar for opA, wrap[1] = extras/scalar for opB.
    Nd4jPointer *wrap = reinterpret_cast<Nd4jPointer *> (extras);
    T *paramsA = reinterpret_cast<T *> (wrap[0]);
    T *paramsB = reinterpret_cast<T *> (wrap[1]);
    T intermediate;
    // Executing first op, opA
    intermediate = _execute_2OE<T>(opTypeA, opNumA, x, y, paramsA);
    // Executing second op, opB
    intermediate = _execute_1OE<T>(opTypeB, opNumB, intermediate, paramsB);
    // just returning result now
    return intermediate;
}
template<typename T>
__device__ void GRIDShaped<T>::transformCuda(int opTypeA, int opNumA, int opTypeB, int opNumB, T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
    // Runtime-dispatched meta-op transform:
    // result[i] = opB(opA(dx[i], y[i], paramsA), paramsB), with opA/opB chosen at
    // runtime from (opTypeA, opNumA) / (opTypeB, opNumB) inside _invertedOpExecutorA.
    // Threads process elements in a grid-stride loop over the full length of x.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Shape metadata is decoded once per block (by thread 0) into shared memory.
    __shared__ int xRank;
    __shared__ int yRank;
    __shared__ int resultRank;
    __shared__ Nd4jLong n;            // total number of elements (length of x)
    __shared__ Nd4jLong *xShape;
    __shared__ Nd4jLong *yShape;
    __shared__ Nd4jLong *zShape;
    __shared__ Nd4jLong *xStride;
    __shared__ Nd4jLong *yStride;
    __shared__ Nd4jLong *zStride;
    if (threadIdx.x == 0) {
        xRank = _rank(xShapeBuffer);
        yRank = _rank(yShapeBuffer);
        resultRank = _rank(resultShapeBuffer);
        n = shape::length(xShapeBuffer);
        xShape = _shapeOf(xShapeBuffer);
        yShape = _shapeOf(yShapeBuffer);
        // z descriptors are only needed for the out-of-place path below.
        if (dx != result) {
            zShape = _shapeOf(resultShapeBuffer);
            zStride = _stride(resultShapeBuffer);
        }
        xStride = _stride(xShapeBuffer);
        yStride = _stride(yShapeBuffer);
    }
    __syncthreads();
    if (dx == result) {
        // In-place: x and result share buffer and shape; reuse the x offset for the write.
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];
        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);
            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            result[xOffset] = _invertedOpExecutorA(opTypeA, opNumA, opTypeB, opNumB, dx[xOffset], y[yOffset], extraParams); //OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    } else {
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];
        Nd4jLong resultCoord[MAX_RANK];
        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);
            _ind2subC(resultRank, zShape, i, resultCoord);
            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            auto resultOffset = _getOffset(0, zShape, zStride, resultCoord, resultRank);
            // BUG FIX: this previously wrote result[0] (every thread clobbering
            // element 0 and leaving resultOffset unused); write through the computed
            // offset, matching the templated overload of this method.
            result[resultOffset] = _invertedOpExecutorA(opTypeA, opNumA, opTypeB, opNumB, dx[xOffset], y[yOffset], extraParams); //OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    }
}
template<typename T>
template<typename OpType>
__device__ void GRIDShaped<T>::transformCuda(T *dx, Nd4jLong *xShapeBuffer, T *y, Nd4jLong *yShapeBuffer, T *result, Nd4jLong *resultShapeBuffer, T *extraParams, int *allocationPointer, UnifiedSharedMemory *manager, Nd4jLong *tadOnlyShapeInfo) {
    // Element-wise pairwise transform with a compile-time op:
    // result[i] = OpType::op(dx[i], y[i], extraParams), where each logical index i
    // is translated to per-buffer offsets through the shape/stride descriptors.
    // Threads process elements in a grid-stride loop over the full length of x.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Shape metadata is decoded once per block (by thread 0) into shared memory.
    __shared__ int xRank;
    __shared__ int yRank;
    __shared__ int resultRank;
    __shared__ Nd4jLong n;            // total number of elements (length of x)
    __shared__ Nd4jLong *xShape;
    __shared__ Nd4jLong *yShape;
    __shared__ Nd4jLong *zShape;
    __shared__ Nd4jLong *xStride;
    __shared__ Nd4jLong *yStride;
    __shared__ Nd4jLong *zStride;
    if (threadIdx.x == 0) {
        xRank = _rank(xShapeBuffer);
        yRank = _rank(yShapeBuffer);
        resultRank = _rank(resultShapeBuffer);
        n = shape::length(xShapeBuffer);
        xShape = _shapeOf(xShapeBuffer);
        yShape = _shapeOf(yShapeBuffer);
        // z descriptors are only needed for the out-of-place path below.
        if (dx != result) {
            zShape = _shapeOf(resultShapeBuffer);
            zStride = _stride(resultShapeBuffer);
        }
        xStride = _stride(xShapeBuffer);
        yStride = _stride(yShapeBuffer);
    }
    __syncthreads();
    if (dx == result) {
        // In-place: x and result share buffer and shape, so the x offset is reused
        // for the write and no result coordinates are computed.
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];
        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);
            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            result[xOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    } else {
        // Out-of-place: result may have its own shape/strides, so a separate
        // coordinate/offset is computed for the write.
        Nd4jLong xCoord[MAX_RANK];
        Nd4jLong yCoord[MAX_RANK];
        Nd4jLong resultCoord[MAX_RANK];
        for (Nd4jLong i = tid; i < n; i += gridDim.x * blockDim.x) {
            _ind2subC(xRank, xShape, i, xCoord);
            _ind2subC(yRank, yShape, i, yCoord);
            _ind2subC(resultRank, zShape, i, resultCoord);
            auto xOffset = _getOffset(0, xShape, xStride, xCoord, xRank);
            auto yOffset = _getOffset(0, yShape, yStride, yCoord, yRank);
            auto resultOffset = _getOffset(0, zShape, zStride, resultCoord, resultRank);
            result[resultOffset] = OpType::op(dx[xOffset], y[yOffset], extraParams);
        }
    }
}
template <>
void GRIDShaped<float>::execMetaPredicateShaped(cudaStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float *dx, Nd4jLong *xShapeInfo, float *dy, Nd4jLong *yShapeInfo, float *dz, Nd4jLong *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) {
    // Host-side launcher for the float shaped meta-op kernel on the given stream.
    // Fixed launch config: 128 blocks x 1024 threads, 2048 B dynamic shared memory.
    invertedMetaPairwiseShapedNumericFloat<<<128, 1024, 2048, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    DEBUG_KERNEL(stream, opNumA);
}
template <>
void GRIDShaped<float16>::execMetaPredicateShaped(cudaStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, float16 *dx, Nd4jLong *xShapeInfo, float16 *dy, Nd4jLong *yShapeInfo, float16 *dz, Nd4jLong *zShapeInfo, float16 *extraA, float16 *extraB, float16 scalarA, float16 scalarB) {
    // Host-side launcher for the half-precision shaped meta-op kernel on the given
    // stream. Launch config (128 blocks x 1024 threads, 2048 B dynamic shared
    // memory) mirrors the float/double specializations.
    invertedMetaPairwiseShapedNumericHalf<<<128, 1024, 2048, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    // Consistency fix: the float and double specializations report opNumA to
    // DEBUG_KERNEL; this variant previously passed opNumB.
    DEBUG_KERNEL(stream, opNumA);
}
template <>
void GRIDShaped<double>::execMetaPredicateShaped(cudaStream_t * stream, Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, Nd4jLong N, double *dx, Nd4jLong *xShapeInfo, double *dy, Nd4jLong *yShapeInfo, double *dz, Nd4jLong *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) {
    // Host-side launcher for the double shaped meta-op kernel on the given stream.
    // Fixed launch config: 128 blocks x 1024 threads, 2048 B dynamic shared memory.
    invertedMetaPairwiseShapedNumericDouble<<<128, 1024, 2048, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB);
    DEBUG_KERNEL(stream, opNumA);
}
}
} |
845a555b284d877962f305c041e064ea550501c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
*/
// Integer ceiling division: (a + b - 1) / b. Intended for positive operands
// (block/grid sizing); constexpr and callable from host and device code.
template <typename U, typename V>
constexpr __host__ __device__ auto divUp(U a, V b) -> decltype(a + b) {
    return (a + b - 1) / b;
}
template<int FS, int SB, int padding_l, typename scalar_t>
__inline__ __device__
void zeroSharedMem(scalar_t* data) {
    /*
    Given an array of length FS + SB, zero out the first padding_l and last
    (FS - padding_l) values in the array
    */
    // Assumes blockDim.x == SB so that threadIdx.x covers one SB-sized window.
    // NOTE(review): the two branches clear different trailing index ranges --
    // FS < SB clears data[SB - FS + padding_l .. SB), FS >= SB clears
    // data[SB + padding_l .. SB + FS); presumably both match how the caller lays
    // out its shared buffer -- TODO confirm against the convolution kernels.
    int tid = threadIdx.x;
    if (FS < SB) {
        // zero all if we have enough threads in a block to do all of them
        if (tid < padding_l || tid > SB - FS + padding_l - 1) {
            data[tid] = scalar_t(0.0);
        }
    } else {
        // otherwise zero out one block at a time
        const int numIterations = divUp<int, int>(FS, SB);
        for (int i = 0; i < numIterations; i++) {
            int offset = i * SB;
            if (tid + offset < padding_l) {
                // leading pad region: data[0 .. padding_l)
                data[tid + offset] = scalar_t(0.0);
            } else if (tid + offset < FS) {
                // trailing pad region: data[SB + padding_l .. SB + FS)
                data[SB + tid + offset] = scalar_t(0.0);
            }
        }
    }
}
template<typename scalar_t>
__inline__ __device__
scalar_t warpReduce(scalar_t data) {
    /*
    Reduce an array within each warp. After processing all values in warp will
    caontain the sum of all original values in that warp.
    data - pointer to data to reduce
    */
    // XOR-shuffle butterfly over offsets 16,8,4,2,1: with a full warp
    // participating, every lane ends up holding the warp-wide sum.
    // SHFL_MASK is a project-defined participation mask.
    data += __shfl_xor_sync(SHFL_MASK, data, 16);
    data += __shfl_xor_sync(SHFL_MASK, data, 8);
    data += __shfl_xor_sync(SHFL_MASK, data, 4);
    data += __shfl_xor_sync(SHFL_MASK, data, 2);
    data += __shfl_xor_sync(SHFL_MASK, data, 1);
    return data;
}
template<typename scalar_t>
__inline__ __device__
scalar_t blockReduce(scalar_t data) {
    /*
    Reduce an entire array on the block level. After processing, the
    first value in the array will contain the reduced sum.
    data - pointer to data to reduce
    */
    // NOTE(review): only warp 0 performs the final warpReduce below, so the full
    // block-wide sum is returned to threads of warp 0; other warps return partials.
    static __shared__ scalar_t warpSum[32];   // one partial sum per warp (<= 32 warps/block)
    const int tid = threadIdx.x;
    int wid = tid / 32;    // warp index within the block
    int lane = tid % 32;   // lane index within the warp
    __syncthreads();
    // reduce each warp then write to shared memory
    scalar_t sum = warpReduce(data);
    if (lane == 0) {
        warpSum[wid] = sum;
    }
    __syncthreads();
    scalar_t v;
    // perform final sum of partial warp sums
    if (tid < blockDim.x / 32) {
        v = warpSum[lane];
    } else {
        v = scalar_t(0.0);
    }
    if (wid == 0) {
        v = warpReduce(v);
    }
    __syncthreads();
    return v;
}
// Host-side status check: on any non-success HIP status, prints the error string
// plus the caller-supplied source line number and terminates the process.
void checkCudaStatus(hipError_t status, int lineNumber = -1) {
    if (status != hipSuccess) {
        std::cout << hipGetErrorString(status)
                  << " at line " << lineNumber << std::endl;
        std::cout << "Exiting" << std::endl;
        exit(1);
    }
}
template<int FS, int SB, int padding_l, typename scalar_t>
__device__
void load_input_to_shared(const scalar_t* input, // global memory
                          int inputOffset, int sequenceLength,
                          int iteration, int numIterations,
                          bool no_prev, scalar_t* output /* shared memory */) {
    /*
    Load a block size of input into shared memory with
    right and left overhang of total size FS. If previously
    loaded memory, overlap will be shifted over to reduce
    global memory access

    input - pointer to start of channel sequence
    inputOffset - how far in the sequence to start loading
    sequenceLength - total length of sequence
    iteration - which block of sequence we are loading
    numIterations - total number of blocks to load
    no_prev - whether to load the whole block if the previous block
    wasn't loaded
    output - shared memory to write input to
    */
    // Shared layout: output[0 .. padding_l) = left overhang,
    // output[padding_l .. padding_l + SB) = the current SB-sized window,
    // output[padding_l + SB .. FS + SB) = right overhang.
    // Assumes blockDim.x == SB (each thread loads one element per SB-sized chunk).
    const int tid = threadIdx.x;
    // Load the left "overhang" of input
    if (iteration > 0) {
        if (padding_l < SB) {
            // load all at once
            // If the previous window is already resident, shift it left by SB
            // instead of re-reading global memory.
            if (tid < padding_l) {
                output[tid] = (no_prev) ? input[inputOffset - padding_l + tid] : output[tid + SB];
            }
        } else {
            // load in chunks of size SB
            int numIterations = divUp<int, int>(padding_l, SB);
            for (int i = 0; i < numIterations; i++) {
                int offset = i * SB;
                if ((tid + offset) < padding_l) {
                    output[tid + offset] = (no_prev) ? input[inputOffset - padding_l + tid + offset] : output[tid + offset + SB];
                }
            }
        }
    }
    // Load the right "overhang" of input
    if (iteration < (numIterations - 1)) {
        const int elementsLeft = sequenceLength - (iteration+1) * SB;
        if ((FS - padding_l) < SB) {
            // load all at once
            if (tid < (FS - padding_l)) {
                // Zero-fill past the end of the sequence.
                output[padding_l + SB + tid] = (tid < elementsLeft) ? input[inputOffset + SB + tid] : scalar_t(0.0);
            }
        } else {
            // load in chunks of size SB
            int numIterations = divUp<int, int>(FS - padding_l, SB);
            for (int i = 0; i < numIterations; i++) {
                int offset = i * SB;
                if ((tid + offset) < (FS - padding_l)) {
                    output[padding_l + SB + tid + offset] = ((tid + offset) < elementsLeft) ? input[inputOffset + SB + tid + offset] : scalar_t(0.0);
                }
            }
        }
    }
    // We should also clear out the right "overhang"
    // (last iteration: no elements remain to the right, so zero the whole region)
    if (iteration == (numIterations - 1)) {
        if ((FS - padding_l) < SB) {
            // clear out all at once
            if (tid < (FS - padding_l)) {
                output[padding_l + SB + tid] = scalar_t(0.0);
            }
        } else {
            // clear in chunks of size SB
            int numIterations = divUp<int, int>(FS - padding_l, SB);
            for (int i = 0; i < numIterations; i++) {
                int offset = i * SB;
                if ((tid + offset) < (FS - padding_l)) {
                    output[padding_l + SB + tid + offset] = scalar_t(0.0);
                }
            }
        }
    }
    // Finally load the current SB-sized window itself (zero-padded past the end).
    output[tid + padding_l] = ((inputOffset + tid) < sequenceLength) ? input[inputOffset + tid] : scalar_t(0.0);
}
| 845a555b284d877962f305c041e064ea550501c1.cu | /**
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
*/
// Ceiling integer division for launch-size math: smallest q with q * den >= num
// (positive operands). constexpr; usable from both host and device code.
template <typename N, typename D>
constexpr __host__ __device__ auto divUp(N num, D den) -> decltype(num + den) {
    return (num + den - 1) / den;
}
template<int FS, int SB, int padding_l, typename scalar_t>
__inline__ __device__
void zeroSharedMem(scalar_t* data) {
    /*
    Given an array of length FS + SB, zero out the first padding_l and last
    (FS - padding_l) values in the array
    */
    // Assumes blockDim.x == SB so that threadIdx.x covers one SB-sized window.
    // NOTE(review): the two branches clear different trailing index ranges --
    // FS < SB clears data[SB - FS + padding_l .. SB), FS >= SB clears
    // data[SB + padding_l .. SB + FS); presumably both match how the caller lays
    // out its shared buffer -- TODO confirm against the convolution kernels.
    int tid = threadIdx.x;
    if (FS < SB) {
        // zero all if we have enough threads in a block to do all of them
        if (tid < padding_l || tid > SB - FS + padding_l - 1) {
            data[tid] = scalar_t(0.0);
        }
    } else {
        // otherwise zero out one block at a time
        const int numIterations = divUp<int, int>(FS, SB);
        for (int i = 0; i < numIterations; i++) {
            int offset = i * SB;
            if (tid + offset < padding_l) {
                // leading pad region: data[0 .. padding_l)
                data[tid + offset] = scalar_t(0.0);
            } else if (tid + offset < FS) {
                // trailing pad region: data[SB + padding_l .. SB + FS)
                data[SB + tid + offset] = scalar_t(0.0);
            }
        }
    }
}
template<typename scalar_t>
__inline__ __device__
scalar_t warpReduce(scalar_t data) {
    /*
    Reduce an array within each warp. After processing all values in warp will
    caontain the sum of all original values in that warp.
    data - pointer to data to reduce
    */
    // XOR-shuffle butterfly over offsets 16,8,4,2,1: with a full warp
    // participating, every lane ends up holding the warp-wide sum.
    // SHFL_MASK is a project-defined participation mask.
    data += __shfl_xor_sync(SHFL_MASK, data, 16);
    data += __shfl_xor_sync(SHFL_MASK, data, 8);
    data += __shfl_xor_sync(SHFL_MASK, data, 4);
    data += __shfl_xor_sync(SHFL_MASK, data, 2);
    data += __shfl_xor_sync(SHFL_MASK, data, 1);
    return data;
}
template<typename scalar_t>
__inline__ __device__
scalar_t blockReduce(scalar_t data) {
    /*
    Reduce an entire array on the block level. After processing, the
    first value in the array will contain the reduced sum.
    data - pointer to data to reduce
    */
    // NOTE(review): only warp 0 performs the final warpReduce below, so the full
    // block-wide sum is returned to threads of warp 0; other warps return partials.
    static __shared__ scalar_t warpSum[32];   // one partial sum per warp (<= 32 warps/block)
    const int tid = threadIdx.x;
    int wid = tid / 32;    // warp index within the block
    int lane = tid % 32;   // lane index within the warp
    __syncthreads();
    // reduce each warp then write to shared memory
    scalar_t sum = warpReduce(data);
    if (lane == 0) {
        warpSum[wid] = sum;
    }
    __syncthreads();
    scalar_t v;
    // perform final sum of partial warp sums
    if (tid < blockDim.x / 32) {
        v = warpSum[lane];
    } else {
        v = scalar_t(0.0);
    }
    if (wid == 0) {
        v = warpReduce(v);
    }
    __syncthreads();
    return v;
}
// Host-side status check: on any non-success CUDA status, prints the error string
// plus the caller-supplied source line number and terminates the process.
void checkCudaStatus(cudaError_t status, int lineNumber = -1) {
    if (status != cudaSuccess) {
        std::cout << cudaGetErrorString(status)
                  << " at line " << lineNumber << std::endl;
        std::cout << "Exiting" << std::endl;
        exit(1);
    }
}
template<int FS, int SB, int padding_l, typename scalar_t>
__device__
void load_input_to_shared(const scalar_t* input, // global memory
                          int inputOffset, int sequenceLength,
                          int iteration, int numIterations,
                          bool no_prev, scalar_t* output /* shared memory */) {
    /*
    Load a block size of input into shared memory with
    right and left overhang of total size FS. If previously
    loaded memory, overlap will be shifted over to reduce
    global memory access

    input - pointer to start of channel sequence
    inputOffset - how far in the sequence to start loading
    sequenceLength - total length of sequence
    iteration - which block of sequence we are loading
    numIterations - total number of blocks to load
    no_prev - whether to load the whole block if the previous block
    wasn't loaded
    output - shared memory to write input to
    */
    // Shared layout: output[0 .. padding_l) = left overhang,
    // output[padding_l .. padding_l + SB) = the current SB-sized window,
    // output[padding_l + SB .. FS + SB) = right overhang.
    // Assumes blockDim.x == SB (each thread loads one element per SB-sized chunk).
    const int tid = threadIdx.x;
    // Load the left "overhang" of input
    if (iteration > 0) {
        if (padding_l < SB) {
            // load all at once
            // If the previous window is already resident, shift it left by SB
            // instead of re-reading global memory.
            if (tid < padding_l) {
                output[tid] = (no_prev) ? input[inputOffset - padding_l + tid] : output[tid + SB];
            }
        } else {
            // load in chunks of size SB
            int numIterations = divUp<int, int>(padding_l, SB);
            for (int i = 0; i < numIterations; i++) {
                int offset = i * SB;
                if ((tid + offset) < padding_l) {
                    output[tid + offset] = (no_prev) ? input[inputOffset - padding_l + tid + offset] : output[tid + offset + SB];
                }
            }
        }
    }
    // Load the right "overhang" of input
    if (iteration < (numIterations - 1)) {
        const int elementsLeft = sequenceLength - (iteration+1) * SB;
        if ((FS - padding_l) < SB) {
            // load all at once
            if (tid < (FS - padding_l)) {
                // Zero-fill past the end of the sequence.
                output[padding_l + SB + tid] = (tid < elementsLeft) ? input[inputOffset + SB + tid] : scalar_t(0.0);
            }
        } else {
            // load in chunks of size SB
            int numIterations = divUp<int, int>(FS - padding_l, SB);
            for (int i = 0; i < numIterations; i++) {
                int offset = i * SB;
                if ((tid + offset) < (FS - padding_l)) {
                    output[padding_l + SB + tid + offset] = ((tid + offset) < elementsLeft) ? input[inputOffset + SB + tid + offset] : scalar_t(0.0);
                }
            }
        }
    }
    // We should also clear out the right "overhang"
    // (last iteration: no elements remain to the right, so zero the whole region)
    if (iteration == (numIterations - 1)) {
        if ((FS - padding_l) < SB) {
            // clear out all at once
            if (tid < (FS - padding_l)) {
                output[padding_l + SB + tid] = scalar_t(0.0);
            }
        } else {
            // clear in chunks of size SB
            int numIterations = divUp<int, int>(FS - padding_l, SB);
            for (int i = 0; i < numIterations; i++) {
                int offset = i * SB;
                if ((tid + offset) < (FS - padding_l)) {
                    output[padding_l + SB + tid + offset] = scalar_t(0.0);
                }
            }
        }
    }
    // Finally load the current SB-sized window itself (zero-padded past the end).
    output[tid + padding_l] = ((inputOffset + tid) < sequenceLength) ? input[inputOffset + tid] : scalar_t(0.0);
}
|
3a19debf3282fa130de0da640b163469d9620392.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
# =============================================================================
# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/
#include "coreneuron/utils/utils_cuda.h"
#include "coreneuron/permute/cellorder.hpp"
#include "coreneuron/network/tnode.hpp"
#include "coreneuron/sim/multicore.hpp"
namespace coreneuron {
// Forward (triangularization) phase of the interleaved Hines solve for one warp
// lane. Walks the strided cycles from the leaves (lastnode) toward the root,
// eliminating each node's contribution into its parent via atomics (parents may
// be shared across lanes of the warp).
__device__ void triang_interleaved2_device(NrnThread* nt,
                                           int icore,
                                           int ncycle,
                                           int* stride,
                                           int lastnode) {
    // Guard: with no cycles there is nothing to eliminate (and stride[-1] below
    // would be read out of bounds).
    if (ncycle <= 0) {
        return;
    }
    int icycle = ncycle - 1;
    int istride = stride[icycle];
    int i = lastnode - istride + icore;
    int ip;
    double p;
    while (icycle >= 0) {
        // most efficient if istride equal warpsize, else branch divergence!
        if (icore < istride) {
            ip = nt->_v_parent_index[i];
            p = nt->_actual_a[i] / nt->_actual_d[i];
            atomicAdd(&nt->_actual_d[ip], -p * nt->_actual_b[i]);
            atomicAdd(&nt->_actual_rhs[ip], -p * nt->_actual_rhs[i]);
        }
        --icycle;
        // BUG FIX: refresh only while another cycle remains; previously
        // stride[-1] was read (out of bounds) on the final iteration. The garbage
        // value was never used, but the read itself is undefined behavior.
        if (icycle >= 0) {
            istride = stride[icycle];
            i -= istride;
        }
    }
}
// Backward-substitution phase of the interleaved Hines solve for one warp lane:
// first normalizes the root rows, then walks the cycles from the root toward the
// leaves, solving each node from its (already solved) parent.
__device__ void bksub_interleaved2_device(NrnThread* nt,
                                          int root,
                                          int lastroot,
                                          int icore,
                                          int ncycle,
                                          int* stride,
                                          int firstnode) {
    // Each lane handles every warpsize-th root in [root, lastroot).
    for (int i = root; i < lastroot; i += warpsize) {
        nt->_actual_rhs[i] /= nt->_actual_d[i];  // the root
    }
    int i = firstnode + icore;
    int ip;
    for (int icycle = 0; icycle < ncycle; ++icycle) {
        int istride = stride[icycle];
        // Lanes beyond the cycle's stride have no node in this cycle.
        if (icore < istride) {
            ip = nt->_v_parent_index[i];
            nt->_actual_rhs[i] -= nt->_actual_b[i] * nt->_actual_rhs[ip];
            nt->_actual_rhs[i] /= nt->_actual_d[i];
        }
        i += istride;
    }
}
// Grid-stride kernel over "cores": each core is one lane of a warp-sized work
// unit; per-warp metadata (cycle counts, stride tables, node ranges) comes from
// the InterleaveInfo permutation computed on the host.
__global__ void solve_interleaved2_kernel(NrnThread* nt, InterleaveInfo* ii, int ncore) {
    int icore = blockDim.x * blockIdx.x + threadIdx.x;
    int* ncycles = ii->cellsize;        // nwarp of these
    int* stridedispl = ii->stridedispl; // nwarp+1 of these
    int* strides = ii->stride;          // sum ncycles of these (bad since ncompart/warpsize)
    int* rootbegin = ii->firstnode;     // nwarp+1 of these
    int* nodebegin = ii->lastnode;      // nwarp+1 of these
    while (icore < ncore) {
        int iwarp = icore / warpsize;    // figure out the >> value
        int ic = icore & (warpsize - 1); // figure out the & mask
        int ncycle = ncycles[iwarp];
        int* stride = strides + stridedispl[iwarp];
        int root = rootbegin[iwarp];
        int lastroot = rootbegin[iwarp + 1];
        int firstnode = nodebegin[iwarp];
        int lastnode = nodebegin[iwarp + 1];
        // Forward elimination, then back substitution for this lane's share.
        triang_interleaved2_device(nt, ic, ncycle, stride, lastnode);
        bksub_interleaved2_device(nt, root + ic, lastroot, ic, ncycle, stride, firstnode);
        icore += blockDim.x * gridDim.x;
    }
}
// Host-side launcher for the interleaved2 solver kernel on the given HIP stream.
// Blocks until the kernel completes; errors are surfaced via CHECKLAST.
void solve_interleaved2_launcher(NrnThread* nt, InterleaveInfo* info, int ncore, void* stream) {
    auto cuda_stream = static_cast<hipStream_t>(stream);
    /// the selection of these parameters has been done after running the channel-benchmark for
    /// typical production runs, i.e. 1 MPI task with 1440 cells & 6 MPI tasks with 8800 cells.
    /// In the OpenACC/OpenMP implementations threadsPerBlock is set to 32. From profiling the
    /// channel-benchmark circuits mentioned above we figured out that the best performance was
    /// achieved with this configuration
    int threadsPerBlock = warpsize;
    /// Max number of blocksPerGrid for NVIDIA GPUs is 65535, so we need to make sure that the
    /// blocksPerGrid we launch the CUDA kernel with doesn't exceed this number
    const auto maxBlocksPerGrid = 65535;
    int provisionalBlocksPerGrid = (ncore + threadsPerBlock - 1) / threadsPerBlock;
    int blocksPerGrid = provisionalBlocksPerGrid <= maxBlocksPerGrid ? provisionalBlocksPerGrid
                                                                     : maxBlocksPerGrid;
    hipLaunchKernelGGL(( solve_interleaved2_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, cuda_stream,  nt, info, ncore);
    // NOTE(review): the hipStreamSynchronize return code is not checked here;
    // CHECKLAST below is relied on to surface launch/execution errors.
    hipStreamSynchronize(cuda_stream);
    CHECKLAST("solve_interleaved2_launcher");
}
} // namespace coreneuron
| 3a19debf3282fa130de0da640b163469d9620392.cu | /*
# =============================================================================
# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/
#include "coreneuron/utils/utils_cuda.h"
#include "coreneuron/permute/cellorder.hpp"
#include "coreneuron/network/tnode.hpp"
#include "coreneuron/sim/multicore.hpp"
namespace coreneuron {
// Forward (triangularization) phase of the interleaved Hines solve for one warp
// lane. Walks the strided cycles from the leaves (lastnode) toward the root,
// eliminating each node's contribution into its parent via atomics (parents may
// be shared across lanes of the warp).
__device__ void triang_interleaved2_device(NrnThread* nt,
                                           int icore,
                                           int ncycle,
                                           int* stride,
                                           int lastnode) {
    // Guard: with no cycles there is nothing to eliminate (and stride[-1] below
    // would be read out of bounds).
    if (ncycle <= 0) {
        return;
    }
    int icycle = ncycle - 1;
    int istride = stride[icycle];
    int i = lastnode - istride + icore;
    int ip;
    double p;
    while (icycle >= 0) {
        // most efficient if istride equal warpsize, else branch divergence!
        if (icore < istride) {
            ip = nt->_v_parent_index[i];
            p = nt->_actual_a[i] / nt->_actual_d[i];
            atomicAdd(&nt->_actual_d[ip], -p * nt->_actual_b[i]);
            atomicAdd(&nt->_actual_rhs[ip], -p * nt->_actual_rhs[i]);
        }
        --icycle;
        // BUG FIX: refresh only while another cycle remains; previously
        // stride[-1] was read (out of bounds) on the final iteration. The garbage
        // value was never used, but the read itself is undefined behavior.
        if (icycle >= 0) {
            istride = stride[icycle];
            i -= istride;
        }
    }
}
// Backward-substitution phase of the interleaved Hines solve for one warp lane:
// first normalizes the root rows, then walks the cycles from the root toward the
// leaves, solving each node from its (already solved) parent.
__device__ void bksub_interleaved2_device(NrnThread* nt,
                                          int root,
                                          int lastroot,
                                          int icore,
                                          int ncycle,
                                          int* stride,
                                          int firstnode) {
    // Each lane handles every warpsize-th root in [root, lastroot).
    for (int i = root; i < lastroot; i += warpsize) {
        nt->_actual_rhs[i] /= nt->_actual_d[i];  // the root
    }
    int i = firstnode + icore;
    int ip;
    for (int icycle = 0; icycle < ncycle; ++icycle) {
        int istride = stride[icycle];
        // Lanes beyond the cycle's stride have no node in this cycle.
        if (icore < istride) {
            ip = nt->_v_parent_index[i];
            nt->_actual_rhs[i] -= nt->_actual_b[i] * nt->_actual_rhs[ip];
            nt->_actual_rhs[i] /= nt->_actual_d[i];
        }
        i += istride;
    }
}
// One thread per lane (icore) of a warp-sized cell group: look up the
// group's geometry in the InterleaveInfo arrays, then run forward
// elimination followed by back-substitution.  The grid-stride loop makes
// the kernel correct for any launch configuration.
__global__ void solve_interleaved2_kernel(NrnThread* nt, InterleaveInfo* ii, int ncore) {
    int icore = blockDim.x * blockIdx.x + threadIdx.x;
    int* ncycles = ii->cellsize;       // nwarp of these
    int* stridedispl = ii->stridedispl; // nwarp+1 of these
    int* strides = ii->stride;         // sum ncycles of these (bad since ncompart/warpsize)
    int* rootbegin = ii->firstnode;    // nwarp+1 of these
    int* nodebegin = ii->lastnode;     // nwarp+1 of these
    while (icore < ncore) {
        int iwarp = icore / warpsize; // figure out the >> value
        int ic = icore & (warpsize - 1); // figure out the & mask
        int ncycle = ncycles[iwarp];
        // This warp's slice of the stride table.
        int* stride = strides + stridedispl[iwarp];
        int root = rootbegin[iwarp];
        int lastroot = rootbegin[iwarp + 1];
        int firstnode = nodebegin[iwarp];
        int lastnode = nodebegin[iwarp + 1];
        triang_interleaved2_device(nt, ic, ncycle, stride, lastnode);
        bksub_interleaved2_device(nt, root + ic, lastroot, ic, ncycle, stride, firstnode);
        icore += blockDim.x * gridDim.x;
    }
}
// Host-side launcher: run solve_interleaved2_kernel on `stream` and block
// until it finishes.  `stream` is type-erased (void*) so callers need not
// see CUDA types.
void solve_interleaved2_launcher(NrnThread* nt, InterleaveInfo* info, int ncore, void* stream) {
    auto cuda_stream = static_cast<cudaStream_t>(stream);
    /// One warp per block: this configuration gave the best performance when
    /// profiling the channel-benchmark production-like circuits (1 MPI task
    /// with 1440 cells and 6 MPI tasks with 8800 cells); the OpenACC/OpenMP
    /// implementations use the same value (32).
    const int block_dim = warpsize;
    /// ceil(ncore / block_dim), clamped because the grid must not exceed
    /// 65535 blocks on NVIDIA GPUs; the kernel's grid-stride loop covers any
    /// remainder beyond the clamp.
    const int grid_cap = 65535;
    int grid_dim = (ncore + block_dim - 1) / block_dim;
    if (grid_dim > grid_cap) {
        grid_dim = grid_cap;
    }
    solve_interleaved2_kernel<<<grid_dim, block_dim, 0, cuda_stream>>>(nt, info, ncore);
    cudaStreamSynchronize(cuda_stream);
    CHECKLAST("solve_interleaved2_launcher");
}
} // namespace coreneuron
|
123a4a4a8e1ea3e1719f4335f8a0f89d139c0c58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "worklist.cuh"
namespace mgg {
namespace worklist {
// Count the set entries of flag1[0..size): each set flag adds 1 to *num
// (accumulates on top of whatever *num already holds).
__global__ void get_flag_num(char *flag1, vtx_t size, vtx_t *num) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= size)
    return;
  if (flag1[gid]) {
    atomicAdd(num, 1);
  }
}
// For each of the `size` frontier vertices in `vtx`, record the vertex's
// slot within the frontier (lookup_buffer[vertex] = slot) and its
// out-degree computed from the CSR row offsets in xadj.  `vtx_ptr` is
// accepted but unused here.
__global__ void compute_lookup_buffer(vtx_t *vtx,
                                      vtx_t *vtx_ptr, vtx_t *xadj,
                                      uint *lookup_buffer, uint *outDegree,
                                      vtx_t size) {
  size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < size) {
    // vtx_t id=wl.data[tid];
    lookup_buffer[vtx[tid]] = tid;
    outDegree[tid] = xadj[vtx[tid] + 1] - xadj[vtx[tid]];
  }
}
__global__ void worklist_reset(Worklist wl) { wl.reset_d(); }
// Push a single item onto the worklist; only global thread 0 appends.
__global__ void worklist_add(Worklist wl, vtx_t item) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid == 0)
    wl.append(item);
}
// Fill the worklist with the identity sequence 0..n-1 and publish n as its
// element count (written once, by global thread 0).
__global__ void worklist_init_full(Worklist wl, vtx_t n) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid < n)
    wl.data[gid] = gid;
  if (gid == 0)
    *wl.count = n;
}
// Fold the first n worklist entries into *min / *max with atomics; the
// results combine with the values *min / *max already hold, so the caller
// is expected to pre-seed them (presumably with identity values — confirm
// at the call site).
__global__ void worklist_min_max(Worklist wl, vtx_t n, vtx_t *min, vtx_t *max) {
  size_t id = blockDim.x * blockIdx.x + threadIdx.x;
  if (id < n) {
    vtx_t tmp = wl.data[id];
    atomicMin(min, tmp);
    atomicMax(max, tmp);
  }
}
// Host wrapper: fill the worklist with 0..n-1 on the default stream.
// The launch is asynchronous; no synchronization is performed here.
void Worklist::initFull(vtx_t n) {
  hipLaunchKernelGGL(( worklist_init_full), dim3(n / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, *this, n);
}
// void Worklist::initFull(Worklist wl, vtx_t n, hipStream_t stream)
// {
// worklist_init_full<<<n / BLOCK_SIZE + 1, BLOCK_SIZE, 0, stream>>>(wl, n);
// }
// Blocking read of the device-side element counter into the cached host
// copy c_count; hipMemcpy synchronizes, so the returned value is current.
vtx_t Worklist::get_sz() {
  H_ERR(hipMemcpy(&this->c_count, this->count, sizeof(vtx_t),
                  hipMemcpyDeviceToHost));
  return this->c_count;
}
// Read the device-side element counter on `streams` and return it.
// Bug fix: the original returned c_count immediately after hipMemcpyAsync,
// so the caller could observe a stale (or never-written) count; the stream
// must be synchronized before the host reads the destination buffer.
vtx_t Worklist::get_sz(hipStream_t streams) {
  H_ERR(hipMemcpyAsync(&this->c_count, this->count, sizeof(vtx_t),
                       hipMemcpyDeviceToHost, streams));
  H_ERR(hipStreamSynchronize(streams));
  return this->c_count;
}
// void wlGetMinMax(Worklist wl, vtx_t n, vtx_t *min, vtx_t *max)
// {
// //hipLaunchKernelGGL(( worklist_init_full), dim3(n / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, wl, n);
// vtx_t size = wl_get_sz(&wl);
// worklist_min_max<<<n / BLOCK_SIZE + 1, BLOCK_SIZE>>>(wl, n, min, max);
// }
// void wl_sync(Worklist wl)
// {
// H_ERR(
// hipMemcpy(&wl.c_count, wl.count, sizeof(vtx_t),
// hipMemcpyDeviceToHost));
// }
// Gather pass: append every index in [0, size) whose flag is set onto the
// worklist via warp_append.
__global__ void flag_to_wl(Worklist wl, char *flag1, vtx_t size) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= size)
    return;
  if (flag1[gid])
    wl.warp_append(gid);
}
// Scatter pass: set flag1[v] = 1 for every vertex v stored in the first
// `size` worklist slots.
__global__ void wl_to_flag(Worklist wl, char *flag1, vtx_t size) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid < size) {
    flag1[wl.data[gid]] = 1;
  }
}
// after that, update flag_local
// Split the active frontier (flag_active set) into two worklists:
// indices with flag_local set go to wl_local, the rest to wl_remote.
__global__ void flag_to_wl_remote_local(Worklist wl_remote, Worklist wl_local,
                                        char *flag_local, char *flag_active,
                                        vtx_t size) {
  size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < size) {
    if (flag_active[tid]) {
      if (flag_local[tid])
        wl_local.warp_append(tid);
      else
        wl_remote.warp_append(tid);
    }
  }
}
// __global__ void compute_offset(Worklist wl, vtx_t *vtx_ptr, vtx_t *xadj,
// vtx_t size) {
// size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
// if (tid < size) {
// }
// }
// Reset the list: run the device-side reset (single-thread kernel on the
// default stream), then clear the cached host-side state via creset().
void Worklist::reset() {
  hipLaunchKernelGGL(( worklist_reset), dim3(1), dim3(1), 0, 0, *this);
  this->creset();
}
// void wl_reset(Worklist wl, hipStream_t streams)
// {
// worklist_reset<<<1, 1, 0, streams>>>(wl);
// wl.creset();
// }
// Append one item from the host: a single-thread kernel launched on
// `streams`.  Asynchronous — no synchronization is performed here.
void Worklist::add_item(vtx_t item, hipStream_t streams) {
  hipLaunchKernelGGL(( worklist_add), dim3(1), dim3(1), 0, streams, *this, item);
}
} // namespace worklist
} // namespace mgg | 123a4a4a8e1ea3e1719f4335f8a0f89d139c0c58.cu | #include "worklist.cuh"
namespace mgg {
namespace worklist {
// Count the set entries of flag1[0..size): each set flag adds 1 to *num
// (accumulates on top of whatever *num already holds).
__global__ void get_flag_num(char *flag1, vtx_t size, vtx_t *num)
{
  size_t id = blockDim.x * blockIdx.x + threadIdx.x;
  if (id < size)
  {
    if (flag1[id])
      atomicAdd(num, 1);
  }
}
// For each of the `size` frontier vertices in `vtx`, record the vertex's
// slot within the frontier (lookup_buffer[vertex] = slot) and its
// out-degree from the CSR row offsets in xadj.  `vtx_ptr` is unused here.
__global__ void compute_lookup_buffer(vtx_t *vtx,
                                      vtx_t *vtx_ptr, vtx_t *xadj,
                                      uint *lookup_buffer, uint *outDegree,
                                      vtx_t size) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= size)
    return;
  vtx_t v = vtx[gid];
  lookup_buffer[v] = gid;
  outDegree[gid] = xadj[v + 1] - xadj[v];
}
__global__ void worklist_reset(Worklist wl) { wl.reset_d(); }
// Push a single item onto the worklist; only global thread 0 appends.
__global__ void worklist_add(Worklist wl, vtx_t item) {
  if (blockDim.x * blockIdx.x + threadIdx.x == 0) {
    wl.append(item);
  }
}
// Fill the worklist with the identity sequence 0..n-1 and publish n as its
// element count (written once, by global thread 0).
__global__ void worklist_init_full(Worklist wl, vtx_t n) {
  size_t id = blockDim.x * blockIdx.x + threadIdx.x;
  if (id < n) {
    wl.data[id] = id;
  }
  if (id == 0) {
    *wl.count = n;
  }
}
// Fold the first n worklist entries into *min / *max with atomics; results
// combine with the values *min / *max already hold.
__global__ void worklist_min_max(Worklist wl, vtx_t n, vtx_t *min, vtx_t *max) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= n)
    return;
  vtx_t v = wl.data[gid];
  atomicMin(min, v);
  atomicMax(max, v);
}
// Host wrapper: fill the worklist with 0..n-1 on the default stream.
// The launch is asynchronous; no synchronization is performed here.
void Worklist::initFull(vtx_t n) {
  worklist_init_full<<<n / BLOCK_SIZE + 1, BLOCK_SIZE>>>(*this, n);
}
// void Worklist::initFull(Worklist wl, vtx_t n, cudaStream_t stream)
// {
// worklist_init_full<<<n / BLOCK_SIZE + 1, BLOCK_SIZE, 0, stream>>>(wl, n);
// }
// Blocking read of the device-side element counter into the cached host
// copy c_count; cudaMemcpy synchronizes, so the returned value is current.
vtx_t Worklist::get_sz() {
  H_ERR(cudaMemcpy(&this->c_count, this->count, sizeof(vtx_t),
                   cudaMemcpyDeviceToHost));
  return this->c_count;
}
// Read the device-side element counter on `streams` and return it.
// Bug fix: the original returned c_count immediately after cudaMemcpyAsync,
// so the caller could observe a stale (or never-written) count; the stream
// must be synchronized before the host reads the destination buffer.
vtx_t Worklist::get_sz(cudaStream_t streams) {
  H_ERR(cudaMemcpyAsync(&this->c_count, this->count, sizeof(vtx_t),
                        cudaMemcpyDeviceToHost, streams));
  H_ERR(cudaStreamSynchronize(streams));
  return this->c_count;
}
// void wlGetMinMax(Worklist wl, vtx_t n, vtx_t *min, vtx_t *max)
// {
// // worklist_init_full<<<n / BLOCK_SIZE + 1, BLOCK_SIZE>>>(wl, n);
// vtx_t size = wl_get_sz(&wl);
// worklist_min_max<<<n / BLOCK_SIZE + 1, BLOCK_SIZE>>>(wl, n, min, max);
// }
// void wl_sync(Worklist wl)
// {
// H_ERR(
// cudaMemcpy(&wl.c_count, wl.count, sizeof(vtx_t),
// cudaMemcpyDeviceToHost));
// }
// Gather pass: append every index in [0, size) whose flag is set onto the
// worklist via warp_append.
__global__ void flag_to_wl(Worklist wl, char *flag1, vtx_t size) {
  size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < size) {
    if (flag1[tid])
      wl.warp_append(tid);
  }
}
// Scatter pass: set flag1[v] = 1 for every vertex v stored in the first
// `size` worklist slots.
__global__ void wl_to_flag(Worklist wl, char *flag1, vtx_t size) {
  size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < size) {
    vtx_t src = wl.data[tid];
    flag1[src] = 1;
  }
}
// after that, update flag_local
// Split the active frontier (flag_active set) into two worklists:
// indices with flag_local set go to wl_local, the rest to wl_remote.
__global__ void flag_to_wl_remote_local(Worklist wl_remote, Worklist wl_local,
                                        char *flag_local, char *flag_active,
                                        vtx_t size) {
  size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= size)
    return;
  if (!flag_active[gid])
    return;
  if (flag_local[gid])
    wl_local.warp_append(gid);
  else
    wl_remote.warp_append(gid);
}
// __global__ void compute_offset(Worklist wl, vtx_t *vtx_ptr, vtx_t *xadj,
// vtx_t size) {
// size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
// if (tid < size) {
// }
// }
// Reset the list: run the device-side reset (single-thread kernel on the
// default stream), then clear the cached host-side state via creset().
void Worklist::reset() {
  worklist_reset<<<1, 1>>>(*this);
  this->creset();
}
// void wl_reset(Worklist wl, cudaStream_t streams)
// {
// worklist_reset<<<1, 1, 0, streams>>>(wl);
// wl.creset();
// }
// Append one item from the host: a single-thread kernel launched on
// `streams`.  Asynchronous — no synchronization is performed here.
void Worklist::add_item(vtx_t item, cudaStream_t streams) {
  worklist_add<<<1, 1, 0, streams>>>(*this, item);
}
} // namespace worklist
} // namespace mgg |
aa73cf10d4e4d9e1c55541b8528aa871f85e5ecc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===---- reduction.cu - GPU OpenMP reduction implementation ----- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// --- Copy of llvm-project/openmp/libomptarget/common/src/reduction.cu ---
// This file contains the implementation of reduction with KMPC interface.
// Runtime impl of @omp_shuffle_and_reduce_func using redux.sync
// int32_t types
//
//===----------------------------------------------------------------------===//
#pragma omp declare target
#include "common/omptarget.h"
#include "target/shuffle.h"
#include "target_impl.h"
#define N 1000
// shared reduction transfer heap
[[clang::loader_uninitialized]] int32_t transfer_reduce[32];
#pragma omp allocate(transfer_reduce) allocator(omp_pteam_mem_alloc)
// No-op: a reduction needs no epilogue on this target.
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
// No-op: a nowait reduction needs no epilogue on this target.
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
#pragma omp begin declare variant match(device={isa(sm_80)}, implementation = {extension(match_any)})
// sm_80 variant: per-warp sum of each of the N int32 payload elements using
// the redux.sync instruction.  Each warp stages its result in
// transfer_reduce[WarpId]; lanes of warp 0 then pull the per-warp partials
// back into warp 0's payload (lane `tid` reads transfer_reduce[tid]).
// NOTE(review): the membermask handed to __nvvm_redux_sync_add is 0xFF
// (lanes 0-7 only), not a full-warp mask — confirm this matches the
// intended participant set.
INLINE static void gpu_warp_reduce_v2(void *reduce_data, uint32_t size, uint32_t tid) {
  int32_t WarpId = tid / WARPSIZE;
  int32_t *local = *(int32_t **)reduce_data;
  for (int32_t i = 0; i < N; i++) {
    transfer_reduce[WarpId] = __nvvm_redux_sync_add(local[i], 0xFF);
    __syncthreads();
    if (WarpId < 1) local[i] = transfer_reduce[tid];
    __syncthreads();
  }
}
// sm_80 variant: warp 0 reduces the `size` per-warp partials (left in its
// lanes by gpu_warp_reduce_v2) down to one value, which thread 0 writes
// back to the payload for every element.
// NOTE(review): `size` is passed where __nvvm_redux_sync_add expects a lane
// membermask; a mask such as (1u << size) - 1 may have been intended —
// confirm.
INLINE static void gpu_master_warp_reduce_v2(void *reduce_data, uint32_t size,
                                             uint32_t tid) {
  int32_t *local = *(int32_t **)reduce_data;
  for(int32_t i = 0; i < N; i++) {
    transfer_reduce[0] = __nvvm_redux_sync_add(local[i], size);
    __syncthreads();
    if (tid == 0) local[i] = transfer_reduce[0];
    __syncthreads();
  }
}
#pragma omp end declare variant
// Non-sm_80 fallback: intentionally a no-op.  The only call sites sit
// behind __CUDA_ARCH__ >= 800 in nvptx_parallel_reduce_nowait, so this
// body is never executed where this overload is selected.
INLINE static void gpu_warp_reduce_v2(void *reduce_data, uint32_t size, uint32_t tid) {
  ;
}
// Non-sm_80 fallback: intentionally a no-op (see gpu_warp_reduce_v2 above;
// call sites are guarded by __CUDA_ARCH__ >= 800).
INLINE static void gpu_master_warp_reduce_v2(void *reduce_data, uint32_t size,
                                             uint32_t tid) {
  ;
}
// Full-warp tree reduction: log2(WARPSIZE) shuffle-and-reduce steps with
// halving offsets (AlgoVersion 0 — LaneId is unused by the callback).
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
                                           kmp_ShuffleReductFctPtr shflFct) {
  for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
    shflFct(reduce_data, /*LaneId - not used= */ 0,
            /*Offset = */ mask, /*AlgoVersion=*/0);
  }
}
// Reduction over the first `size` (contiguous) lanes of a warp, used for a
// partially populated warp.  The active count shrinks as ceil(curr/2) per
// step so odd lane counts are handled (AlgoVersion 1).
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
                                             kmp_ShuffleReductFctPtr shflFct,
                                             uint32_t size, uint32_t tid) {
  uint32_t curr_size;
  uint32_t mask;
  curr_size = size;
  mask = curr_size / 2;
  while (mask > 0) {
    shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
    curr_size = (curr_size + 1) / 2;
    mask = curr_size / 2;
  }
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
// Reduce across the currently-active — possibly non-contiguous — lanes of a
// warp (per the caller, only threads in an L2 parallel region reach this).
// Returns 1 in the lane that ends up holding the reduced value (logical
// lane 0).
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
  uint32_t size, remote_id, physical_lane_id;
  physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
  __kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
  __kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
  // Rank of this lane among the active lanes, doubled so it can be halved
  // once per pairing round below.
  uint32_t logical_lane_id = __kmpc_impl_popc(Liveness & lanemask_lt) * 2;
  __kmpc_impl_lanemask_t lanemask_gt = __kmpc_impl_lanemask_gt();
  do {
    Liveness = __kmpc_impl_activemask();
    // Nearest active lane above this one is the reduction partner.
    remote_id = __kmpc_impl_ffs(Liveness & lanemask_gt);
    size = __kmpc_impl_popc(Liveness);
    logical_lane_id /= 2;
    shflFct(reduce_data, /*LaneId =*/logical_lane_id,
            /*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
  } while (logical_lane_id % 2 == 0 && size > 1);
  return (logical_lane_id == 0);
}
#endif
// Intra-team reduction worker shared by all execution modes.  Three phases:
// (1) reduce within each warp, (2) copy the per-warp partials to warp 0
// (cpyFct), (3) warp 0 reduces the partials.  Returns 1 only in the thread
// that ends up holding the reduced value.
INLINE
static int32_t nvptx_parallel_reduce_nowait(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
    bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
  uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
  uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
  // A single-thread team is already "reduced".
  if (NumThreads == 1)
    return 1;
  /*
   * This reduce function handles reduction within a team. It handles
   * parallel regions in both L1 and L2 parallelism levels. It also
   * supports Generic, SPMD, and NoOMP modes.
   *
   * 1. Reduce within a warp.
   * 2. Warp master copies value to warp 0 via shared memory.
   * 3. Warp 0 reduces to a single value.
   * 4. The reduced value is available in the thread that returns 1.
   */
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
  // SM80 path: use the redux.sync based helpers above.
  uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
  uint32_t WarpId = BlockThreadId / WARPSIZE;
  // NOTE(review): the last warp's lane count is computed as
  // NumThreads % WarpsNeeded; NumThreads % WARPSIZE looks like the intended
  // expression — confirm before relying on partial-warp teams here.
  uint32_t myWarpSize = (WarpsNeeded - WarpId) == 1 ?
      NumThreads % WarpsNeeded : 32;
  gpu_warp_reduce_v2(reduce_data, myWarpSize, BlockThreadId);
  if (NumThreads > WARPSIZE && WarpId == 0)
    gpu_master_warp_reduce_v2(reduce_data, WarpsNeeded, BlockThreadId);
  return BlockThreadId == 0;
#elif defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
  uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
  uint32_t WarpId = BlockThreadId / WARPSIZE;
  // Volta execution model:
  // For the Generic execution mode a parallel region either has 1 thread and
  // beyond that, always a multiple of 32. For the SPMD execution mode we may
  // have any number of threads.
  if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/NumThreads % WARPSIZE,
                              /*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
  // When we have more than [warpsize] number of threads
  // a block reduction is performed here.
  //
  // Only L1 parallel region can enter this if condition.
  if (NumThreads > WARPSIZE) {
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);
    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                BlockThreadId);
  }
  return BlockThreadId == 0;
#else
  // Pre-Volta path: branch on the shape of the active lane mask.
  __kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
  if (Liveness == __kmpc_impl_all_lanes) // Full warp
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/__kmpc_impl_popc(Liveness),
                              /*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
  else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
                                    // parallel region may enter here; return
                                    // early.
    return gpu_irregular_simd_reduce(reduce_data, shflFct);
  // When we have more than [warpsize] number of threads
  // a block reduction is performed here.
  //
  // Only L1 parallel region can enter this if condition.
  if (NumThreads > WARPSIZE) {
    uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);
    uint32_t WarpId = BlockThreadId / WARPSIZE;
    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                BlockThreadId);
    return BlockThreadId == 0;
  } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
    return BlockThreadId == 0;
  }
  // Get the OMP thread Id. This is different from BlockThreadId in the case of
  // an L2 parallel region.
  return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
// KMPC entry point for the intra-team nowait reduction; derives the
// execution-mode flags from `loc` and forwards to the worker above.
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
    kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
    void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
    kmp_InterWarpCopyFctPtr cpyFct) {
  return nvptx_parallel_reduce_nowait(
      global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
      checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
// A thread acts as "master" of the teams reduction when running in generic
// mode (only the team master reaches that code at all) or when it is the
// team master thread of its block.
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
  return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
// Round s down to a multiple of WARPSIZE; values below one full warp are
// clamped to a single thread.
INLINE static uint32_t roundToWarpsize(uint32_t s) {
  return (s < WARPSIZE) ? 1u : (s & ~(unsigned)(WARPSIZE - 1));
}
INLINE static uint32_t kmpcMin(uint32_t x, uint32_t y) { return x < y ? x : y; }
// Device-global coordination counters for the teams reduction below:
// IterCnt bounds which teams may currently write the global buffer (it is
// advanced by num_of_records when a chunk completes); Cnt counts the teams
// that have deposited into the current chunk (incremented mod
// num_of_records).
static volatile uint32_t IterCnt = 0;
static volatile uint32_t Cnt = 0;
// Cross-team ("teams") reduction over a global buffer of num_of_records
// slots.  Each team's master deposits (or reduces) its team-local value
// into slot (TeamId % num_of_records); the last team of the current window
// loads the slots back and produces the final value.  Returns 1 only in
// the single thread that holds the final result.
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
    kmp_Ident *loc, int32_t global_tid, void *global_buffer,
    int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
    kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
    kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
    kmp_ListGlobalFctPtr glredFct) {
  // Terminate all threads in non-SPMD mode except for the master thread.
  if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
    return 0;
  uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
  // In non-generic mode all workers participate in the teams reduction.
  // In generic mode only the team master participates in the teams
  // reduction because the workers are waiting for parallel work.
  uint32_t NumThreads =
      checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
                         : /*Master thread only*/ 1;
  uint32_t TeamId = GetBlockIdInKernel();
  uint32_t NumTeams = GetNumberOfBlocksInKernel();
  static unsigned SHARED(Bound);
  static unsigned SHARED(ChunkTeamCount);
  // Block progress for teams greater than the current upper
  // limit. We always only allow a number of teams less or equal
  // to the number of slots in the buffer.
  bool IsMaster = isMaster(loc, ThreadId);
  // Busy-wait until this team's buffer slot becomes available.
  while (IsMaster) {
    // Atomic read
    Bound = __kmpc_atomic_add((uint32_t *)&IterCnt, 0u);
    if (TeamId < Bound + num_of_records)
      break;
  }
  if (IsMaster) {
    int ModBockId = TeamId % num_of_records;
    // First writer of a slot copies; later writers reduce into it.
    if (TeamId < num_of_records)
      lgcpyFct(global_buffer, ModBockId, reduce_data);
    else
      lgredFct(global_buffer, ModBockId, reduce_data);
    // Publish the buffer write before this team is counted as done.
    __kmpc_impl_threadfence_system();
    // Increment team counter.
    // This counter is incremented by all teams in the current
    // BUFFER_SIZE chunk.
    ChunkTeamCount = __kmpc_atomic_inc((uint32_t *)&Cnt, num_of_records - 1u);
  }
  // Synchronize
  if (checkSPMDMode(loc))
    __kmpc_barrier(loc, global_tid);
  // reduce_data is global or shared so before being reduced within the
  // warp we need to bring it in local memory:
  //   local_reduce_data = reduce_data[i]
  //
  // Example for 3 reduction variables a, b, c (of potentially different
  // types):
  //
  //   buffer layout (struct of arrays):
  //   a, a, ..., a, b, b, ... b, c, c, ... c
  //   |__________|
  //     num_of_records
  //
  //   local_data_reduce layout (struct):
  //   a, b, c
  //
  // Each thread will have a local struct containing the values to be
  // reduced:
  //      1. do reduction within each warp.
  //      2. do reduction across warps.
  //      3. write the final result to the main reduction variable
  //         by returning 1 in the thread holding the reduction result.
  // Check if this is the very last team.
  unsigned NumRecs = kmpcMin(NumTeams, uint32_t(num_of_records));
  if (ChunkTeamCount == NumTeams - Bound - 1) {
    //
    // Last team processing.
    //
    if (ThreadId >= NumRecs)
      return 0;
    NumThreads = roundToWarpsize(kmpcMin(NumThreads, NumRecs));
    if (ThreadId >= NumThreads)
      return 0;
    // Load from buffer and reduce.
    glcpyFct(global_buffer, ThreadId, reduce_data);
    for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
      glredFct(global_buffer, i, reduce_data);
    // Reduce across warps to the warp master.
    if (NumThreads > 1) {
      gpu_regular_warp_reduce(reduce_data, shflFct);
      // When we have more than [warpsize] number of threads
      // a block reduction is performed here.
      uint32_t ActiveThreads = kmpcMin(NumRecs, NumThreads);
      if (ActiveThreads > WARPSIZE) {
        uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
        // Gather all the reduced values from each warp
        // to the first warp.
        cpyFct(reduce_data, WarpsNeeded);
        uint32_t WarpId = ThreadId / WARPSIZE;
        if (WarpId == 0)
          gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                    ThreadId);
      }
    }
    if (IsMaster) {
      Cnt = 0;
      IterCnt = 0;
      return 1;
    }
    return 0;
  }
  if (IsMaster && ChunkTeamCount == num_of_records - 1) {
    // Allow SIZE number of teams to proceed writing their
    // intermediate results to the global buffer.
    __kmpc_atomic_add((uint32_t *)&IterCnt, uint32_t(num_of_records));
  }
  return 0;
}
#pragma omp end declare target
| aa73cf10d4e4d9e1c55541b8528aa871f85e5ecc.cu | //===---- reduction.cu - GPU OpenMP reduction implementation ----- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// --- Copy of llvm-project/openmp/libomptarget/common/src/reduction.cu ---
// This file contains the implementation of reduction with KMPC interface.
// Runtime impl of @omp_shuffle_and_reduce_func using redux.sync
// int32_t types
//
//===----------------------------------------------------------------------===//
#pragma omp declare target
#include "common/omptarget.h"
#include "target/shuffle.h"
#include "target_impl.h"
#define N 1000
// shared reduction transfer heap
[[clang::loader_uninitialized]] int32_t transfer_reduce[32];
#pragma omp allocate(transfer_reduce) allocator(omp_pteam_mem_alloc)
// No-op: a reduction needs no epilogue on this target.
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
// No-op: a nowait reduction needs no epilogue on this target.
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
#pragma omp begin declare variant match(device={isa(sm_80)}, implementation = {extension(match_any)})
// sm_80 variant: per-warp sum of each of the N int32 payload elements using
// the redux.sync instruction.  Each warp stages its result in
// transfer_reduce[WarpId]; lanes of warp 0 then pull the per-warp partials
// back into warp 0's payload (lane `tid` reads transfer_reduce[tid]).
// NOTE(review): the membermask handed to __nvvm_redux_sync_add is 0xFF
// (lanes 0-7 only), not a full-warp mask — confirm this matches the
// intended participant set.
INLINE static void gpu_warp_reduce_v2(void *reduce_data, uint32_t size, uint32_t tid) {
  int32_t WarpId = tid / WARPSIZE;
  int32_t *local = *(int32_t **)reduce_data;
  for (int32_t i = 0; i < N; i++) {
    transfer_reduce[WarpId] = __nvvm_redux_sync_add(local[i], 0xFF);
    __syncthreads();
    if (WarpId < 1) local[i] = transfer_reduce[tid];
    __syncthreads();
  }
}
// sm_80 variant: warp 0 reduces the `size` per-warp partials (left in its
// lanes by gpu_warp_reduce_v2) down to one value, which thread 0 writes
// back to the payload for every element.
// NOTE(review): `size` is passed where __nvvm_redux_sync_add expects a lane
// membermask; a mask such as (1u << size) - 1 may have been intended —
// confirm.
INLINE static void gpu_master_warp_reduce_v2(void *reduce_data, uint32_t size,
                                             uint32_t tid) {
  int32_t *local = *(int32_t **)reduce_data;
  for(int32_t i = 0; i < N; i++) {
    transfer_reduce[0] = __nvvm_redux_sync_add(local[i], size);
    __syncthreads();
    if (tid == 0) local[i] = transfer_reduce[0];
    __syncthreads();
  }
}
#pragma omp end declare variant
// Non-sm_80 fallback: intentionally a no-op.  The only call sites sit
// behind __CUDA_ARCH__ >= 800 in nvptx_parallel_reduce_nowait, so this
// body is never executed where this overload is selected.
INLINE static void gpu_warp_reduce_v2(void *reduce_data, uint32_t size, uint32_t tid) {
  ;
}
// Non-sm_80 fallback: intentionally a no-op (see gpu_warp_reduce_v2 above;
// call sites are guarded by __CUDA_ARCH__ >= 800).
INLINE static void gpu_master_warp_reduce_v2(void *reduce_data, uint32_t size,
                                             uint32_t tid) {
  ;
}
// Full-warp tree reduction: log2(WARPSIZE) shuffle-and-reduce steps with
// halving offsets (AlgoVersion 0 — LaneId is unused by the callback).
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
                                           kmp_ShuffleReductFctPtr shflFct) {
  for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
    shflFct(reduce_data, /*LaneId - not used= */ 0,
            /*Offset = */ mask, /*AlgoVersion=*/0);
  }
}
// Reduction over the first `size` (contiguous) lanes of a warp, used for a
// partially populated warp.  The active count shrinks as ceil(curr/2) per
// step so odd lane counts are handled (AlgoVersion 1).
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
                                             kmp_ShuffleReductFctPtr shflFct,
                                             uint32_t size, uint32_t tid) {
  uint32_t curr_size;
  uint32_t mask;
  curr_size = size;
  mask = curr_size / 2;
  while (mask > 0) {
    shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
    curr_size = (curr_size + 1) / 2;
    mask = curr_size / 2;
  }
}
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
// Reduce across the currently-active — possibly non-contiguous — lanes of a
// warp (per the caller, only threads in an L2 parallel region reach this).
// Returns 1 in the lane that ends up holding the reduced value (logical
// lane 0).
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
  uint32_t size, remote_id, physical_lane_id;
  physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
  __kmpc_impl_lanemask_t lanemask_lt = __kmpc_impl_lanemask_lt();
  __kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
  // Rank of this lane among the active lanes, doubled so it can be halved
  // once per pairing round below.
  uint32_t logical_lane_id = __kmpc_impl_popc(Liveness & lanemask_lt) * 2;
  __kmpc_impl_lanemask_t lanemask_gt = __kmpc_impl_lanemask_gt();
  do {
    Liveness = __kmpc_impl_activemask();
    // Nearest active lane above this one is the reduction partner.
    remote_id = __kmpc_impl_ffs(Liveness & lanemask_gt);
    size = __kmpc_impl_popc(Liveness);
    logical_lane_id /= 2;
    shflFct(reduce_data, /*LaneId =*/logical_lane_id,
            /*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
  } while (logical_lane_id % 2 == 0 && size > 1);
  return (logical_lane_id == 0);
}
#endif
// Intra-team reduction worker shared by all execution modes.  Three phases:
// (1) reduce within each warp, (2) copy the per-warp partials to warp 0
// (cpyFct), (3) warp 0 reduces the partials.  Returns 1 only in the thread
// that ends up holding the reduced value.
INLINE
static int32_t nvptx_parallel_reduce_nowait(
    int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
    kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
    bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
  uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
  uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
  // A single-thread team is already "reduced".
  if (NumThreads == 1)
    return 1;
  /*
   * This reduce function handles reduction within a team. It handles
   * parallel regions in both L1 and L2 parallelism levels. It also
   * supports Generic, SPMD, and NoOMP modes.
   *
   * 1. Reduce within a warp.
   * 2. Warp master copies value to warp 0 via shared memory.
   * 3. Warp 0 reduces to a single value.
   * 4. The reduced value is available in the thread that returns 1.
   */
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
  // SM80 path: use the redux.sync based helpers above.
  uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
  uint32_t WarpId = BlockThreadId / WARPSIZE;
  // NOTE(review): the last warp's lane count is computed as
  // NumThreads % WarpsNeeded; NumThreads % WARPSIZE looks like the intended
  // expression — confirm before relying on partial-warp teams here.
  uint32_t myWarpSize = (WarpsNeeded - WarpId) == 1 ?
      NumThreads % WarpsNeeded : 32;
  gpu_warp_reduce_v2(reduce_data, myWarpSize, BlockThreadId);
  if (NumThreads > WARPSIZE && WarpId == 0)
    gpu_master_warp_reduce_v2(reduce_data, WarpsNeeded, BlockThreadId);
  return BlockThreadId == 0;
#elif defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
  uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
  uint32_t WarpId = BlockThreadId / WARPSIZE;
  // Volta execution model:
  // For the Generic execution mode a parallel region either has 1 thread and
  // beyond that, always a multiple of 32. For the SPMD execution mode we may
  // have any number of threads.
  if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/NumThreads % WARPSIZE,
                              /*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
  // When we have more than [warpsize] number of threads
  // a block reduction is performed here.
  //
  // Only L1 parallel region can enter this if condition.
  if (NumThreads > WARPSIZE) {
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);
    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                BlockThreadId);
  }
  return BlockThreadId == 0;
#else
  // Pre-Volta path: branch on the shape of the active lane mask.
  __kmpc_impl_lanemask_t Liveness = __kmpc_impl_activemask();
  if (Liveness == __kmpc_impl_all_lanes) // Full warp
    gpu_regular_warp_reduce(reduce_data, shflFct);
  else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
    gpu_irregular_warp_reduce(reduce_data, shflFct,
                              /*LaneCount=*/__kmpc_impl_popc(Liveness),
                              /*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
  else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
                                    // parallel region may enter here; return
                                    // early.
    return gpu_irregular_simd_reduce(reduce_data, shflFct);
  // When we have more than [warpsize] number of threads
  // a block reduction is performed here.
  //
  // Only L1 parallel region can enter this if condition.
  if (NumThreads > WARPSIZE) {
    uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
    // Gather all the reduced values from each warp
    // to the first warp.
    cpyFct(reduce_data, WarpsNeeded);
    uint32_t WarpId = BlockThreadId / WARPSIZE;
    if (WarpId == 0)
      gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                BlockThreadId);
    return BlockThreadId == 0;
  } else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
    return BlockThreadId == 0;
  }
  // Get the OMP thread Id. This is different from BlockThreadId in the case of
  // an L2 parallel region.
  return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
// KMPC entry point for the intra-team nowait reduction; derives the
// execution-mode flags from `loc` and forwards to the worker above.
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
    kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
    void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
    kmp_InterWarpCopyFctPtr cpyFct) {
  return nvptx_parallel_reduce_nowait(
      global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
      checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
// A thread acts as "master" of the teams reduction when running in generic
// mode (only the team master reaches that code at all) or when it is the
// team master thread of its block.
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
  return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
// Round s down to a multiple of WARPSIZE; values below one full warp are
// clamped to a single thread.
INLINE static uint32_t roundToWarpsize(uint32_t s) {
  return (s < WARPSIZE) ? 1u : (s & ~(unsigned)(WARPSIZE - 1));
}
INLINE static uint32_t kmpcMin(uint32_t x, uint32_t y) { return x < y ? x : y; }
// Device-global coordination counters for the teams reduction below:
// IterCnt bounds which teams may currently write the global buffer (it is
// advanced by num_of_records when a chunk completes); Cnt counts the teams
// that have deposited into the current chunk (incremented mod
// num_of_records).
static volatile uint32_t IterCnt = 0;
static volatile uint32_t Cnt = 0;
// Cross-team ("teams") reduction over a global buffer of num_of_records
// slots.  Each team's master deposits (or reduces) its team-local value
// into slot (TeamId % num_of_records); the last team of the current window
// loads the slots back and produces the final value.  Returns 1 only in
// the single thread that holds the final result.
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
    kmp_Ident *loc, int32_t global_tid, void *global_buffer,
    int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
    kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
    kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
    kmp_ListGlobalFctPtr glredFct) {
  // Terminate all threads in non-SPMD mode except for the master thread.
  if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
    return 0;
  uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc))
  ;
  // In non-generic mode all workers participate in the teams reduction.
  // In generic mode only the team master participates in the teams
  // reduction because the workers are waiting for parallel work.
  uint32_t NumThreads =
      checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
                         : /*Master thread only*/ 1;
  uint32_t TeamId = GetBlockIdInKernel();
  uint32_t NumTeams = GetNumberOfBlocksInKernel();
  static unsigned SHARED(Bound);
  static unsigned SHARED(ChunkTeamCount);
  // Block progress for teams greater than the current upper
  // limit. We always only allow a number of teams less or equal
  // to the number of slots in the buffer.
  bool IsMaster = isMaster(loc, ThreadId);
  // Busy-wait until this team's buffer slot becomes available.
  while (IsMaster) {
    // Atomic read
    Bound = __kmpc_atomic_add((uint32_t *)&IterCnt, 0u);
    if (TeamId < Bound + num_of_records)
      break;
  }
  if (IsMaster) {
    int ModBockId = TeamId % num_of_records;
    // First writer of a slot copies; later writers reduce into it.
    if (TeamId < num_of_records)
      lgcpyFct(global_buffer, ModBockId, reduce_data);
    else
      lgredFct(global_buffer, ModBockId, reduce_data);
    // Publish the buffer write before this team is counted as done.
    __kmpc_impl_threadfence_system();
    // Increment team counter.
    // This counter is incremented by all teams in the current
    // BUFFER_SIZE chunk.
    ChunkTeamCount = __kmpc_atomic_inc((uint32_t *)&Cnt, num_of_records - 1u);
  }
  // Synchronize
  if (checkSPMDMode(loc))
    __kmpc_barrier(loc, global_tid);
  // reduce_data is global or shared so before being reduced within the
  // warp we need to bring it in local memory:
  //   local_reduce_data = reduce_data[i]
  //
  // Example for 3 reduction variables a, b, c (of potentially different
  // types):
  //
  //   buffer layout (struct of arrays):
  //   a, a, ..., a, b, b, ... b, c, c, ... c
  //   |__________|
  //     num_of_records
  //
  //   local_data_reduce layout (struct):
  //   a, b, c
  //
  // Each thread will have a local struct containing the values to be
  // reduced:
  //      1. do reduction within each warp.
  //      2. do reduction across warps.
  //      3. write the final result to the main reduction variable
  //         by returning 1 in the thread holding the reduction result.
  // Check if this is the very last team.
  unsigned NumRecs = kmpcMin(NumTeams, uint32_t(num_of_records));
  if (ChunkTeamCount == NumTeams - Bound - 1) {
    //
    // Last team processing.
    //
    if (ThreadId >= NumRecs)
      return 0;
    NumThreads = roundToWarpsize(kmpcMin(NumThreads, NumRecs));
    if (ThreadId >= NumThreads)
      return 0;
    // Load from buffer and reduce.
    glcpyFct(global_buffer, ThreadId, reduce_data);
    for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
      glredFct(global_buffer, i, reduce_data);
    // Reduce across warps to the warp master.
    if (NumThreads > 1) {
      gpu_regular_warp_reduce(reduce_data, shflFct);
      // When we have more than [warpsize] number of threads
      // a block reduction is performed here.
      uint32_t ActiveThreads = kmpcMin(NumRecs, NumThreads);
      if (ActiveThreads > WARPSIZE) {
        uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
        // Gather all the reduced values from each warp
        // to the first warp.
        cpyFct(reduce_data, WarpsNeeded);
        uint32_t WarpId = ThreadId / WARPSIZE;
        if (WarpId == 0)
          gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
                                    ThreadId);
      }
    }
    if (IsMaster) {
      Cnt = 0;
      IterCnt = 0;
      return 1;
    }
    return 0;
  }
  if (IsMaster && ChunkTeamCount == num_of_records - 1) {
    // Allow SIZE number of teams to proceed writing their
    // intermediate results to the global buffer.
    __kmpc_atomic_add((uint32_t *)&IterCnt, uint32_t(num_of_records));
  }
  return 0;
}
#pragma omp end declare target
|
9cd75502c2254b192fa7837402fdc72fa724952a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
// Minimal device-side complex number (float real/imag parts) with the
// arithmetic operators needed by the theta-function kernels below.
struct hipComplex {
float r;  // real part
float i;  // imaginary part
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
// Squared modulus |z|^2 (no sqrt; see norg() for the true magnitude).
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
// Division by multiplying with the conjugate; no guard against a == 0.
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
// Complex conjugate: negate the imaginary part.
__device__ hipComplex conj(hipComplex m)
{
    return hipComplex(m.r, -m.i);
}
// Squared modulus |m|^2, packed into the real part of a hipComplex.
__device__ hipComplex nor(hipComplex m)
{
    float mag2 = m.r * m.r + m.i * m.i;
    return hipComplex(mag2, 0.0);
}
// Euclidean norm (modulus) of m.
__device__ float norg(hipComplex m)
{
    float re = m.r;
    float im = m.i;
    return sqrtf(re * re + im * im);
}
// q-Pochhammer-style infinite product, truncated: multiplies
// (1 - a*q^k) for k = 1..79.
// NOTE(review): the textbook (a; q)_inf starts at k = 0 with an extra
// (1 - a) factor, which this loop omits -- presumably intentional for the
// visuals below, but worth confirming before reuse.
// Returns 0 outside the unit disk (|q| > 1), where the product diverges.
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;  // running power q^i
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
// Complex exponential: e^(x+iy) = e^x * (cos y + i sin y).
__device__ hipComplex expc(hipComplex m)
{
    return hipComplex(expf(m.r) * cosf(m.i), expf(m.r) * sinf(m.i));
}
// Complex power ag^bg via polar decomposition:
// ag = R * e^(i*theta), so ag^(c+di) = R^(c+di) * e^(i*theta*(c+di)).
// Undefined for ag == 0 (logf(0) below).
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
// NOTE(review): doubles fed only by float intrinsics (sqrtf/atan2f);
// the extra precision is never realized.
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// Complex cosine via the exponential identity:
// cos z = (e^{iz} + e^{-iz}) / 2.
__device__ hipComplex cosc(hipComplex m)
{
    hipComplex i_unit(0.0, 1.0);
    hipComplex half(0.5, 0.0);
    hipComplex neg_one(-1.0, 0.0);
    return half * (expc(m * i_unit) + expc(neg_one * m * i_unit));
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
// Jacobi-theta-like triple product, truncated at 10 factors:
// prod_v (1 - q^(2v)) * (1 + 2 q^(2v-1) cos(2z) + q^(4v-2)).
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);  // running even power q^(2v)
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
// Truncated Mittag-Leffler-style series:
//   sum_{v=0}^{19} z^v / Gamma(c.r * v + c.i)
// NOTE(review): only the real Gamma value lands in frove.r; frove.i stays 0,
// so the divisor is always treated as real -- confirm that is intended.
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);  // running power z^v
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
// Clamp an int into the displayable byte range [0, 255].
__device__
unsigned char clip(int n) {
    if (n < 0) return 0;
    if (n > 255) return 255;
    return (unsigned char)n;
}
// Per-pixel shader: refines the pixel's complex coordinate with a
// Newton-like iteration against mitlef() (parameterised by the mouse
// position) and maps the resulting phase to an RGB colour.
//
// Launch layout: 2D grid of TX x TY blocks tiling a w x h image
// (see kernelLauncher). d_out is a w*h uchar4 buffer; pos is the mouse
// position in pixel coordinates.
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
// Guard the grid tail: the launcher rounds w/h up to multiples of TX/TY,
// so threads past the image edge must not touch d_out. (This check was
// previously commented out, allowing out-of-bounds writes.)
if ((c >= w) || (r >= h)) return;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 1.0;
// Map pixel -> complex plane in roughly [-scale, scale] (DIM-based framing).
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
// Mouse mapped the same way; drives the mitlef() parameter.
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex unity(1.0,0.0);
hipComplex q(fx,fy);
hipComplex cue = q;
// Newton-like refinement, 20 fixed iterations; note the denominator is
// evaluated at the original point q, not at cue.
int v;
for(v=0;v<20;v++)
{
cue = cue- (mitlef(cue,mouse)-cue)/(mitlef(q,mouse) -unity);
}
// Colour by phase: three sin^2 channels offset by 120 degrees.
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
// Host-side launch wrapper: tiles the w x h image with TX x TY blocks,
// rounding the grid up with ceil-division so partial edge tiles are covered.
// Runs on the default stream; no error check after the launch.
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | 9cd75502c2254b192fa7837402fdc72fa724952a.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
// Minimal device-side complex number (float real/imag parts) with the
// arithmetic operators needed by the theta-function kernels below.
struct cuComplex {
float r;  // real part
float i;  // imaginary part
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
// Squared modulus |z|^2 (no sqrt; see norg() for the true magnitude).
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
// Division by multiplying with the conjugate; no guard against a == 0.
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
// Clamp an int into the displayable byte range [0, 255].
__device__
unsigned char clip(int n) {
    if (n < 0) return 0;
    if (n > 255) return 255;
    return (unsigned char)n;
}
// Per-pixel shader: refines the pixel's complex coordinate with a
// Newton-like iteration against mitlef() (parameterised by the mouse
// position) and maps the resulting phase to an RGB colour.
//
// Launch layout: 2D grid of TX x TY blocks tiling a w x h image
// (see kernelLauncher). d_out is a w*h uchar4 buffer; pos is the mouse
// position in pixel coordinates.
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r = blockIdx.y*blockDim.y + threadIdx.y;
// Guard the grid tail: the launcher rounds w/h up to multiples of TX/TY,
// so threads past the image edge must not touch d_out. (This check was
// previously commented out, allowing out-of-bounds writes.)
if ((c >= w) || (r >= h)) return;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 1.0;
// Map pixel -> complex plane in roughly [-scale, scale] (DIM-based framing).
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
// Mouse mapped the same way; drives the mitlef() parameter.
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex unity(1.0,0.0);
cuComplex q(fx,fy);
cuComplex cue = q;
// Newton-like refinement, 20 fixed iterations; note the denominator is
// evaluated at the original point q, not at cue.
int v;
for(v=0;v<20;v++)
{
cue = cue- (mitlef(cue,mouse)-cue)/(mitlef(q,mouse) -unity);
}
// Colour by phase: three sin^2 channels offset by 120 degrees.
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
// Host-side launch wrapper: tile the w x h image with TX x TY thread blocks,
// rounding the grid up so every pixel is covered, then run distanceKernel.
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
  const int bx = (w + TX - 1) / TX;   // ceil(w / TX)
  const int by = (h + TY - 1) / TY;   // ceil(h / TY)
  const dim3 threads(TX, TY);
  const dim3 blocks(bx, by);
  distanceKernel<<<blocks, threads>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
a66b4f2255aced2fbc06ad29239839586b9985f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "quant.hpp"
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
// Allocate the per-channel bin-width table and bin-hit counters on the GPU
// and reset the training-iteration counter. Called once before first use.
void quant_opt::init(){
    init_base();
    const auto opts = torch::TensorOptions()
                          .dtype(torch::kFloat32)
                          .device(torch::kCUDA, device_)
                          .requires_grad(false);
    weight_     = at::zeros({channel_, bin_num_}, opts);
    count_data_ = at::zeros({channel_, bin_num_}, opts);
    iter_ = 0;
}
// Resize the cached bin-index tensor when the input geometry changes.
// reshape_base() returns false when the shape is unchanged, in which case
// the existing buffer is reused.
void quant_opt::reshape(int num, int channel, int height, int width){
    if (!reshape_base(num, channel, height, width))
        return;
    const auto opts = torch::TensorOptions()
                          .dtype(torch::kInt)
                          .device(torch::kCUDA, device_)
                          .requires_grad(false);
    quant_ = at::zeros({num_, channel_, height_, width_}, opts);
}
// (Re)allocate the output tensors: one quantized-value tensor, plus an
// optional second tensor (same shape) when the layer exposes two outputs.
void quant_opt::reshape_top(at::TensorOptions options){
    const std::vector<int64_t> top_shape{num_, channel_, height_, width_};
    std::vector<std::vector<int64_t>> shapes;
    shapes.push_back(top_shape);
    if (ntop_ > 1)
        shapes.push_back(top_shape);   // second top holds the bin indices
    reshape_top_base(options, shapes);
}
// (Re)allocate the gradient tensors: one matching the input activations and
// one matching the per-channel bin-parameter table.
void quant_opt::reshape_bottom(at::TensorOptions options){
    std::vector<std::vector<int64_t>> shapes{
        {num_, channel_, height_, width_},   // d(loss)/d(input)
        {channel_, bin_num_}                 // d(loss)/d(bin parameters)
    };
    reshape_bottom_base(options, shapes);
}
// Convert the stored bin parameters into usable bin widths.
// weight_b is laid out [channels, levels]; entry 0 of each row is kept as-is
// (it acts as the row's base entry), every other entry is exponentiated so
// the effective values stay positive.
template <typename scalar_t>
__global__ void ml_quant_cal_weight_kernel(const int nthreads, const scalar_t* const weight_b, scalar_t * const weight, const int levels) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        const bool row_start = (index % levels == 0);
        weight[index] = row_start ? weight_b[index] : exp(weight_b[index]);
    }
}
// Forward quantization: snap each input element to the nearest edge of its
// channel's non-uniform grid and record which bin was hit.
//
// weight is laid out [channels, levels]: weight[c*levels] is the left edge
// of the grid, weight[c*levels + j] (j >= 1) is the width of bin j.  count
// accumulates per-bin hit statistics via atomics.  inner_shape = H*W, so pc
// below recovers the channel of flat index i (N,C,H,W layout).
// NOTE(review): the atomicAdd casts a scalar_t* to float*; this is only
// valid when the kernel is instantiated with scalar_t == float -- confirm
// double is never dispatched here.
template <typename scalar_t>
__global__ void ml_quant_single_gpu_forward_kernel(const int num, const scalar_t* const bottom, int * const quant,
    scalar_t * const top, const scalar_t * const weight, scalar_t * const count, const int inner_shape,
    const int channels, const int levels) {
    CUDA_KERNEL_LOOP(i, num)
    {
        int pc = (i / inner_shape) % channels;  // channel of element i
        // Distance of the input above the grid's left edge.
        scalar_t tmp = bottom[i] - weight[pc*levels];
        if (tmp < 0) {
            // Below the grid: clamp to the left edge (bin 0).
            quant[i] = 0;
            top[i] = weight[pc*levels];
            atomicAdd((float *)(count + pc*levels), float(1.0));
            //count[pc*levels]++;
            continue;
        }
        // Walk the bin widths until the remaining distance goes negative;
        // tmp then holds the (negative) offset past the upper edge of bin j.
        int j = 1;
        for (; j < levels; j++)
        {
            tmp -= weight[pc*levels + j];
            if (tmp < 0)
                break;
        }
        if (j == levels) j--;  // above the grid: clamp to the last edge
        // Round to the nearer of the two surrounding edges: if the input is
        // closer to the lower edge of bin j, step back one bin.
        if (tmp + tmp + weight[pc*levels + j] < 0) {
            tmp = tmp + weight[pc*levels + j];
            j--;
        }
        top[i] = bottom[i] - tmp;  // quantized value = input minus residual
        quant[i] = j;
        atomicAdd((float *)(count + pc*levels+j), float(1.0));
    }
}
// Widen the int bin indices into the floating-point second output tensor.
template <typename scalar_t>
__global__ void ml_quant_gpu_copy(const int nthreads, const int * const quant,
    scalar_t * const top)
{
    CUDA_KERNEL_LOOP(index, nthreads) {
        top[index] = static_cast<scalar_t>(quant[index]);
    }
}
// One thread per channel: sanitize that channel's bin parameters using the
// hit statistics gathered by the forward pass.
//
// Step 1: find the highest bin j (never below index 1) that received at
// least one hit, then collapse bins j..levels-1 to an equal share of bin j's
// parameter (subtracting log(levels - j), i.e. an even split in log space).
// Step 2: if the leftmost edge itself was never hit, shift the edge right by
// one exponentiated bin width and set the first two bin parameters to the
// log of their mean exponentiated width.
// NOTE(review): log/exp here treat entries 1.. as log-widths, matching the
// exp() applied in ml_quant_cal_weight_kernel.
template <typename scalar_t>
__global__ void ml_quant_check_weight(const int nthreads, scalar_t * const weight,const scalar_t * const count, const int levels ){
    CUDA_KERNEL_LOOP(i, nthreads) {
        // Highest non-empty bin for channel i.
        int j = levels - 1;
        for( ; j>1;j--){
            if(count[i*levels + j] >= 1)
                break;
        }
        // Split bin j's parameter evenly over bins j..levels-1 (log space).
        scalar_t tmp = weight[i*levels+j]-log(static_cast<scalar_t>(levels - j));
        for (; j < levels; j++)
            weight[i*levels + j] = tmp;
        if (count[i*levels] < 1)
        {
            // Left edge unused: move it up by the first bin's width and
            // average the first two bin widths.
            weight[i*levels] = weight[i*levels] + exp(weight[i*levels + 1]);
            tmp = log((exp(weight[i*levels + 1]) + exp(weight[i*levels + 2])) / 2);
            weight[i*levels + 1] = tmp;
            weight[i*levels + 2] = tmp;
            //LOG(INFO) << "update channel " << i;
        }
    }
}
// In-place scalar multiply: input[i] *= alpha.  Used to decay the bin-hit
// counters between weight updates.
template <typename scalar_t>
__global__ void ml_quant_scale(const int count, scalar_t * input, scalar_t alpha){
    CUDA_KERNEL_LOOP(index, count) {
        const scalar_t scaled = input[index] * alpha;
        input[index] = scaled;
    }
}
// Periodically (every mod_ iterations, skipping iteration 0) repair the bin
// parameter table in place and decay the accumulated bin-hit counters.
// weight: the raw bin-parameter tensor, modified on the GPU.
void quant_opt::update_weight(at::Tensor weight){
    if (iter_ % mod_ != 0 || iter_ == 0 ) return;
    //printf("check_weights %f\n", weight_decay_);
    AT_DISPATCH_FLOATING_TYPES(
        weight.scalar_type(), "quant_update_weight_cuda",
            ([&] {
                    //caffe_gpu_scal(handle_,channel_*bin_num_, static_cast<scalar_t>(weight_decay_), count_data_.data_ptr<scalar_t>());
                    // One thread per channel: merge empty tail bins / fix the edge.
                    ml_quant_check_weight<< <CAFFE_GET_BLOCKS(channel_), CAFFE_CUDA_NUM_THREADS, 0 , stream_ >> >
                        (channel_,weight.data_ptr<scalar_t>(),count_data_.data_ptr<scalar_t>(),bin_num_);
                    // Decay the hit counters so the statistics stay recent.
                    ml_quant_scale<< <CAFFE_GET_BLOCKS(channel_*bin_num_), CAFFE_CUDA_NUM_THREADS, 0 , stream_ >> >
                        (channel_*bin_num_, count_data_.data_ptr<scalar_t>(), static_cast<scalar_t>(weight_decay_));
                    CUDA_POST_KERNEL_CHECK;
            }
            )
    );
}
// Forward pass entry point.
// bottom_data: input activations (N,C,H,W); weight_old: raw per-channel bin
// parameters; train: when true, periodically repairs the bin table and
// advances the iteration counter.
// Returns top_data_: [0] quantized activations, and when ntop_ > 1 also
// [1] the bin indices widened to the floating dtype.
std::vector<at::Tensor> quant_opt::quant_forward_cuda(at::Tensor bottom_data, at::Tensor weight_old, bool train)
{
    reshape(bottom_data.size(0), bottom_data.size(1), bottom_data.size(2), bottom_data.size(3));
    reshape_top(bottom_data.options());
    if(train) update_weight(weight_old);
    //at::Tensor tmp_vec = at::empty({channel_,bin_num_},bottom_data.options());
    int count;
    AT_DISPATCH_FLOATING_TYPES(
        bottom_data.scalar_type(), "quant_forward_cuda",
            ([&] {
                    timer_->start();
                    // Materialize usable bin widths (exp of the stored parameters).
                    count = channel_* bin_num_;
                    ml_quant_cal_weight_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0 , stream_ >> >
                        (count, weight_old.data_ptr<scalar_t>(), weight_.data_ptr<scalar_t>(), bin_num_);
                    // Quantize every element and collect per-bin hit counts.
                    count = num_ * channel_ * width_ * height_;
                    ml_quant_single_gpu_forward_kernel<< <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                        (count, bottom_data.data_ptr<scalar_t>(), quant_.data_ptr<int>(), top_data_[0].data_ptr<scalar_t>(), weight_.data_ptr<scalar_t>(),
                            count_data_.data_ptr<scalar_t>(), width_*height_, channel_, bin_num_);
                    if(ntop_>1){
                        // Optional second output: the integer bin indices.
                        ml_quant_gpu_copy << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >(count, quant_.data_ptr<int>(), top_data_[1].data_ptr<scalar_t>());
                    }
                    CUDA_POST_KERNEL_CHECK;
                    timer_->stop("kernel 1");
                    //caffe_gpu_memcpy(bin_num_*channel_*sizeof(scalar_t), count_data_.data_ptr<scalar_t>(),tmp_vec.data_ptr<scalar_t>());
            }
            )
    );
    if(train)
        iter_ += 1;
    //printf("iter:%d...\n",iter_);
    return top_data_;
}
// Replace each entry by its sign (L1-style subgradient), leaving values
// inside the small dead zone |w| <= 1e-7 untouched.
template <typename scalar_t>
__global__ void ml_quant_backward_l1_kernel(const int nthreads, scalar_t * const weight) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        const scalar_t w = weight[index];
        if (w > 0.0000001)
            weight[index] = 1.0;
        else if (w < -0.0000001)
            weight[index] = -1.0;
    }
}
// Accumulates d(loss)/d(bin parameter): an element's quantized value is
// (edge + sum of widths of bins 0..quant[idx]), so the element's gradient
// signal is added to every one of those entries of weight_diff.
// i is a channel-fastest permutation of the N,C,H,W flat index; idx maps it
// back to the tensor offset.  Presumably the permutation spreads neighboring
// threads' atomics across different weight rows to reduce contention --
// TODO confirm.
// NOTE(review): the atomicAdd casts scalar_t* to float*, which is only valid
// when scalar_t == float -- confirm double is never dispatched here.
template <typename scalar_t>
__global__ void ml_quant_single_gpu_backward_kernel(const int num, const int * const quant,
    const scalar_t * const top_diff, scalar_t * const weight_diff, const int inner_shape,
    const int channels, const int levels) {
    CUDA_KERNEL_LOOP(i, num) {
        int pc = i % channels;  // channel from the fastest-varying digit
        int idx = (i / channels) % inner_shape + (i / channels / inner_shape)*channels*inner_shape
            + pc*inner_shape;
        //int pc = (i / inner_shape) % channels;
        for (int j = 0; j <= quant[idx]; j++)
        {
            atomicAdd((float *)(weight_diff + pc*levels + j), float(top_diff[idx]));
            //weight_diff[pc*levels + j] += top_diff[i];
        }
    }
}
// Chain rule through the exp() applied in ml_quant_cal_weight_kernel: the
// gradient of every non-leading entry of a row is scaled by the matching
// exponentiated value in val; the first entry of each row is left untouched.
template <typename scalar_t>
__global__ void ml_quant_cal_weight_diff_kernel(const int num, scalar_t* const weight,
    const scalar_t * const val, const int levels) {
    CUDA_KERNEL_LOOP(i, num)
    {
        const bool row_start = (i % levels == 0);
        if (!row_start)
            weight[i] *= val[i];
    }
}
// Adds a scaled gradient for the (optional) index output to bottom_diff:
// bottom_diff[i] += alpha * top_diff[i] / beta, where beta estimates the
// input change needed to move the element to a neighboring bin.  The
// rounding direction (top vs. bottom) decides which neighboring bin width
// applies; 10000 effectively kills the gradient at the clamped ends of the
// grid, and beta is floored at 0.001 for numerical stability.
template <typename scalar_t>
__global__ void ml_quant_top_diff_kernel(const int num, const scalar_t* const weight,
    const scalar_t * top_data, const scalar_t * bottom_data,
    const int * const quant, const scalar_t * const top_diff, scalar_t* const bottom_diff,
    const scalar_t alpha, const int level, const int inner_shape, const int channels) {
    CUDA_KERNEL_LOOP(i, num)
    {
        int tc = (i / inner_shape) % channels;  // channel of element i
        scalar_t beta = 1.0;
        if (top_data[i] < bottom_data[i]) {
            // Rounded down: the next index is one bin width above.
            beta = quant[i]<level - 1? weight[tc*level + quant[i] + 1]: 10000;
        }
        else if (top_data[i] > bottom_data[i]) {
            // Rounded up: the previous index is one bin width below.
            beta = quant[i]>0 ? weight[tc*level + quant[i]] : 10000;
        }
        else {
            // Exactly on an edge: average the neighboring widths where both exist.
            if (quant[i] == 0) {
                beta = weight[tc*level + quant[i] + 1];
            }
            else if (quant[i] < level - 1) {
                beta = (weight[tc*level + quant[i]] + weight[tc*level + quant[i] + 1]) / 2.0;
            }
            else {
                beta = weight[tc*level + quant[i]];
            }
        }
        if (beta < 0.001) beta = 0.001;  // guard against division blow-up
        bottom_diff[i] = bottom_diff[i] + alpha*top_diff[i] / beta;
    }
}
// Backward pass entry point.
// top_diff: gradients w.r.t. the outputs ([0] quantized values, optionally
// [1] the index output); bottom_data/top_data: saved forward input/output.
// Returns bottom_diff_: [0] gradient w.r.t. the input activations,
// [1] gradient w.r.t. the raw bin parameters.
std::vector<at::Tensor> quant_opt::quant_backward_cuda(std::vector<at::Tensor> top_diff, at::Tensor bottom_data, at::Tensor top_data){
    reshape_bottom(top_diff[0].options());
    int num_thr = num_*channel_*height_*width_;
    AT_DISPATCH_FLOATING_TYPES(
        top_diff[0].scalar_type(), "quant_backward_cuda",
            ([&] {
                    // Per-element quantization residual; used below as the
                    // per-bin gradient signal for the width parameters.
                    bottom_diff_[0] = top_data - bottom_data;
                    // Zero the weight-gradient buffer before atomic accumulation.
                    // NOTE(review): hipMemset's second argument is a byte
                    // value; passing scalar_t(0.0) only works because the
                    // fill happens to be zero.
                    hipMemset(bottom_diff_[1].data_ptr<scalar_t>(), scalar_t(0.0), channel_* bin_num_*sizeof(scalar_t));
                    ml_quant_single_gpu_backward_kernel<scalar_t> << <CAFFE_GET_BLOCKS(num_thr), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                        (num_thr, quant_.data_ptr<int>(), bottom_diff_[0].data_ptr<scalar_t>(),
                            bottom_diff_[1].data_ptr<scalar_t>(), width_*height_, channel_, bin_num_);
                    // Chain rule through the exp() parameterization.
                    ml_quant_cal_weight_diff_kernel<scalar_t> << <CAFFE_GET_BLOCKS(channel_*bin_num_), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                        (channel_*bin_num_, bottom_diff_[1].data_ptr<scalar_t>(),weight_.data_ptr<scalar_t>(), bin_num_);
                    // Straight-through estimator: the input gradient starts
                    // as a copy of the output gradient.
                    bottom_diff_[0].copy_(top_diff[0]);
                    if(ntop_>1){
                        // Extra gradient contribution from the index output.
                        ml_quant_top_diff_kernel << <CAFFE_GET_BLOCKS(num_thr), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                            (num_thr, weight_.data_ptr<scalar_t>(), top_data_[0].data_ptr<scalar_t>(),bottom_data.data_ptr<scalar_t>(),
                                quant_.data_ptr<int>(), top_diff[1].data_ptr<scalar_t>(),
                                bottom_diff_[0].data_ptr<scalar_t>(), static_cast<scalar_t>(top_alpha_), bin_num_, width_*height_, channel_);
                    }
                    CUDA_POST_KERNEL_CHECK;
            }
            )
    );
    return bottom_diff_;
}
#include <curand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
// Allocate the per-channel bin-width table and bin-hit counters on the GPU
// and reset the training-iteration counter. Called once before first use.
void quant_opt::init(){
    init_base();
    const auto opts = torch::TensorOptions()
                          .dtype(torch::kFloat32)
                          .device(torch::kCUDA, device_)
                          .requires_grad(false);
    weight_     = at::zeros({channel_, bin_num_}, opts);
    count_data_ = at::zeros({channel_, bin_num_}, opts);
    iter_ = 0;
}
// Resize the cached bin-index tensor when the input geometry changes.
// reshape_base() returns false when the shape is unchanged, in which case
// the existing buffer is reused.
void quant_opt::reshape(int num, int channel, int height, int width){
    if (!reshape_base(num, channel, height, width))
        return;
    const auto opts = torch::TensorOptions()
                          .dtype(torch::kInt)
                          .device(torch::kCUDA, device_)
                          .requires_grad(false);
    quant_ = at::zeros({num_, channel_, height_, width_}, opts);
}
// (Re)allocate the output tensors: one quantized-value tensor, plus an
// optional second tensor (same shape) when the layer exposes two outputs.
void quant_opt::reshape_top(at::TensorOptions options){
    const std::vector<int64_t> top_shape{num_, channel_, height_, width_};
    std::vector<std::vector<int64_t>> shapes;
    shapes.push_back(top_shape);
    if (ntop_ > 1)
        shapes.push_back(top_shape);   // second top holds the bin indices
    reshape_top_base(options, shapes);
}
// (Re)allocate the gradient tensors: one matching the input activations and
// one matching the per-channel bin-parameter table.
void quant_opt::reshape_bottom(at::TensorOptions options){
    std::vector<std::vector<int64_t>> shapes{
        {num_, channel_, height_, width_},   // d(loss)/d(input)
        {channel_, bin_num_}                 // d(loss)/d(bin parameters)
    };
    reshape_bottom_base(options, shapes);
}
// Convert the stored bin parameters into usable bin widths.
// weight_b is laid out [channels, levels]; entry 0 of each row is kept as-is
// (it acts as the row's base entry), every other entry is exponentiated so
// the effective values stay positive.
template <typename scalar_t>
__global__ void ml_quant_cal_weight_kernel(const int nthreads, const scalar_t* const weight_b, scalar_t * const weight, const int levels) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        const bool row_start = (index % levels == 0);
        weight[index] = row_start ? weight_b[index] : exp(weight_b[index]);
    }
}
// Forward quantization: snap each input element to the nearest edge of its
// channel's non-uniform grid and record which bin was hit.
//
// weight is laid out [channels, levels]: weight[c*levels] is the left edge
// of the grid, weight[c*levels + j] (j >= 1) is the width of bin j.  count
// accumulates per-bin hit statistics via atomics.  inner_shape = H*W, so pc
// below recovers the channel of flat index i (N,C,H,W layout).
// NOTE(review): the atomicAdd casts a scalar_t* to float*; this is only
// valid when the kernel is instantiated with scalar_t == float -- confirm
// double is never dispatched here.
template <typename scalar_t>
__global__ void ml_quant_single_gpu_forward_kernel(const int num, const scalar_t* const bottom, int * const quant,
    scalar_t * const top, const scalar_t * const weight, scalar_t * const count, const int inner_shape,
    const int channels, const int levels) {
    CUDA_KERNEL_LOOP(i, num)
    {
        int pc = (i / inner_shape) % channels;  // channel of element i
        // Distance of the input above the grid's left edge.
        scalar_t tmp = bottom[i] - weight[pc*levels];
        if (tmp < 0) {
            // Below the grid: clamp to the left edge (bin 0).
            quant[i] = 0;
            top[i] = weight[pc*levels];
            atomicAdd((float *)(count + pc*levels), float(1.0));
            //count[pc*levels]++;
            continue;
        }
        // Walk the bin widths until the remaining distance goes negative;
        // tmp then holds the (negative) offset past the upper edge of bin j.
        int j = 1;
        for (; j < levels; j++)
        {
            tmp -= weight[pc*levels + j];
            if (tmp < 0)
                break;
        }
        if (j == levels) j--;  // above the grid: clamp to the last edge
        // Round to the nearer of the two surrounding edges: if the input is
        // closer to the lower edge of bin j, step back one bin.
        if (tmp + tmp + weight[pc*levels + j] < 0) {
            tmp = tmp + weight[pc*levels + j];
            j--;
        }
        top[i] = bottom[i] - tmp;  // quantized value = input minus residual
        quant[i] = j;
        atomicAdd((float *)(count + pc*levels+j), float(1.0));
    }
}
// Widen the int bin indices into the floating-point second output tensor.
template <typename scalar_t>
__global__ void ml_quant_gpu_copy(const int nthreads, const int * const quant,
    scalar_t * const top)
{
    CUDA_KERNEL_LOOP(index, nthreads) {
        top[index] = static_cast<scalar_t>(quant[index]);
    }
}
// One thread per channel: sanitize that channel's bin parameters using the
// hit statistics gathered by the forward pass.
//
// Step 1: find the highest bin j (never below index 1) that received at
// least one hit, then collapse bins j..levels-1 to an equal share of bin j's
// parameter (subtracting log(levels - j), i.e. an even split in log space).
// Step 2: if the leftmost edge itself was never hit, shift the edge right by
// one exponentiated bin width and set the first two bin parameters to the
// log of their mean exponentiated width.
// NOTE(review): log/exp here treat entries 1.. as log-widths, matching the
// exp() applied in ml_quant_cal_weight_kernel.
template <typename scalar_t>
__global__ void ml_quant_check_weight(const int nthreads, scalar_t * const weight,const scalar_t * const count, const int levels ){
    CUDA_KERNEL_LOOP(i, nthreads) {
        // Highest non-empty bin for channel i.
        int j = levels - 1;
        for( ; j>1;j--){
            if(count[i*levels + j] >= 1)
                break;
        }
        // Split bin j's parameter evenly over bins j..levels-1 (log space).
        scalar_t tmp = weight[i*levels+j]-log(static_cast<scalar_t>(levels - j));
        for (; j < levels; j++)
            weight[i*levels + j] = tmp;
        if (count[i*levels] < 1)
        {
            // Left edge unused: move it up by the first bin's width and
            // average the first two bin widths.
            weight[i*levels] = weight[i*levels] + exp(weight[i*levels + 1]);
            tmp = log((exp(weight[i*levels + 1]) + exp(weight[i*levels + 2])) / 2);
            weight[i*levels + 1] = tmp;
            weight[i*levels + 2] = tmp;
            //LOG(INFO) << "update channel " << i;
        }
    }
}
// In-place scalar multiply: input[i] *= alpha.  Used to decay the bin-hit
// counters between weight updates.
template <typename scalar_t>
__global__ void ml_quant_scale(const int count, scalar_t * input, scalar_t alpha){
    CUDA_KERNEL_LOOP(index, count) {
        const scalar_t scaled = input[index] * alpha;
        input[index] = scaled;
    }
}
// Periodically (every mod_ iterations, skipping iteration 0) repair the bin
// parameter table in place and decay the accumulated bin-hit counters.
// weight: the raw bin-parameter tensor, modified on the GPU.
void quant_opt::update_weight(at::Tensor weight){
    if (iter_ % mod_ != 0 || iter_ == 0 ) return;
    //printf("check_weights %f\n", weight_decay_);
    AT_DISPATCH_FLOATING_TYPES(
        weight.scalar_type(), "quant_update_weight_cuda",
            ([&] {
                    //caffe_gpu_scal(handle_,channel_*bin_num_, static_cast<scalar_t>(weight_decay_), count_data_.data_ptr<scalar_t>());
                    // One thread per channel: merge empty tail bins / fix the edge.
                    ml_quant_check_weight<< <CAFFE_GET_BLOCKS(channel_), CAFFE_CUDA_NUM_THREADS, 0 , stream_ >> >
                        (channel_,weight.data_ptr<scalar_t>(),count_data_.data_ptr<scalar_t>(),bin_num_);
                    // Decay the hit counters so the statistics stay recent.
                    ml_quant_scale<< <CAFFE_GET_BLOCKS(channel_*bin_num_), CAFFE_CUDA_NUM_THREADS, 0 , stream_ >> >
                        (channel_*bin_num_, count_data_.data_ptr<scalar_t>(), static_cast<scalar_t>(weight_decay_));
                    CUDA_POST_KERNEL_CHECK;
            }
            )
    );
}
// Forward pass entry point.
// bottom_data: input activations (N,C,H,W); weight_old: raw per-channel bin
// parameters; train: when true, periodically repairs the bin table and
// advances the iteration counter.
// Returns top_data_: [0] quantized activations, and when ntop_ > 1 also
// [1] the bin indices widened to the floating dtype.
std::vector<at::Tensor> quant_opt::quant_forward_cuda(at::Tensor bottom_data, at::Tensor weight_old, bool train)
{
    reshape(bottom_data.size(0), bottom_data.size(1), bottom_data.size(2), bottom_data.size(3));
    reshape_top(bottom_data.options());
    if(train) update_weight(weight_old);
    //at::Tensor tmp_vec = at::empty({channel_,bin_num_},bottom_data.options());
    int count;
    AT_DISPATCH_FLOATING_TYPES(
        bottom_data.scalar_type(), "quant_forward_cuda",
            ([&] {
                    timer_->start();
                    // Materialize usable bin widths (exp of the stored parameters).
                    count = channel_* bin_num_;
                    ml_quant_cal_weight_kernel << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0 , stream_ >> >
                        (count, weight_old.data_ptr<scalar_t>(), weight_.data_ptr<scalar_t>(), bin_num_);
                    // Quantize every element and collect per-bin hit counts.
                    count = num_ * channel_ * width_ * height_;
                    ml_quant_single_gpu_forward_kernel<< <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                        (count, bottom_data.data_ptr<scalar_t>(), quant_.data_ptr<int>(), top_data_[0].data_ptr<scalar_t>(), weight_.data_ptr<scalar_t>(),
                            count_data_.data_ptr<scalar_t>(), width_*height_, channel_, bin_num_);
                    if(ntop_>1){
                        // Optional second output: the integer bin indices.
                        ml_quant_gpu_copy << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >(count, quant_.data_ptr<int>(), top_data_[1].data_ptr<scalar_t>());
                    }
                    CUDA_POST_KERNEL_CHECK;
                    timer_->stop("kernel 1");
                    //caffe_gpu_memcpy(bin_num_*channel_*sizeof(scalar_t), count_data_.data_ptr<scalar_t>(),tmp_vec.data_ptr<scalar_t>());
            }
            )
    );
    if(train)
        iter_ += 1;
    //printf("iter:%d...\n",iter_);
    return top_data_;
}
// Replace each entry by its sign (L1-style subgradient), leaving values
// inside the small dead zone |w| <= 1e-7 untouched.
template <typename scalar_t>
__global__ void ml_quant_backward_l1_kernel(const int nthreads, scalar_t * const weight) {
    CUDA_KERNEL_LOOP(index, nthreads) {
        const scalar_t w = weight[index];
        if (w > 0.0000001)
            weight[index] = 1.0;
        else if (w < -0.0000001)
            weight[index] = -1.0;
    }
}
// Accumulates d(loss)/d(bin parameter): an element's quantized value is
// (edge + sum of widths of bins 0..quant[idx]), so the element's gradient
// signal is added to every one of those entries of weight_diff.
// i is a channel-fastest permutation of the N,C,H,W flat index; idx maps it
// back to the tensor offset.  Presumably the permutation spreads neighboring
// threads' atomics across different weight rows to reduce contention --
// TODO confirm.
// NOTE(review): the atomicAdd casts scalar_t* to float*, which is only valid
// when scalar_t == float -- confirm double is never dispatched here.
template <typename scalar_t>
__global__ void ml_quant_single_gpu_backward_kernel(const int num, const int * const quant,
    const scalar_t * const top_diff, scalar_t * const weight_diff, const int inner_shape,
    const int channels, const int levels) {
    CUDA_KERNEL_LOOP(i, num) {
        int pc = i % channels;  // channel from the fastest-varying digit
        int idx = (i / channels) % inner_shape + (i / channels / inner_shape)*channels*inner_shape
            + pc*inner_shape;
        //int pc = (i / inner_shape) % channels;
        for (int j = 0; j <= quant[idx]; j++)
        {
            atomicAdd((float *)(weight_diff + pc*levels + j), float(top_diff[idx]));
            //weight_diff[pc*levels + j] += top_diff[i];
        }
    }
}
// Chain rule through the exp() applied in ml_quant_cal_weight_kernel: the
// gradient of every non-leading entry of a row is scaled by the matching
// exponentiated value in val; the first entry of each row is left untouched.
template <typename scalar_t>
__global__ void ml_quant_cal_weight_diff_kernel(const int num, scalar_t* const weight,
    const scalar_t * const val, const int levels) {
    CUDA_KERNEL_LOOP(i, num)
    {
        const bool row_start = (i % levels == 0);
        if (!row_start)
            weight[i] *= val[i];
    }
}
// Adds a scaled gradient for the (optional) index output to bottom_diff:
// bottom_diff[i] += alpha * top_diff[i] / beta, where beta estimates the
// input change needed to move the element to a neighboring bin.  The
// rounding direction (top vs. bottom) decides which neighboring bin width
// applies; 10000 effectively kills the gradient at the clamped ends of the
// grid, and beta is floored at 0.001 for numerical stability.
template <typename scalar_t>
__global__ void ml_quant_top_diff_kernel(const int num, const scalar_t* const weight,
    const scalar_t * top_data, const scalar_t * bottom_data,
    const int * const quant, const scalar_t * const top_diff, scalar_t* const bottom_diff,
    const scalar_t alpha, const int level, const int inner_shape, const int channels) {
    CUDA_KERNEL_LOOP(i, num)
    {
        int tc = (i / inner_shape) % channels;  // channel of element i
        scalar_t beta = 1.0;
        if (top_data[i] < bottom_data[i]) {
            // Rounded down: the next index is one bin width above.
            beta = quant[i]<level - 1? weight[tc*level + quant[i] + 1]: 10000;
        }
        else if (top_data[i] > bottom_data[i]) {
            // Rounded up: the previous index is one bin width below.
            beta = quant[i]>0 ? weight[tc*level + quant[i]] : 10000;
        }
        else {
            // Exactly on an edge: average the neighboring widths where both exist.
            if (quant[i] == 0) {
                beta = weight[tc*level + quant[i] + 1];
            }
            else if (quant[i] < level - 1) {
                beta = (weight[tc*level + quant[i]] + weight[tc*level + quant[i] + 1]) / 2.0;
            }
            else {
                beta = weight[tc*level + quant[i]];
            }
        }
        if (beta < 0.001) beta = 0.001;  // guard against division blow-up
        bottom_diff[i] = bottom_diff[i] + alpha*top_diff[i] / beta;
    }
}
// Backward pass entry point.
// top_diff: gradients w.r.t. the outputs ([0] quantized values, optionally
// [1] the index output); bottom_data/top_data: saved forward input/output.
// Returns bottom_diff_: [0] gradient w.r.t. the input activations,
// [1] gradient w.r.t. the raw bin parameters.
std::vector<at::Tensor> quant_opt::quant_backward_cuda(std::vector<at::Tensor> top_diff, at::Tensor bottom_data, at::Tensor top_data){
    reshape_bottom(top_diff[0].options());
    int num_thr = num_*channel_*height_*width_;
    AT_DISPATCH_FLOATING_TYPES(
        top_diff[0].scalar_type(), "quant_backward_cuda",
            ([&] {
                    // Per-element quantization residual; used below as the
                    // per-bin gradient signal for the width parameters.
                    bottom_diff_[0] = top_data - bottom_data;
                    // Zero the weight-gradient buffer before atomic accumulation.
                    // NOTE(review): cudaMemset's second argument is a byte
                    // value; passing scalar_t(0.0) only works because the
                    // fill happens to be zero.
                    cudaMemset(bottom_diff_[1].data_ptr<scalar_t>(), scalar_t(0.0), channel_* bin_num_*sizeof(scalar_t));
                    ml_quant_single_gpu_backward_kernel<scalar_t> << <CAFFE_GET_BLOCKS(num_thr), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                        (num_thr, quant_.data_ptr<int>(), bottom_diff_[0].data_ptr<scalar_t>(),
                            bottom_diff_[1].data_ptr<scalar_t>(), width_*height_, channel_, bin_num_);
                    // Chain rule through the exp() parameterization.
                    ml_quant_cal_weight_diff_kernel<scalar_t> << <CAFFE_GET_BLOCKS(channel_*bin_num_), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                        (channel_*bin_num_, bottom_diff_[1].data_ptr<scalar_t>(),weight_.data_ptr<scalar_t>(), bin_num_);
                    // Straight-through estimator: the input gradient starts
                    // as a copy of the output gradient.
                    bottom_diff_[0].copy_(top_diff[0]);
                    if(ntop_>1){
                        // Extra gradient contribution from the index output.
                        ml_quant_top_diff_kernel << <CAFFE_GET_BLOCKS(num_thr), CAFFE_CUDA_NUM_THREADS, 0, stream_ >> >
                            (num_thr, weight_.data_ptr<scalar_t>(), top_data_[0].data_ptr<scalar_t>(),bottom_data.data_ptr<scalar_t>(),
                                quant_.data_ptr<int>(), top_diff[1].data_ptr<scalar_t>(),
                                bottom_diff_[0].data_ptr<scalar_t>(), static_cast<scalar_t>(top_alpha_), bin_num_, width_*height_, channel_);
                    }
                    CUDA_POST_KERNEL_CHECK;
            }
            )
    );
    return bottom_diff_;
}
7c11a7fb4c425a3569eb33b0a697b4ba9075bbf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#else
#endif
#ifdef ENABLE_CURD
#define CURD_ALLOC(a, b) allocateReadWriteSets(a, b)
#define CURD_FREE(a, b) freeReadWriteSets(a, b)
#else
#define CURD_ALLOC(a, b)
#define CURD_FREE(a, b)
#endif
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple demonstration of hipcub::BlockReduce
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_store.cuh>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
/// Verbose output
bool g_verbose = false;
/// Timing iterations
int g_timing_iterations = 100;
/// Default grid size
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
/**
 * Simple kernel for computing a block-wide sum reduction over integers
 * (not a prefix sum -- the kernel body calls BlockReduce::Sum)
 */
// Computes the block-wide sum of BLOCK_THREADS * ITEMS_PER_THREAD integers
// with cub::BlockReduce and times the reduction in SM clock cycles.
// Launch: blocks of exactly BLOCK_THREADS threads; every block reads the
// same d_in tile of BLOCK_THREADS * ITEMS_PER_THREAD ints.
template <
    int BLOCK_THREADS,
    int ITEMS_PER_THREAD,
    BlockReduceAlgorithm ALGORITHM>
__global__ void BlockSumKernel(
    int *d_in, // Tile of input
    int *d_out, // Tile aggregate
    clock_t *d_elapsed) // Elapsed cycle count of block reduction
{
    // Specialize BlockReduce type for our thread block
    typedef BlockReduce<int, BLOCK_THREADS, ALGORITHM> BlockReduceT;
    // Shared memory
    __shared__ typename BlockReduceT::TempStorage temp_storage;
    // Per-thread tile data
    int data[ITEMS_PER_THREAD];
    LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in, data);
    // Start cycle timer
    clock_t start = clock();
    // Compute sum
    int aggregate = BlockReduceT(temp_storage).Sum(data);
    // Stop cycle timer
    clock_t stop = clock();
    // Store aggregate and elapsed clocks.
    // BlockReduce returns the valid aggregate only on thread 0; the clock
    // difference is taken both ways in case the cycle counter wrapped.
    if (threadIdx.x == 0)
    {
        *d_elapsed = (start > stop) ? start - stop : stop - start;
        *d_out = aggregate;
    }
}
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution).
* Returns the aggregate
*/
/**
 * Fill h_in with the repeating pattern i % 17 and return the sum of all
 * elements (the expected reduction result for verification).
 */
int Initialize(int *h_in, int num_items)
{
    int total = 0;
    for (int i = 0; i < num_items; ++i)
    {
        const int v = i % 17;
        h_in[i] = v;
        total += v;
    }
    return total;
}
/**
* Test thread block reduction
*/
/**
 * Benchmark one BlockReduce configuration: verify the single-block sum
 * against the host reference, then time g_timing_iterations repeated runs
 * (wall clock via GpuTimer plus in-kernel SM cycle counts).
 * NOTE(review): h_gpu is allocated and freed but never used -- candidate for
 * removal.  hipMalloc/hipMemcpy return codes are not checked here (only the
 * CubDebugExit-wrapped calls are) -- confirm this is acceptable for a demo.
 */
template <
    int BLOCK_THREADS,
    int ITEMS_PER_THREAD,
    BlockReduceAlgorithm ALGORITHM>
void Test()
{
    const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
    // Allocate host arrays
    int *h_in = new int[TILE_SIZE];
    int *h_gpu = new int[TILE_SIZE + 1];
    // Initialize problem and reference output on host
    int h_aggregate = Initialize(h_in, TILE_SIZE);
    // Initialize device arrays
    int *d_in = NULL;
    int *d_out = NULL;
    clock_t *d_elapsed = NULL;
    hipMalloc((void**)&d_in, sizeof(int) * TILE_SIZE);
    hipMalloc((void**)&d_out, sizeof(int) * 1);
    hipMalloc((void**)&d_elapsed, sizeof(clock_t));
    // Display input problem data
    if (g_verbose)
    {
        printf("Input data: ");
        for (int i = 0; i < TILE_SIZE; i++)
            printf("%d, ", h_in[i]);
        printf("\n\n");
    }
    // Kernel props (max resident blocks per SM for this configuration)
    int max_sm_occupancy;
    CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
    // Copy problem to device
    hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
    //printf("BlockReduce algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n",
    //    (ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : "BLOCK_REDUCE_WARP_REDUCTIONS",
    //    TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy);
    // Run aggregate/prefix kernel (correctness pass)
    BENCHMARK.start_kernel();
    {CURD_ALLOC(g_grid_size, BLOCK_THREADS);
    hipLaunchKernelGGL(( BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0,
        d_in,
        d_out,
        d_elapsed);
    CURD_FREE(g_grid_size, BLOCK_THREADS);}
    BENCHMARK.end_kernel();
    // Check total aggregate against the host reference
    //printf("\tAggregate: ");
    int compare = CompareDeviceResults(&h_aggregate, d_out, 1, g_verbose, g_verbose);
    //printf("%s\n", compare ? "FAIL" : "PASS");
    AssertEquals(0, compare);
    if(compare != 0)
        BENCHMARK.fail();
    // Run this several times and average the performance results
    GpuTimer timer;
    float elapsed_millis = 0.0;
    clock_t elapsed_clocks = 0;
    for (int i = 0; i < g_timing_iterations; ++i)
    {
        // Copy problem to device (re-copied each iteration for a cold tile)
        hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice);
        timer.Start();
        // Run aggregate/prefix kernel
        BENCHMARK.start_kernel();
        {CURD_ALLOC(g_grid_size, BLOCK_THREADS);
        hipLaunchKernelGGL(( BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0,
            d_in,
            d_out,
            d_elapsed);
        CURD_FREE(g_grid_size, BLOCK_THREADS);}
        BENCHMARK.end_kernel();
        timer.Stop();
        elapsed_millis += timer.ElapsedMillis();
        // Copy clocks from device
        clock_t clocks;
        CubDebugExit(hipMemcpy(&clocks, d_elapsed, sizeof(clock_t), hipMemcpyDeviceToHost));
        elapsed_clocks += clocks;
    }
    // Check for kernel errors and STDIO from the kernel, if any
    CubDebugExit(hipPeekAtLastError());
    CubDebugExit(hipDeviceSynchronize());
    // Display timing results (computed even though the printfs are disabled)
    float avg_millis = elapsed_millis / g_timing_iterations;
    float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0;
    float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
    float avg_clocks_per_item = avg_clocks / TILE_SIZE;
    // printf("\tAverage BlockReduce::Sum clocks: %.3f\n", avg_clocks);
    // printf("\tAverage BlockReduce::Sum clocks per item: %.3f\n", avg_clocks_per_item);
    // printf("\tAverage kernel millis: %.4f\n", avg_millis);
    // printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec);
    // Cleanup
    if (h_in) delete[] h_in;
    if (h_gpu) delete[] h_gpu;
    if (d_in) hipFree(d_in);
    if (d_out) hipFree(d_out);
    if (d_elapsed) hipFree(d_elapsed);
}
/**
* Main
*/
/**
 * Entry point: parse flags (--v verbose, --i timing iterations,
 * --grid-size, --device), initialize the device, then benchmark
 * BlockReduce across a sweep of (threads, items-per-thread) configurations
 * that all cover the same 1024-item tile, for both the RAKING and
 * WARP_REDUCTIONS algorithms.
 */
int main(int argc, char** argv)
{
    // Initialize command line
    CommandLineArgs args(argc, argv);
    g_verbose = args.CheckCmdLineFlag("v");
    args.GetCmdLineArgument("i", g_timing_iterations);
    args.GetCmdLineArgument("grid-size", g_grid_size);
    // Print usage
    if (args.CheckCmdLineFlag("help"))
    {
        printf("%s "
            "[--device=<device-id>] "
            "[--i=<timing iterations>] "
            "[--grid-size=<grid size>] "
            "[--v] "
            "\n", argv[0]);
        exit(0);
    }
    // Initialize device
    CubDebugExit(args.DeviceInit());
    BENCHMARK.start_total();
    // Run tests: each configuration reduces 1024 items total
    Test<1024, 1, BLOCK_REDUCE_RAKING>();
    Test<512, 2, BLOCK_REDUCE_RAKING>();
    Test<256, 4, BLOCK_REDUCE_RAKING>();
    Test<128, 8, BLOCK_REDUCE_RAKING>();
    Test<64, 16, BLOCK_REDUCE_RAKING>();
    Test<32, 32, BLOCK_REDUCE_RAKING>();
    Test<16, 64, BLOCK_REDUCE_RAKING>();
    // printf("-------------\n");
    Test<1024, 1, BLOCK_REDUCE_WARP_REDUCTIONS>();
    Test<512, 2, BLOCK_REDUCE_WARP_REDUCTIONS>();
    Test<256, 4, BLOCK_REDUCE_WARP_REDUCTIONS>();
    Test<128, 8, BLOCK_REDUCE_WARP_REDUCTIONS>();
    Test<64, 16, BLOCK_REDUCE_WARP_REDUCTIONS>();
    Test<32, 32, BLOCK_REDUCE_WARP_REDUCTIONS>();
    Test<16, 64, BLOCK_REDUCE_WARP_REDUCTIONS>();
    BENCHMARK.end_total();
    return 0;
}
| 7c11a7fb4c425a3569eb33b0a697b4ba9075bbf5.cu | #ifdef ENABLE_CURD
#include<curd_lib_host.h>
#else
#endif
#ifdef ENABLE_CURD
#define CURD_ALLOC(a, b) allocateReadWriteSets(a, b)
#define CURD_FREE(a, b) freeReadWriteSets(a, b)
#else
#define CURD_ALLOC(a, b)
#define CURD_FREE(a, b)
#endif
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple demonstration of cub::BlockReduce
******************************************************************************/
// Ensure printing of CUDA runtime errors to console (define before including cub.h)
#define CUB_STDERR
#include <stdio.h>
#include <iostream>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/block_reduce.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
/// Verbose output
bool g_verbose = false;
/// Timing iterations
int g_timing_iterations = 100;
/// Default grid size
int g_grid_size = 1;
//---------------------------------------------------------------------
// Kernels
//---------------------------------------------------------------------
/**
 * Simple kernel for computing a block-wide sum reduction over integers
 * (not a prefix sum -- the kernel body calls BlockReduce::Sum)
 */
/**
 * Computes a block-wide sum over BLOCK_THREADS * ITEMS_PER_THREAD ints and
 * records the reduction's duration in device clock cycles.
 *
 * Launch with exactly BLOCK_THREADS threads per block. Every block loads the
 * same d_in tile and writes the same d_out / d_elapsed locations, so with
 * more than one block the stored values are those of the last block to write.
 */
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockReduceAlgorithm ALGORITHM>
__global__ void BlockSumKernel(
int *d_in, // Tile of input
int *d_out, // Tile aggregate
clock_t *d_elapsed) // Elapsed cycle count of block reduction
{
// Specialize BlockReduce type for our thread block
typedef BlockReduce<int, BLOCK_THREADS, ALGORITHM> BlockReduceT;
// Shared memory required by the CUB block reduction
__shared__ typename BlockReduceT::TempStorage temp_storage;
// Per-thread tile data, loaded in a striped layout (thread t reads
// elements t, t + BLOCK_THREADS, ...)
int data[ITEMS_PER_THREAD];
LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in, data);
// Start cycle timer
clock_t start = clock();
// Compute sum; the full aggregate is only valid in thread 0
int aggregate = BlockReduceT(temp_storage).Sum(data);
// Stop cycle timer
clock_t stop = clock();
// Store aggregate and elapsed clocks. The ternary keeps the elapsed value
// non-negative if the cycle counter wrapped between the two readings.
if (threadIdx.x == 0)
{
*d_elapsed = (start > stop) ? start - stop : stop - start;
*d_out = aggregate;
}
}
//---------------------------------------------------------------------
// Host utilities
//---------------------------------------------------------------------
/**
* Initialize reduction problem (and solution).
* Returns the aggregate
*/
/**
 * Fills h_in with the repeating pattern 0,1,...,16,0,1,... and returns the
 * sum of all generated values — the reference aggregate the device
 * reduction is compared against.
 */
int Initialize(int *h_in, int num_items)
{
    int total = 0;
    for (int idx = 0; idx < num_items; idx++)
    {
        const int value = idx % 17;
        h_in[idx] = value;
        total += value;
    }
    return total;
}
/**
* Test thread block reduction
*/
/**
 * Correctness + timing harness for one BlockReduce configuration.
 *
 * Runs BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM> once and
 * compares the device aggregate against the host reference from Initialize(),
 * then re-runs it g_timing_iterations times accumulating wall-clock and
 * device-cycle statistics. All CUDA API calls are now routed through
 * CubDebugExit (they were previously unchecked, so a failed allocation or
 * copy surfaced later as a confusing kernel error).
 */
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockReduceAlgorithm ALGORITHM>
void Test()
{
    const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;

    // Host input and its expected block-wide sum.
    int *h_in = new int[TILE_SIZE];
    int h_aggregate = Initialize(h_in, TILE_SIZE);

    // Device buffers: input tile, single-int aggregate, cycle counter.
    int *d_in = NULL;
    int *d_out = NULL;
    clock_t *d_elapsed = NULL;
    CubDebugExit(cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE));
    CubDebugExit(cudaMalloc((void**)&d_out, sizeof(int) * 1));
    CubDebugExit(cudaMalloc((void**)&d_elapsed, sizeof(clock_t)));

    // Display input problem data
    if (g_verbose)
    {
        printf("Input data: ");
        for (int i = 0; i < TILE_SIZE; i++)
            printf("%d, ", h_in[i]);
        printf("\n\n");
    }

    // Kernel occupancy (kept for parity with the reference CUB example;
    // only consumed by the silenced diagnostic print).
    int max_sm_occupancy;
    CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS));
    (void)max_sm_occupancy;

    // Copy problem to device.
    CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice));

    // Correctness run.
    BENCHMARK.start_kernel();
    {CURD_ALLOC(g_grid_size, BLOCK_THREADS);
    BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
        d_in,
        d_out,
        d_elapsed);
    CURD_FREE(g_grid_size, BLOCK_THREADS);}
    BENCHMARK.end_kernel();

    // Check total aggregate against the host reference.
    int compare = CompareDeviceResults(&h_aggregate, d_out, 1, g_verbose, g_verbose);
    AssertEquals(0, compare);
    if (compare != 0)
        BENCHMARK.fail();

    // Timing runs. The input is re-copied each iteration so every iteration
    // starts from an identical device state.
    GpuTimer timer;
    float elapsed_millis = 0.0;
    clock_t elapsed_clocks = 0;
    for (int i = 0; i < g_timing_iterations; ++i)
    {
        CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice));
        timer.Start();
        BENCHMARK.start_kernel();
        {CURD_ALLOC(g_grid_size, BLOCK_THREADS);
        BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>(
            d_in,
            d_out,
            d_elapsed);
        CURD_FREE(g_grid_size, BLOCK_THREADS);}
        BENCHMARK.end_kernel();
        timer.Stop();
        elapsed_millis += timer.ElapsedMillis();
        // Accumulate the device-side cycle count reported by the kernel.
        clock_t clocks;
        CubDebugExit(cudaMemcpy(&clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost));
        elapsed_clocks += clocks;
    }

    // Check for kernel errors and STDIO from the kernel, if any.
    CubDebugExit(cudaPeekAtLastError());
    CubDebugExit(cudaDeviceSynchronize());

    // Timing statistics; only consumed by the silenced diagnostic prints.
    float avg_millis = elapsed_millis / g_timing_iterations;
    float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0;
    float avg_clocks = float(elapsed_clocks) / g_timing_iterations;
    float avg_clocks_per_item = avg_clocks / TILE_SIZE;
    (void)avg_items_per_sec;
    (void)avg_clocks_per_item;

    // Cleanup. (The original's unused h_gpu scratch buffer has been dropped.)
    if (h_in) delete[] h_in;
    if (d_in) CubDebugExit(cudaFree(d_in));
    if (d_out) CubDebugExit(cudaFree(d_out));
    if (d_elapsed) CubDebugExit(cudaFree(d_elapsed));
}
/**
* Main
*/
/**
 * Entry point. Parses --v / --i=<iters> / --grid-size=<n> / --device flags,
 * then sweeps every (BLOCK_THREADS, ITEMS_PER_THREAD) pairing that yields a
 * 1024-item tile, for both block-reduction algorithms, inside the benchmark
 * harness.
 */
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("grid-size", g_grid_size);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--i=<timing iterations>] "
"[--grid-size=<grid size>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
BENCHMARK.start_total();
// Run tests: raking reduction across all tile decompositions...
Test<1024, 1, BLOCK_REDUCE_RAKING>();
Test<512, 2, BLOCK_REDUCE_RAKING>();
Test<256, 4, BLOCK_REDUCE_RAKING>();
Test<128, 8, BLOCK_REDUCE_RAKING>();
Test<64, 16, BLOCK_REDUCE_RAKING>();
Test<32, 32, BLOCK_REDUCE_RAKING>();
Test<16, 64, BLOCK_REDUCE_RAKING>();
// printf("-------------\n");
// ...then the warp-reductions variant over the same decompositions.
Test<1024, 1, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<512, 2, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<256, 4, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<128, 8, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<64, 16, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<32, 32, BLOCK_REDUCE_WARP_REDUCTIONS>();
Test<16, 64, BLOCK_REDUCE_WARP_REDUCTIONS>();
BENCHMARK.end_total();
return 0;
}
|
6176e080c9a4190ec527f5b9e85d3afbc9a4c2b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/limits.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace disp_bilateral_filter
{
__constant__ float* ctable_color;
__constant__ float* ctable_space;
__constant__ size_t ctable_space_step;
__constant__ int cndisp;
__constant__ int cradius;
__constant__ short cedge_disc;
__constant__ short cmax_disc;
// Uploads the disparity bilateral-filter parameters into the __constant__
// symbols above. table_color and table_space.data are *device* pointers
// copied by value into ctable_color / ctable_space; the tables themselves
// must remain allocated while the filter kernels run.
void disp_load_constants(float* table_color, PtrStepSzf table_space, int ndisp, int radius, short edge_disc, short max_disc)
{
cudaSafeCall( hipMemcpyToSymbol(ctable_color, &table_color, sizeof(table_color)) );
cudaSafeCall( hipMemcpyToSymbol(ctable_space, &table_space.data, sizeof(table_space.data)) );
// Convert the row pitch from bytes to float elements before uploading.
size_t table_space_step = table_space.step / sizeof(float);
cudaSafeCall( hipMemcpyToSymbol(ctable_space_step, &table_space_step, sizeof(size_t)) );
cudaSafeCall( hipMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(cradius, &radius, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(cedge_disc, &edge_disc, sizeof(short)) );
cudaSafeCall( hipMemcpyToSymbol(cmax_disc, &max_disc, sizeof(short)) );
}
// Per-pixel colour distance used to index ctable_color: the maximum
// (Chebyshev) absolute difference over the pixel's channels.
template <int channels>
struct DistRgbMax
{
    static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
    {
        // Subtraction promotes to int, so the abs values fit in [0, 255].
        const int d0 = ::abs(a[0] - b[0]);
        const int d1 = ::abs(a[1] - b[1]);
        const int d2 = ::abs(a[2] - b[2]);
        return static_cast<uchar>(::max(d0, ::max(d1, d2)));
    }
};
// Single-channel specialization: plain absolute difference.
template <>
struct DistRgbMax<1>
{
    static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
    {
        return static_cast<uchar>(::abs(a[0] - b[0]));
    }
};
// One red-black (checkerboard) refinement pass over the disparity map.
// `t` (0 or 1) selects which checkerboard colour is updated this launch;
// each thread owns one pixel of that colour (hence the `<< 1` in x).
// A pixel is re-estimated only when its disparity jumps by at least
// cedge_disc relative to a 4-neighbour; the replacement is the candidate
// (centre or one of the 4 neighbours) with the lowest bilateral-weighted
// cost over a (2*cradius+1)^2 window. Border pixels are left untouched.
template <int channels, typename T>
__global__ void disp_bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
// dp: centre, up, left, down, right disparities
T dp[5];
if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
{
dp[0] = *(disp + (y ) * disp_step + x + 0);
dp[1] = *(disp + (y-1) * disp_step + x + 0);
dp[2] = *(disp + (y ) * disp_step + x - 1);
dp[3] = *(disp + (y+1) * disp_step + x + 0);
dp[4] = *(disp + (y ) * disp_step + x + 1);
// Only refine near disparity discontinuities.
if(::abs(dp[1] - dp[0]) >= cedge_disc || ::abs(dp[2] - dp[0]) >= cedge_disc || ::abs(dp[3] - dp[0]) >= cedge_disc || ::abs(dp[4] - dp[0]) >= cedge_disc)
{
const int ymin = ::max(0, y - cradius);
const int xmin = ::max(0, x - cradius);
const int ymax = ::min(h - 1, y + cradius);
const int xmax = ::min(w - 1, x + cradius);
float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
// ic: guide-image pixel at the centre position.
const uchar* ic = img + y * img_step + channels * x;
for(int yi = ymin; yi <= ymax; yi++)
{
const T* disp_y = disp + yi * disp_step;
for(int xi = xmin; xi <= xmax; xi++)
{
const uchar* in = img + yi * img_step + channels * xi;
uchar dist_rgb = DistRgbMax<channels>::calc(in, ic);
// Bilateral weight = colour-similarity term * spatial term.
const float weight = ctable_color[dist_rgb] * (ctable_space + ::abs(y-yi)* ctable_space_step)[::abs(x-xi)];
const T disp_reg = disp_y[xi];
// Truncated-L1 data cost for each of the 5 candidates.
cost[0] += ::min(cmax_disc, ::abs(disp_reg - dp[0])) * weight;
cost[1] += ::min(cmax_disc, ::abs(disp_reg - dp[1])) * weight;
cost[2] += ::min(cmax_disc, ::abs(disp_reg - dp[2])) * weight;
cost[3] += ::min(cmax_disc, ::abs(disp_reg - dp[3])) * weight;
cost[4] += ::min(cmax_disc, ::abs(disp_reg - dp[4])) * weight;
}
}
// Pick the candidate with the minimum accumulated cost.
float minimum = numeric_limits<float>::max();
int id = 0;
if (cost[0] < minimum)
{
minimum = cost[0];
id = 0;
}
if (cost[1] < minimum)
{
minimum = cost[1];
id = 1;
}
if (cost[2] < minimum)
{
minimum = cost[2];
id = 2;
}
if (cost[3] < minimum)
{
minimum = cost[3];
id = 3;
}
if (cost[4] < minimum)
{
minimum = cost[4];
id = 4;
}
*(disp + y * disp_step + x) = dp[id];
}
}
}
// Host launcher: runs `iters` full red-black iterations (two kernel passes
// per iteration, one per checkerboard parity) on `stream`. Each 32x8 block
// covers pixels of a single parity, so grid.x spans half the columns
// (threads.x << 1). Only 1- and 3-channel guide images are supported.
template <typename T>
void disp_bilateral_filter(PtrStepSz<T> disp, PtrStepSzb img, int channels, int iters, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(disp.cols, threads.x << 1);
grid.y = divUp(disp.rows, threads.y);
switch (channels)
{
case 1:
for (int i = 0; i < iters; ++i)
{
hipLaunchKernelGGL(( disp_bilateral_filter<1>), dim3(grid), dim3(threads), 0, stream, 0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( disp_bilateral_filter<1>), dim3(grid), dim3(threads), 0, stream, 1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( hipGetLastError() );
}
break;
case 3:
for (int i = 0; i < iters; ++i)
{
hipLaunchKernelGGL(( disp_bilateral_filter<3>), dim3(grid), dim3(threads), 0, stream, 0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( disp_bilateral_filter<3>), dim3(grid), dim3(threads), 0, stream, 1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( hipGetLastError() );
}
break;
default:
CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
// Default stream: block until the filter has finished.
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void disp_bilateral_filter<uchar>(PtrStepSz<uchar> disp, PtrStepSzb img, int channels, int iters, hipStream_t stream);
template void disp_bilateral_filter<short>(PtrStepSz<short> disp, PtrStepSzb img, int channels, int iters, hipStream_t stream);
} // namespace bilateral_filter
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
| 6176e080c9a4190ec527f5b9e85d3afbc9a4c2b4.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/limits.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace disp_bilateral_filter
{
__constant__ float* ctable_color;
__constant__ float* ctable_space;
__constant__ size_t ctable_space_step;
__constant__ int cndisp;
__constant__ int cradius;
__constant__ short cedge_disc;
__constant__ short cmax_disc;
// Uploads the disparity bilateral-filter parameters into the __constant__
// symbols above. table_color and table_space.data are *device* pointers
// copied by value; the tables must stay allocated while the kernels run.
void disp_load_constants(float* table_color, PtrStepSzf table_space, int ndisp, int radius, short edge_disc, short max_disc)
{
cudaSafeCall( cudaMemcpyToSymbol(ctable_color, &table_color, sizeof(table_color)) );
cudaSafeCall( cudaMemcpyToSymbol(ctable_space, &table_space.data, sizeof(table_space.data)) );
// Convert the row pitch from bytes to float elements before uploading.
size_t table_space_step = table_space.step / sizeof(float);
cudaSafeCall( cudaMemcpyToSymbol(ctable_space_step, &table_space_step, sizeof(size_t)) );
cudaSafeCall( cudaMemcpyToSymbol(cndisp, &ndisp, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(cradius, &radius, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(cedge_disc, &edge_disc, sizeof(short)) );
cudaSafeCall( cudaMemcpyToSymbol(cmax_disc, &max_disc, sizeof(short)) );
}
// Per-pixel colour distance used to index ctable_color: the maximum
// (Chebyshev) absolute channel difference between two pixels.
template <int channels>
struct DistRgbMax
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
uchar x = ::abs(a[0] - b[0]);
uchar y = ::abs(a[1] - b[1]);
uchar z = ::abs(a[2] - b[2]);
return (::max(::max(x, y), z));
}
};
// Single-channel specialization: plain absolute difference.
template <>
struct DistRgbMax<1>
{
static __device__ __forceinline__ uchar calc(const uchar* a, const uchar* b)
{
return ::abs(a[0] - b[0]);
}
};
// One red-black (checkerboard) refinement pass over the disparity map.
// `t` (0 or 1) selects the checkerboard parity updated this launch; a pixel
// is re-estimated only near a disparity discontinuity (jump >= cedge_disc vs
// a 4-neighbour), replaced by the candidate (centre or 4-neighbour value)
// with the lowest bilateral-weighted truncated-L1 cost over the window.
template <int channels, typename T>
__global__ void disp_bilateral_filter(int t, T* disp, size_t disp_step, const uchar* img, size_t img_step, int h, int w)
{
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int x = ((blockIdx.x * blockDim.x + threadIdx.x) << 1) + ((y + t) & 1);
// dp: centre, up, left, down, right disparities
T dp[5];
if (y > 0 && y < h - 1 && x > 0 && x < w - 1)
{
dp[0] = *(disp + (y ) * disp_step + x + 0);
dp[1] = *(disp + (y-1) * disp_step + x + 0);
dp[2] = *(disp + (y ) * disp_step + x - 1);
dp[3] = *(disp + (y+1) * disp_step + x + 0);
dp[4] = *(disp + (y ) * disp_step + x + 1);
if(::abs(dp[1] - dp[0]) >= cedge_disc || ::abs(dp[2] - dp[0]) >= cedge_disc || ::abs(dp[3] - dp[0]) >= cedge_disc || ::abs(dp[4] - dp[0]) >= cedge_disc)
{
const int ymin = ::max(0, y - cradius);
const int xmin = ::max(0, x - cradius);
const int ymax = ::min(h - 1, y + cradius);
const int xmax = ::min(w - 1, x + cradius);
float cost[] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};
const uchar* ic = img + y * img_step + channels * x;
for(int yi = ymin; yi <= ymax; yi++)
{
const T* disp_y = disp + yi * disp_step;
for(int xi = xmin; xi <= xmax; xi++)
{
const uchar* in = img + yi * img_step + channels * xi;
uchar dist_rgb = DistRgbMax<channels>::calc(in, ic);
// Bilateral weight = colour-similarity term * spatial term.
const float weight = ctable_color[dist_rgb] * (ctable_space + ::abs(y-yi)* ctable_space_step)[::abs(x-xi)];
const T disp_reg = disp_y[xi];
cost[0] += ::min(cmax_disc, ::abs(disp_reg - dp[0])) * weight;
cost[1] += ::min(cmax_disc, ::abs(disp_reg - dp[1])) * weight;
cost[2] += ::min(cmax_disc, ::abs(disp_reg - dp[2])) * weight;
cost[3] += ::min(cmax_disc, ::abs(disp_reg - dp[3])) * weight;
cost[4] += ::min(cmax_disc, ::abs(disp_reg - dp[4])) * weight;
}
}
// Pick the candidate with the minimum accumulated cost.
float minimum = numeric_limits<float>::max();
int id = 0;
if (cost[0] < minimum)
{
minimum = cost[0];
id = 0;
}
if (cost[1] < minimum)
{
minimum = cost[1];
id = 1;
}
if (cost[2] < minimum)
{
minimum = cost[2];
id = 2;
}
if (cost[3] < minimum)
{
minimum = cost[3];
id = 3;
}
if (cost[4] < minimum)
{
minimum = cost[4];
id = 4;
}
*(disp + y * disp_step + x) = dp[id];
}
}
}
// Host launcher: runs `iters` full red-black iterations (two kernel passes
// per iteration, one per parity) on `stream`. grid.x spans half the columns
// because each pass touches only one checkerboard colour. Only 1- and
// 3-channel guide images are supported.
template <typename T>
void disp_bilateral_filter(PtrStepSz<T> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(disp.cols, threads.x << 1);
grid.y = divUp(disp.rows, threads.y);
switch (channels)
{
case 1:
for (int i = 0; i < iters; ++i)
{
disp_bilateral_filter<1><<<grid, threads, 0, stream>>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
disp_bilateral_filter<1><<<grid, threads, 0, stream>>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
}
break;
case 3:
for (int i = 0; i < iters; ++i)
{
disp_bilateral_filter<3><<<grid, threads, 0, stream>>>(0, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
disp_bilateral_filter<3><<<grid, threads, 0, stream>>>(1, disp.data, disp.step/sizeof(T), img.data, img.step, disp.rows, disp.cols);
cudaSafeCall( cudaGetLastError() );
}
break;
default:
CV_Error(cv::Error::BadNumChannels, "Unsupported channels count");
}
// Default stream: block until the filter has finished.
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void disp_bilateral_filter<uchar>(PtrStepSz<uchar> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream);
template void disp_bilateral_filter<short>(PtrStepSz<short> disp, PtrStepSzb img, int channels, int iters, cudaStream_t stream);
} // namespace bilateral_filter
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
|
b4ef6915d8ec1912abe41964ed69e787bbc7215b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <thrust/system/hip/detail/hipcub/hipcub.hpp>
#include <iomanip>
#include "caffe/FRCNN/frcnn_proposal_layer.hpp"
#include "caffe/FRCNN/util/frcnn_utils.hpp"
#include "caffe/FRCNN/util/frcnn_helper.hpp"
#include "caffe/FRCNN/util/frcnn_param.hpp"
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include <iostream>
namespace caffe {
namespace Frcnn {
using std::vector;
// Writes the identity permutation (indices[i] = i); used as the value
// payload for the descending radix sort so it yields the anchor ordering.
__global__ void GetIndex(const int n,int *indices){
CUDA_KERNEL_LOOP(index , n){
indices[index] = index;
}
}
// Decodes RPN bbox regression deltas into image-space proposal boxes for the
// top-scoring anchors. sorted_indices[index] encodes (k, j, i) as
// k*H*W + j*W + i (anchor k at feature-map cell (i, j)); the base anchor is
// shifted by feat_stride, the (dx, dy, dw, dh) deltas are applied in
// centre/size form, and the result is clipped to the image bounds.
// NOTE(review): the `- 1.0` literals promote the min/max arguments to
// double — presumably harmless but confirm it resolves on the toolchain.
template <typename Dtype>
__global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox,
const int height, const int width, const int feat_stride,
const int im_height, const int im_width,
const int* sorted_indices, const float* anchors,
float* const transform_bbox) {
CUDA_KERNEL_LOOP(index , nthreads) {
const int score_idx = sorted_indices[index];
const int i = score_idx % width; // width
const int j = (score_idx % (width * height)) / width; // height
const int k = score_idx / (width * height); // channel
float *box = transform_bbox + index * 4;
// Anchor box translated to this feature-map cell.
box[0] = anchors[k * 4 + 0] + i * feat_stride;
box[1] = anchors[k * 4 + 1] + j * feat_stride;
box[2] = anchors[k * 4 + 2] + i * feat_stride;
box[3] = anchors[k * 4 + 3] + j * feat_stride;
// Regression deltas (dx, dy, dw, dh) for this anchor at (i, j).
const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i],
bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i],
bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i],
bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] };
float src_w = box[2] - box[0] + 1;
float src_h = box[3] - box[1] + 1;
float src_ctr_x = box[0] + 0.5 * src_w;
float src_ctr_y = box[1] + 0.5 * src_h;
// Apply deltas in centre/size space.
float pred_ctr_x = det[0] * src_w + src_ctr_x;
float pred_ctr_y = det[1] * src_h + src_ctr_y;
float pred_w = exp(det[2]) * src_w;
float pred_h = exp(det[3]) * src_h;
box[0] = pred_ctr_x - 0.5 * pred_w;
box[1] = pred_ctr_y - 0.5 * pred_h;
box[2] = pred_ctr_x + 0.5 * pred_w;
box[3] = pred_ctr_y + 0.5 * pred_h;
// Clip the proposal to the image.
box[0] = max(0.0f, min(box[0], im_width - 1.0));
box[1] = max(0.0f, min(box[1], im_height - 1.0));
box[2] = max(0.0f, min(box[2], im_width - 1.0));
box[3] = max(0.0f, min(box[3], im_height - 1.0));
}
}
// Flags each proposal: flags[i] = 0 when either side of box i is shorter
// than min_size, 1 otherwise. Note the side length is computed without the
// conventional "+ 1" pixel, matching the rest of this pipeline.
__global__ void SelectBox(const int nthreads, const float *box, float min_size,
int *flags) {
CUDA_KERNEL_LOOP(index , nthreads) {
const float box_w = box[index * 4 + 2] - box[index * 4 + 0];
const float box_h = box[index * 4 + 3] - box[index * 4 + 1];
flags[index] = (box_w < min_size || box_h < min_size) ? 0 : 1;
}
}
// Stream-compaction scatter. selected_indices is the inclusive prefix sum of
// the 0/1 selection flags, so element `index` was selected exactly when its
// cumulative count increased at its position; it is written to compacted
// slot (count - 1). Scores are compacted alongside when both score pointers
// are non-NULL. In-place use (out_box == in_box) relies on every destination
// slot being <= its source slot.
template <typename Dtype>
__global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices,
float *out_box, const Dtype *in_score, Dtype *out_score) {
CUDA_KERNEL_LOOP(index , nthreads) {
if ((index == 0 && selected_indices[index] == 1) ||
(index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) {
out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0];
out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1];
out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2];
out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3];
if (in_score!=NULL && out_score!=NULL) {
out_score[selected_indices[index] - 1] = in_score[index];
}
}
}
}
// Gathers the NMS survivors into the layer output. Each output ROI is
// [batch_index, x1, y1, x2, y2] with batch_index fixed to 0 (single-image
// batches). Matching scores are gathered too when both score pointers are
// non-NULL.
template <typename Dtype>
__global__ void SelectBoxAftNMS(const int nthreads, const float *in_box, int *keep_indices,
Dtype *top_data, const Dtype *in_score, Dtype* top_score) {
CUDA_KERNEL_LOOP(index , nthreads) {
top_data[index * 5] = 0;
int keep_idx = keep_indices[index];
// Copy the 4 box coordinates into slots 1..4 of the output row.
for (int j = 1; j < 5; ++j) {
top_data[index * 5 + j] = in_box[keep_idx * 4 + j - 1];
}
if (top_score != NULL && in_score != NULL) {
top_score[index] = in_score[keep_idx];
}
}
}
// Proposal-layer forward pass. The GPU implementation below is compiled out
// (#if 0) and the layer delegates to Forward_cpu. The dead code documents
// the intended CUB-based GPU pipeline: (1) radix-sort anchor scores,
// (2) decode bboxes for the top anchors, (3) drop too-small boxes via
// flag + inclusive-scan compaction, (4) GPU NMS, (5) gather top proposals.
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
Forward_cpu(bottom, top);
return ;
// NOTE(review): everything below is dead code retained for reference.
#if 0
DLOG(ERROR) << "========== enter proposal layer";
const Dtype *bottom_rpn_score = bottom[0]->gpu_data();
const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data();
// bottom data comes from host memory
Dtype bottom_im_info[3];
CHECK_EQ(bottom[2]->count(), 3);
CUDA_CHECK(hipMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, hipMemcpyDeviceToHost));
const int num = bottom[1]->num();
const int channes = bottom[1]->channels();
const int height = bottom[1]->height();
const int width = bottom[1]->width();
CHECK(num == 1) << "only single item batches are supported";
CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4";
const float im_height = bottom_im_info[0];
const float im_width = bottom_im_info[1];
int rpn_pre_nms_top_n;
int rpn_post_nms_top_n;
float rpn_nms_thresh;
int rpn_min_size;
if (this->phase_ == TRAIN) {
rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::rpn_nms_thresh;
rpn_min_size = FrcnnParam::rpn_min_size;
} else {
rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh;
rpn_min_size = FrcnnParam::test_rpn_min_size;
}
LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n;
LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n;
if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return;
const int config_n_anchors = FrcnnParam::anchors.size() / 4;
const int total_anchor_num = config_n_anchors * height * width;
//Step 1. -------------------------------Sort the rpn result----------------------
// the first half of rpn_score is the bg score
// Note that the sorting operator will change the order fg_scores (bottom_rpn_score)
Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]);
Dtype *sorted_scores = NULL;
CUDA_CHECK(hipMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num));
cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores);
int *indices = NULL;
CUDA_CHECK(hipMalloc((void**)&indices, sizeof(int) * total_anchor_num));
hipLaunchKernelGGL(( GetIndex), dim3(caffe::CAFFE_GET_BLOCKS(total_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
total_anchor_num, indices);
hipDeviceSynchronize();
int *sorted_indices = NULL;
CUDA_CHECK(hipMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num));
cub::DoubleBuffer<int> d_values(indices, sorted_indices);
void *sort_temp_storage_ = NULL;
size_t sort_temp_storage_bytes_ = 0;
// calculate the temp_storage_bytes
hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_;
CUDA_CHECK(hipMalloc(&sort_temp_storage_, sort_temp_storage_bytes_));
// sorting
hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
hipDeviceSynchronize();
//Step 2. ---------------------------bbox transform----------------------------
const int retained_anchor_num = ::min(total_anchor_num, rpn_pre_nms_top_n);
// float *transform_bbox = NULL;
// CUDA_CHECK(hipMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4));
hipLaunchKernelGGL(( BBoxTransformInv<Dtype>), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride,
im_height, im_width, sorted_indices, anchors_, transform_bbox_);
hipDeviceSynchronize();
//Step 3. -------------------------filter out small box-----------------------
// select the box larger than min size
// int *selected_flags = NULL;
// CUDA_CHECK(hipMalloc(&selected_flags, sizeof(int) * retained_anchor_num));
hipLaunchKernelGGL(( SelectBox), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_);
hipDeviceSynchronize();
// cumulative sum up the flags to get the copy index
int *selected_indices_ = NULL;
CUDA_CHECK(hipMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num));
void *cumsum_temp_storage_ = NULL;
size_t cumsum_temp_storage_bytes_ = 0;
hipcub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_;
CUDA_CHECK(hipMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_));
// NOTE(review): the scan below reuses sort_temp_storage_ with
// cumsum_temp_storage_bytes_; it looks like it should pass
// cumsum_temp_storage_ (just allocated above) — verify before re-enabling.
hipcub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
// CUDA_CHECK(hipFree(cumsum_temp_storage));
int selected_num = -1;
hipMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), hipMemcpyDeviceToHost);
CHECK_GT(selected_num, 0);
Dtype *bbox_score_ = NULL;
if (top.size() == 2) CUDA_CHECK(hipMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num));
hipLaunchKernelGGL(( SelectBoxByIndices), dim3(caffe::CAFFE_GET_BLOCKS(selected_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_);
hipDeviceSynchronize();
//Step 4. -----------------------------apply nms-------------------------------
DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh;
vector<int> keep_indices(selected_num);
int keep_num = -1;
gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh);
DLOG(ERROR) << "rpn num after gpu nms: " << keep_num;
keep_num = ::min(keep_num, rpn_post_nms_top_n);
DLOG(ERROR) << "========== copy to top";
hipMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, hipMemcpyHostToDevice);
top[0]->Reshape(keep_num, 5, 1, 1);
Dtype *top_data = top[0]->mutable_gpu_data();
Dtype *top_score = NULL;
if (top.size() == 2) {
top[1]->Reshape(keep_num, 1, 1, 1);
top_score = top[1]->mutable_gpu_data();
}
hipLaunchKernelGGL(( SelectBoxAftNMS), dim3(caffe::CAFFE_GET_BLOCKS(keep_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0,
keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score);
DLOG(ERROR) << "========== exit proposal layer";
////////////////////////////////////
// do not forget to free the malloc memory
CUDA_CHECK(hipFree(sorted_scores));
CUDA_CHECK(hipFree(indices));
CUDA_CHECK(hipFree(sorted_indices));
CUDA_CHECK(hipFree(sort_temp_storage_));
CUDA_CHECK(hipFree(cumsum_temp_storage_));
CUDA_CHECK(hipFree(selected_indices_));
if (bbox_score_!=NULL) CUDA_CHECK(hipFree(bbox_score_));
#endif
}
// Backward pass is not supported for the proposal layer: any request to
// propagate gradients to a bottom blob triggers NOT_IMPLEMENTED.
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
    const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) {
  for (size_t i = 0; i < propagate_down.size(); ++i) {
    if (propagate_down[i]) {
      NOT_IMPLEMENTED;
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer);
} // namespace frcnn
} // namespace caffe
| b4ef6915d8ec1912abe41964ed69e787bbc7215b.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <thrust/system/cuda/detail/cub/cub.cuh>
#include <iomanip>
#include "caffe/FRCNN/frcnn_proposal_layer.hpp"
#include "caffe/FRCNN/util/frcnn_utils.hpp"
#include "caffe/FRCNN/util/frcnn_helper.hpp"
#include "caffe/FRCNN/util/frcnn_param.hpp"
#include "caffe/FRCNN/util/frcnn_gpu_nms.hpp"
#include <iostream>
namespace caffe {
namespace Frcnn {
using std::vector;
// Fills indices[0..n) with the identity mapping indices[i] = i.
// Used to seed the value array before the radix sort of anchor scores
// in Forward_gpu (the sorted values then act as a permutation).
__global__ void GetIndex(const int n,int *indices){
CUDA_KERNEL_LOOP(index , n){
indices[index] = index;
}
}
// Decodes RPN bbox-regression deltas into clipped, image-space boxes for the
// top-`nthreads` anchors (ordered by descending score via sorted_indices).
//
// bottom_rpn_bbox : (4*A, height, width) deltas (dx, dy, dw, dh) per anchor
// sorted_indices  : flat anchor indices, one per output slot
// anchors         : A base anchors, 4 floats each (x1, y1, x2, y2)
// transform_bbox  : output, 4 floats per thread, clipped to the image bounds
//
// Fix over the original: the clipping used double literals ("im_width - 1.0"),
// silently promoting the min/max arithmetic to double inside a float kernel;
// all literals are now single precision.
template <typename Dtype>
__global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox,
                                 const int height, const int width, const int feat_stride,
                                 const int im_height, const int im_width,
                                 const int* sorted_indices, const float* anchors,
                                 float* const transform_bbox) {
  CUDA_KERNEL_LOOP(index , nthreads) {
    // Unflatten the score index into (anchor channel k, row j, column i).
    const int score_idx = sorted_indices[index];
    const int i = score_idx % width;                       // feature-map column
    const int j = (score_idx % (width * height)) / width;  // feature-map row
    const int k = score_idx / (width * height);            // anchor channel
    float *box = transform_bbox + index * 4;
    // Translate the base anchor to this feature-map cell.
    box[0] = anchors[k * 4 + 0] + i * feat_stride;
    box[1] = anchors[k * 4 + 1] + j * feat_stride;
    box[2] = anchors[k * 4 + 2] + i * feat_stride;
    box[3] = anchors[k * 4 + 3] + j * feat_stride;
    const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i],
                           bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] };
    // Standard Fast R-CNN inverse transform: deltas are relative to the
    // anchor's center and size.
    float src_w = box[2] - box[0] + 1.0f;
    float src_h = box[3] - box[1] + 1.0f;
    float src_ctr_x = box[0] + 0.5f * src_w;
    float src_ctr_y = box[1] + 0.5f * src_h;
    float pred_ctr_x = det[0] * src_w + src_ctr_x;
    float pred_ctr_y = det[1] * src_h + src_ctr_y;
    float pred_w = exp(det[2]) * src_w;
    float pred_h = exp(det[3]) * src_h;
    box[0] = pred_ctr_x - 0.5f * pred_w;
    box[1] = pred_ctr_y - 0.5f * pred_h;
    box[2] = pred_ctr_x + 0.5f * pred_w;
    box[3] = pred_ctr_y + 0.5f * pred_h;
    // Clip to [0, dim-1] entirely in single precision.
    box[0] = max(0.0f, min(box[0], im_width - 1.0f));
    box[1] = max(0.0f, min(box[1], im_height - 1.0f));
    box[2] = max(0.0f, min(box[2], im_width - 1.0f));
    box[3] = max(0.0f, min(box[3], im_height - 1.0f));
  }
}
// Writes a 0/1 keep flag per box: flags[i] is 1 iff box i is at least
// min_size in both width and height (boxes are stored as x1,y1,x2,y2).
__global__ void SelectBox(const int nthreads, const float *box, float min_size,
                          int *flags) {
  CUDA_KERNEL_LOOP(index , nthreads) {
    const float *b = box + index * 4;
    const float w = b[2] - b[0];
    const float h = b[3] - b[1];
    flags[index] = (w < min_size || h < min_size) ? 0 : 1;
  }
}
// Stream-compaction copy of the boxes (and optionally their scores) that
// survived the min-size filter.  selected_indices is the inclusive prefix
// sum of the 0/1 keep flags, so entry `index` was kept iff its running
// count is one greater than its predecessor's (or equals 1 at index 0);
// kept entries land densely at output slot selected_indices[index] - 1.
// NOTE(review): the caller passes the same buffer as in_box and out_box.
// The destination slot never exceeds the source slot (prefix sums of 0/1
// flags satisfy selected_indices[i] <= i + 1), but in-place compaction
// across concurrent threads still deserves scrutiny before reuse.
template <typename Dtype>
__global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices,
float *out_box, const Dtype *in_score, Dtype *out_score) {
CUDA_KERNEL_LOOP(index , nthreads) {
if ((index == 0 && selected_indices[index] == 1) ||
(index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) {
out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0];
out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1];
out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2];
out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3];
if (in_score!=NULL && out_score!=NULL) {
out_score[selected_indices[index] - 1] = in_score[index];
}
}
}
}
// Gathers the NMS survivors into the layer's top blob.
// Each output row is (batch_index, x1, y1, x2, y2); the batch index is
// always written as 0 because the layer only supports single-item batches
// (enforced by CHECK(num == 1) in Forward_gpu).  Scores are copied to
// top_score only when both score pointers are non-NULL.
template <typename Dtype>
__global__ void SelectBoxAftNMS(const int nthreads, const float *in_box, int *keep_indices,
Dtype *top_data, const Dtype *in_score, Dtype* top_score) {
CUDA_KERNEL_LOOP(index , nthreads) {
top_data[index * 5] = 0;
int keep_idx = keep_indices[index];
for (int j = 1; j < 5; ++j) {
top_data[index * 5 + j] = in_box[keep_idx * 4 + j - 1];
}
if (top_score != NULL && in_score != NULL) {
top_score[index] = in_score[keep_idx];
}
}
}
// GPU forward pass of the RPN proposal layer.
// Currently this simply delegates to the CPU implementation and returns.
// The full CUDA pipeline below is compiled out (#if 0) but kept for
// reference: sort scores -> decode boxes -> drop small boxes -> NMS ->
// copy survivors into top.
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
Forward_cpu(bottom, top);
return ;
#if 0
DLOG(ERROR) << "========== enter proposal layer";
const Dtype *bottom_rpn_score = bottom[0]->gpu_data();
const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data();
// bottom data comes from host memory
Dtype bottom_im_info[3];
CHECK_EQ(bottom[2]->count(), 3);
CUDA_CHECK(cudaMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, cudaMemcpyDeviceToHost));
const int num = bottom[1]->num();
const int channes = bottom[1]->channels();
const int height = bottom[1]->height();
const int width = bottom[1]->width();
CHECK(num == 1) << "only single item batches are supported";
CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4";
const float im_height = bottom_im_info[0];
const float im_width = bottom_im_info[1];
// NMS/top-k parameters differ between TRAIN and TEST phases.
int rpn_pre_nms_top_n;
int rpn_post_nms_top_n;
float rpn_nms_thresh;
int rpn_min_size;
if (this->phase_ == TRAIN) {
rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::rpn_nms_thresh;
rpn_min_size = FrcnnParam::rpn_min_size;
} else {
rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n;
rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n;
rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh;
rpn_min_size = FrcnnParam::test_rpn_min_size;
}
LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n;
LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n;
if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return;
const int config_n_anchors = FrcnnParam::anchors.size() / 4;
const int total_anchor_num = config_n_anchors * height * width;
//Step 1. -------------------------------Sort the rpn result----------------------
// the first half of rpn_score is the bg score
// Note that the sorting operator will change the order fg_scores (bottom_rpn_score)
// NOTE(review): this casts away const on the bottom blob's gpu data; the
// radix sort mutates the foreground scores in place.
Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]);
Dtype *sorted_scores = NULL;
CUDA_CHECK(cudaMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num));
cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores);
int *indices = NULL;
CUDA_CHECK(cudaMalloc((void**)&indices, sizeof(int) * total_anchor_num));
GetIndex<<<caffe::CAFFE_GET_BLOCKS(total_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
total_anchor_num, indices);
cudaDeviceSynchronize();
int *sorted_indices = NULL;
CUDA_CHECK(cudaMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num));
cub::DoubleBuffer<int> d_values(indices, sorted_indices);
void *sort_temp_storage_ = NULL;
size_t sort_temp_storage_bytes_ = 0;
// calculate the temp_storage_bytes
cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_;
CUDA_CHECK(cudaMalloc(&sort_temp_storage_, sort_temp_storage_bytes_));
// sorting
cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_,
d_keys, d_values, total_anchor_num);
cudaDeviceSynchronize();
//Step 2. ---------------------------bbox transform----------------------------
const int retained_anchor_num = std::min(total_anchor_num, rpn_pre_nms_top_n);
// float *transform_bbox = NULL;
// CUDA_CHECK(cudaMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4));
BBoxTransformInv<Dtype><<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride,
im_height, im_width, sorted_indices, anchors_, transform_bbox_);
cudaDeviceSynchronize();
//Step 3. -------------------------filter out small box-----------------------
// select the box larger than min size
// int *selected_flags = NULL;
// CUDA_CHECK(cudaMalloc(&selected_flags, sizeof(int) * retained_anchor_num));
SelectBox<<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_);
cudaDeviceSynchronize();
// cumulative sum up the flags to get the copy index
int *selected_indices_ = NULL;
CUDA_CHECK(cudaMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num));
void *cumsum_temp_storage_ = NULL;
size_t cumsum_temp_storage_bytes_ = 0;
cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_;
CUDA_CHECK(cudaMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_));
// NOTE(review): the call below passes sort_temp_storage_ with
// cumsum_temp_storage_bytes_ instead of the freshly allocated
// cumsum_temp_storage_ — this looks like a bug; confirm and fix before
// ever re-enabling this #if 0 path.
cub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_,
selected_flags_, selected_indices_, retained_anchor_num);
// CUDA_CHECK(cudaFree(cumsum_temp_storage));
// The last prefix-sum entry is the number of boxes that passed the filter.
int selected_num = -1;
cudaMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), cudaMemcpyDeviceToHost);
CHECK_GT(selected_num, 0);
Dtype *bbox_score_ = NULL;
if (top.size() == 2) CUDA_CHECK(cudaMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num));
SelectBoxByIndices<<<caffe::CAFFE_GET_BLOCKS(selected_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_);
cudaDeviceSynchronize();
//Step 4. -----------------------------apply nms-------------------------------
DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh;
vector<int> keep_indices(selected_num);
int keep_num = -1;
gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh);
DLOG(ERROR) << "rpn num after gpu nms: " << keep_num;
keep_num = std::min(keep_num, rpn_post_nms_top_n);
DLOG(ERROR) << "========== copy to top";
cudaMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, cudaMemcpyHostToDevice);
top[0]->Reshape(keep_num, 5, 1, 1);
Dtype *top_data = top[0]->mutable_gpu_data();
Dtype *top_score = NULL;
if (top.size() == 2) {
top[1]->Reshape(keep_num, 1, 1, 1);
top_score = top[1]->mutable_gpu_data();
}
SelectBoxAftNMS<<<caffe::CAFFE_GET_BLOCKS(keep_num), caffe::CAFFE_CUDA_NUM_THREADS>>>(
keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score);
DLOG(ERROR) << "========== exit proposal layer";
////////////////////////////////////
// do not forget to free the malloc memory
CUDA_CHECK(cudaFree(sorted_scores));
CUDA_CHECK(cudaFree(indices));
CUDA_CHECK(cudaFree(sorted_indices));
CUDA_CHECK(cudaFree(sort_temp_storage_));
CUDA_CHECK(cudaFree(cumsum_temp_storage_));
CUDA_CHECK(cudaFree(selected_indices_));
if (bbox_score_!=NULL) CUDA_CHECK(cudaFree(bbox_score_));
#endif
}
// Backward pass: proposal generation is non-differentiable, so gradients
// are never propagated; requesting one aborts via NOT_IMPLEMENTED.
template <typename Dtype>
void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
    const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) {
  const size_t n = propagate_down.size();
  for (size_t idx = 0; idx < n; ++idx) {
    if (propagate_down[idx]) {
      NOT_IMPLEMENTED;
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer);
} // namespace frcnn
} // namespace caffe
|
7ac9a18974bce27dcf5936f6dd258c0fbd8bf19e.hip | // !!! This is a file automatically generated by hipify!!!
#include <glad/glad.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_gl_interop.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include "../graphics/shaders/shader.h"
#include "../graphics/cameras/camera3d.h"
#include "../graphics/performanceMonitor.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <iostream>
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
using namespace std;
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// PASO 1 - DECLARACION DE CONSTANTES, COMO DIMENSIONES , VBO Y VALORES DE CAMARA
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
void runKernel(float3* pos, unsigned int mesh_width, unsigned int mesh_height, float time);
void runTest();
void runCuda(struct cudaGraphicsResource** vbo_resource, float time);
void runDisplay();
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res);
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void mouse_callback(GLFWwindow* window, double xpos, double ypos);
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods);
void scroll_callback(GLFWwindow* window, double xoffset, double yoffset);
void processInput(GLFWwindow* window, bool *points);
const unsigned int window_width = 800;
const unsigned int window_height = 800;
const unsigned int mesh_width = 256;
const unsigned int mesh_height = 256;
// camera
Camera3D camera(glm::vec3(0.0f, 0.0f, 0.0f));
// vbo variables
GLuint vbo;
struct cudaGraphicsResource* cuda_vbo_resource;
void* d_vbo_buffer = NULL;
// timing
float deltaTime = 0.0f; // time between current frame and last frame
// Computes one vertex of an animated sine/cosine height field.
// Launch layout: 2D grid, one thread per mesh vertex.  There is no bounds
// check, so gridDim*blockDim must cover width x height exactly (the host
// launches 16x16 blocks over a 256x256 mesh, which divides evenly).
// Writes (u, v, w) with u,v in [-7, 7] and w the wave height at `time`.
__global__ void points3dKernel(float3* pos, unsigned int width, unsigned int height, float time)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
// calculate uv coordinates
float u = x / (float)width;
float v = y / (float)height;
// remap [0,1] -> [-1,1] -> [-7,7]
u = u * 2.0f - 1.0f;
v = v * 2.0f - 1.0f;
u = u * 7.0f;
v = v * 7.0f;
// calculate simple sine wave pattern
float freq = 2.0f;
float w = sinf(u * freq + time) * cosf(v * freq + time) * 2.0f;
// write output vertex
pos[y * width + x] = make_float3(u, v, w);
}
int main()
{
    // Flip to true to run the headless kernel self-test instead of the
    // interactive OpenGL display loop.
    const bool cudaTest = false;
    if (cudaTest) {
        runTest();
    } else {
        runDisplay();
    }
    return 0;
}
// Headless smoke test: allocate the device vertex buffer, run the kernel
// once, copy results back, free everything, and print "Test passed".
// NOTE(review): the host buffer and the copy cover only width*height floats
// while the device buffer holds width*height*3 floats (one float3 per
// vertex), so only the first third of the results is read back — confirm
// whether that is intentional for this smoke test.  No HIP call here is
// error-checked, so "Test passed" prints regardless of failures.
void runTest()
{
void* returnData = malloc(mesh_width * mesh_height * sizeof(float));
// create VBO
hipMalloc((void**)&d_vbo_buffer, mesh_width * mesh_height * 3 * sizeof(float));
// execute the kernel
runKernel((float3*)d_vbo_buffer, mesh_width, mesh_height, 1.0f);
hipDeviceSynchronize();
hipMemcpy(returnData, d_vbo_buffer, mesh_width * mesh_height * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_vbo_buffer);
d_vbo_buffer = NULL;
free(returnData);
printf("Test passed");
}
// Launches the height-field kernel over the whole mesh using 16x16 thread
// blocks.  mesh_width and mesh_height must be multiples of 16: the grid is
// computed by integer division and the kernel has no bounds check.
void runKernel(float3* pos, unsigned int mesh_width, unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(16, 16, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
points3dKernel << < grid, block >> > (pos, mesh_width, mesh_height, time);
}
// Maps the GL vertex buffer into the GPU compute address space, regenerates
// the mesh for animation time `time`, then unmaps so GL can draw it.
// All calls use the default stream, so the kernel is ordered before the
// unmap without an explicit sync.
void runCuda(struct cudaGraphicsResource** vbo_resource, float time)
{
// map OpenGL buffer object for writing from CUDA
float3* dptr;
hipGraphicsMapResources(1, vbo_resource, 0);
size_t num_bytes;
hipGraphicsResourceGetMappedPointer((void**)&dptr, &num_bytes,
*vbo_resource);
//printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes);
runKernel(dptr, mesh_width, mesh_height, time);
// unmap buffer object
hipGraphicsUnmapResources(1, vbo_resource, 0);
}
// Creates the GLFW window and GL 3.3 core context, builds a VBO registered
// with the compute runtime for interop, then runs the render loop: each
// frame re-runs the kernel to animate the mesh and draws the vertices as
// point sprites with the sphere shader.  Returns after window close and
// resource cleanup.
void runDisplay()
{
// glfw: initialize and configure
// ------------------------------
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
string title = "3D Points interop CUDA";
// glfw window creation
// --------------------
GLFWwindow* window = glfwCreateWindow(window_width, window_height, title.c_str(), NULL, NULL);
if (window == NULL)
{
cout << "Failed to create GLFW window" << endl;
glfwTerminate();
return;
}
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
glfwSetCursorPosCallback(window, mouse_callback);
glfwSetMouseButtonCallback(window, mouse_button_callback);
glfwSetScrollCallback(window, scroll_callback);
// glad: load all OpenGL function pointers
// ---------------------------------------
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
cout << "Failed to initialize GLAD" << endl;
return;
}
// build and compile our shader program
// ------------------------------------
Shader sphereShader("../graphics/shaders/sphereMVPShader.vs", "../graphics/shaders/sphereMVPShader.fs");
// set up vertex data (and buffer(s)) and configure vertex attributes
// ------------------------------------------------------------------
unsigned int VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &vbo);
// bind the Vertex Array Object first, then bind and set vertex buffer(s), and then configure vertex attributes(s).
glBindVertexArray(VAO);
// create buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// initialize buffer object (3 floats per vertex, filled by the kernel)
unsigned int size = mesh_width * mesh_height * 3 * sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
// register this buffer object with CUDA
hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, hipGraphicsMapFlagsWriteDiscard);
// position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// seed the buffer with the t=0 mesh before the first frame
runCuda(&cuda_vbo_resource, 0.0f);
// You can unbind the VAO afterwards so other VAO calls won't accidentally modify this VAO, but this rarely happens. Modifying other
// VAOs requires a call to glBindVertexArray anyways so we generally don't unbind VAOs (nor VBOs) when it's not directly necessary.
glBindVertexArray(0);
float t1 = (float)glfwGetTime();
float t0 = (float)glfwGetTime();
float timer = 0.0f;
bool points = false;
PerformanceMonitor pMonitor(glfwGetTime(), 0.5f);
// render loop
// -----------
while (!glfwWindowShouldClose(window))
{
// frame timing; timer accumulates the animation time fed to the kernel
t1 = (float)glfwGetTime();
deltaTime = t1 - t0;
t0 = t1;
pMonitor.update(glfwGetTime());
stringstream ss;
ss << title << " " << pMonitor;
glfwSetWindowTitle(window, ss.str().c_str());
timer += deltaTime * 1.0f;
// input
// -----
processInput(window, &points);
runCuda(&cuda_vbo_resource, timer);
// render
// ------
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_PROGRAM_POINT_SIZE);
glEnable(GL_DEPTH_TEST);
// render the triangle
sphereShader.use();
//glPointSize(100.0f);
// pass projection matrix to shader (note that in this case it could change every frame)
glm::mat4 projection = glm::perspective(glm::radians(camera.Fovy), (float)window_width / (float)window_height, 0.1f, 100.0f);
sphereShader.setMat4("projection", projection);
// camera/view transformation
glm::mat4 view = camera.GetViewMatrix();
sphereShader.setMat4("view", view);
glm::mat4 model = glm::mat4(1.0f); // make sure to initialize matrix to identity matrix first
sphereShader.setMat4("model", model);
// while P is held, draw unscaled points; otherwise scale sprites by distance
if (points)
{
sphereShader.setFloat("pointRadius", 1);
sphereShader.setFloat("pointScale", 1);
}
else {
sphereShader.setFloat("pointRadius", 0.125f * 0.5f);
sphereShader.setFloat("pointScale", window_height / glm::tan(camera.Fovy * 0.5f * (float)M_PI / 180.0f));
}
sphereShader.setVec3("Color", glm::vec3(1.0f, 0.0f, 0.0f));
sphereShader.setVec3("lightDir", glm::vec3(1.0f, 1.0f, 0.0f));
glBindVertexArray(VAO);
glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height);
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
// -------------------------------------------------------------------------------
glfwSwapBuffers(window);
glfwPollEvents();
}
// optional: de-allocate all resources once they've outlived their purpose:
// ------------------------------------------------------------------------
glDeleteVertexArrays(1, &VAO);
deleteVBO(&vbo, cuda_vbo_resource);
// glfw: terminate, clearing all previously allocated GLFW resources.
// ------------------------------------------------------------------
glfwTerminate();
return;
}
// Releases the interop VBO: unregister it from the compute runtime first,
// then delete the GL buffer and clear the caller's handle.
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res)
{
    // unregister this buffer object with CUDA
    hipGraphicsUnregisterResource(vbo_res);
    // BUGFIX: glBindBuffer takes a target enum; the original passed the
    // literal 1, which is not a valid target and raises GL_INVALID_ENUM.
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    glDeleteBuffers(1, vbo);
    *vbo = 0;
}
// glfw: whenever the window size changed (by OS or user resize) this callback function executes
// ---------------------------------------------------------------------------------------------
// GLFW framebuffer-resize callback: keeps the GL viewport in sync with the
// window's framebuffer size.
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
// make sure the viewport matches the new window dimensions; note that width and
// height will be significantly larger than specified on retina displays.
glViewport(0, 0, width, height);
}
// process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly
// ---------------------------------------------------------------------------------------------------------
// Polls the keyboard once per frame:
//   ESC            -> request window close
//   W/A/S/D        -> camera translation, SPACE -> move toward origin
//   arrow keys     -> camera rotation (azimuth/zenith)
//   P (held)       -> *points = true (render as plain points); released -> false
void processInput(GLFWwindow* window, bool *points)
{
if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, true);
if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
camera.ProcessKeyboardMovement(FORWARD, deltaTime);
if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)
camera.ProcessKeyboardMovement(BACKWARD, deltaTime);
if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS)
camera.ProcessKeyboardMovement(LEFT, deltaTime);
if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS)
camera.ProcessKeyboardMovement(RIGHT, deltaTime);
if (glfwGetKey(window, GLFW_KEY_SPACE) == GLFW_PRESS)
camera.ProcessKeyboardMovement(ORIGIN, deltaTime);
if (glfwGetKey(window, GLFW_KEY_UP) == GLFW_PRESS)
camera.ProcessKeyboardRotation(AZIM_UP, deltaTime);
if (glfwGetKey(window, GLFW_KEY_DOWN) == GLFW_PRESS)
camera.ProcessKeyboardRotation(AZIM_DOWN, deltaTime);
if (glfwGetKey(window, GLFW_KEY_LEFT) == GLFW_PRESS)
camera.ProcessKeyboardRotation(ZEN_LEFT, deltaTime);
if (glfwGetKey(window, GLFW_KEY_RIGHT) == GLFW_PRESS)
camera.ProcessKeyboardRotation(ZEN_RIGHT, deltaTime);
if (glfwGetKey(window, GLFW_KEY_P) == GLFW_PRESS)
*points = true;
if (glfwGetKey(window, GLFW_KEY_P) == GLFW_RELEASE)
*points = false;
}
// GLFW mouse-button callback: a held left button rotates the camera, a held
// right button drags its center; other actions (e.g. repeat) are ignored.
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
{
    const bool pressed = (action == GLFW_PRESS);
    const bool released = (action == GLFW_RELEASE);
    if (button == GLFW_MOUSE_BUTTON_LEFT)
    {
        if (pressed) camera.SetRotDrag(true);
        if (released) camera.SetRotDrag(false);
    }
    else if (button == GLFW_MOUSE_BUTTON_RIGHT)
    {
        if (pressed) camera.SetCenterDrag(true);
        if (released) camera.SetCenterDrag(false);
    }
}
// glfw: whenever the mouse moves, this callback is called
// -------------------------------------------------------
// Converts the cursor's pixel coordinates into [-1, 1] window-normalized
// coordinates (y pointing up) and forwards them to the camera for drag
// handling.  window_width/2 and window_height/2 are exact (800 is even).
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
float posX = 2 * (xpos - window_width / 2) / window_width;
float posY = 2 * (window_height / 2 - ypos) / window_height;
camera.SetCurrentMousePos(posX, posY);
}
// glfw: whenever the mouse scroll wheel scrolls, this callback is called
// ----------------------------------------------------------------------
// Forwards vertical mouse-wheel scrolling to the camera; presumably this
// adjusts the zoom/FOV inside Camera3D — see its implementation to confirm.
void scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
camera.ProcessMouseScroll(yoffset);
}
| 7ac9a18974bce27dcf5936f6dd258c0fbd8bf19e.cu | #include <glad/glad.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_gl_interop.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include "../graphics/shaders/shader.h"
#include "../graphics/cameras/camera3d.h"
#include "../graphics/performanceMonitor.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <sstream>
#include <iostream>
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795
#endif
using namespace std;
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// PASO 1 - DECLARACION DE CONSTANTES, COMO DIMENSIONES , VBO Y VALORES DE CAMARA
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
void runKernel(float3* pos, unsigned int mesh_width, unsigned int mesh_height, float time);
void runTest();
void runCuda(struct cudaGraphicsResource** vbo_resource, float time);
void runDisplay();
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res);
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void mouse_callback(GLFWwindow* window, double xpos, double ypos);
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods);
void scroll_callback(GLFWwindow* window, double xoffset, double yoffset);
void processInput(GLFWwindow* window, bool *points);
const unsigned int window_width = 800;
const unsigned int window_height = 800;
const unsigned int mesh_width = 256;
const unsigned int mesh_height = 256;
// camera
Camera3D camera(glm::vec3(0.0f, 0.0f, 0.0f));
// vbo variables
GLuint vbo;
struct cudaGraphicsResource* cuda_vbo_resource;
void* d_vbo_buffer = NULL;
// timing
float deltaTime = 0.0f; // time between current frame and last frame
// Computes one vertex of an animated sine/cosine height field and writes it
// as (u, v, height) into pos, with u,v in [-7, 7].
// Launch layout: 2D grid, one thread per mesh vertex.  A bounds guard makes
// the kernel correct for any launch configuration that at least covers
// width x height (the original relied on an exact 16x16 cover and would
// write out of bounds otherwise).
__global__ void points3dKernel(float3* pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Robustness fix: skip threads outside the mesh.
    if (x >= width || y >= height) return;
    // calculate uv coordinates in [0,1], then remap to [-7,7]
    float u = x / (float)width;
    float v = y / (float)height;
    u = u * 2.0f - 1.0f;
    v = v * 2.0f - 1.0f;
    u = u * 7.0f;
    v = v * 7.0f;
    // simple travelling sine/cosine wave pattern
    float freq = 2.0f;
    float w = sinf(u * freq + time) * cosf(v * freq + time) * 2.0f;
    // write output vertex
    pos[y * width + x] = make_float3(u, v, w);
}
// Entry point: cudaTest selects the headless kernel self-test; otherwise
// the interactive OpenGL display loop runs until the window is closed.
int main()
{
bool cudaTest = false;
if (cudaTest)
runTest();
else
runDisplay();
}
// Headless smoke test: runs the mesh kernel once on a device buffer, copies
// the result back to the host, and reports success or failure.
// Fixes over the original: every CUDA call is error-checked (the original
// printed "Test passed" unconditionally), and the host buffer/copy now
// cover the whole float3 vertex array instead of only its first third.
void runTest()
{
    const size_t bytes = mesh_width * mesh_height * 3 * sizeof(float);
    void* returnData = malloc(bytes);
    if (returnData == NULL)
    {
        fprintf(stderr, "Test failed: host allocation\n");
        return;
    }
    // create the device vertex buffer
    cudaError_t err = cudaMalloc((void**)&d_vbo_buffer, bytes);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Test failed: cudaMalloc: %s\n", cudaGetErrorString(err));
        free(returnData);
        return;
    }
    // execute the kernel once with an arbitrary animation time
    runKernel((float3*)d_vbo_buffer, mesh_width, mesh_height, 1.0f);
    err = cudaGetLastError();                              // launch-config errors
    if (err == cudaSuccess) err = cudaDeviceSynchronize(); // async execution errors
    if (err == cudaSuccess)
        err = cudaMemcpy(returnData, d_vbo_buffer, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_vbo_buffer);
    d_vbo_buffer = NULL;
    free(returnData);
    if (err == cudaSuccess)
        printf("Test passed");
    else
        fprintf(stderr, "Test failed: %s\n", cudaGetErrorString(err));
}
// Launches the height-field kernel over the whole mesh using 16x16 thread
// blocks.  mesh_width and mesh_height must be multiples of 16: the grid is
// computed by integer division, so a remainder would leave vertices unwritten.
void runKernel(float3* pos, unsigned int mesh_width, unsigned int mesh_height, float time)
{
// execute the kernel
dim3 block(16, 16, 1);
dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
points3dKernel << < grid, block >> > (pos, mesh_width, mesh_height, time);
}
// Maps the GL vertex buffer into CUDA's address space, regenerates the mesh
// for animation time `time`, then unmaps so GL can draw it.  All calls use
// the default stream, so the kernel is ordered before the unmap without an
// explicit synchronization.
void runCuda(struct cudaGraphicsResource** vbo_resource, float time)
{
// map OpenGL buffer object for writing from CUDA
float3* dptr;
cudaGraphicsMapResources(1, vbo_resource, 0);
size_t num_bytes;
cudaGraphicsResourceGetMappedPointer((void**)&dptr, &num_bytes,
*vbo_resource);
//printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes);
runKernel(dptr, mesh_width, mesh_height, time);
// unmap buffer object
cudaGraphicsUnmapResources(1, vbo_resource, 0);
}
// Creates the GLFW window and GL 3.3 core context, builds a CUDA-registered
// VBO for interop, then runs the render loop: each frame re-runs the kernel
// to animate the mesh and draws the vertices as point sprites with the
// sphere shader.  Returns after window close and resource cleanup.
void runDisplay()
{
// glfw: initialize and configure
// ------------------------------
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
string title = "3D Points interop CUDA";
// glfw window creation
// --------------------
GLFWwindow* window = glfwCreateWindow(window_width, window_height, title.c_str(), NULL, NULL);
if (window == NULL)
{
cout << "Failed to create GLFW window" << endl;
glfwTerminate();
return;
}
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
glfwSetCursorPosCallback(window, mouse_callback);
glfwSetMouseButtonCallback(window, mouse_button_callback);
glfwSetScrollCallback(window, scroll_callback);
// glad: load all OpenGL function pointers
// ---------------------------------------
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
cout << "Failed to initialize GLAD" << endl;
return;
}
// build and compile our shader program
// ------------------------------------
Shader sphereShader("../graphics/shaders/sphereMVPShader.vs", "../graphics/shaders/sphereMVPShader.fs");
// set up vertex data (and buffer(s)) and configure vertex attributes
// ------------------------------------------------------------------
unsigned int VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &vbo);
// bind the Vertex Array Object first, then bind and set vertex buffer(s), and then configure vertex attributes(s).
glBindVertexArray(VAO);
// create buffer object
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// initialize buffer object (3 floats per vertex, filled by the kernel)
unsigned int size = mesh_width * mesh_height * 3 * sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
// register this buffer object with CUDA
cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, cudaGraphicsMapFlagsWriteDiscard);
// position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// seed the buffer with the t=0 mesh before the first frame
runCuda(&cuda_vbo_resource, 0.0f);
// You can unbind the VAO afterwards so other VAO calls won't accidentally modify this VAO, but this rarely happens. Modifying other
// VAOs requires a call to glBindVertexArray anyways so we generally don't unbind VAOs (nor VBOs) when it's not directly necessary.
glBindVertexArray(0);
float t1 = (float)glfwGetTime();
float t0 = (float)glfwGetTime();
float timer = 0.0f;
bool points = false;
PerformanceMonitor pMonitor(glfwGetTime(), 0.5f);
// render loop
// -----------
while (!glfwWindowShouldClose(window))
{
// frame timing; timer accumulates the animation time fed to the kernel
t1 = (float)glfwGetTime();
deltaTime = t1 - t0;
t0 = t1;
pMonitor.update(glfwGetTime());
stringstream ss;
ss << title << " " << pMonitor;
glfwSetWindowTitle(window, ss.str().c_str());
timer += deltaTime * 1.0f;
// input
// -----
processInput(window, &points);
runCuda(&cuda_vbo_resource, timer);
// render
// ------
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_PROGRAM_POINT_SIZE);
glEnable(GL_DEPTH_TEST);
// render the triangle
sphereShader.use();
//glPointSize(100.0f);
// pass projection matrix to shader (note that in this case it could change every frame)
glm::mat4 projection = glm::perspective(glm::radians(camera.Fovy), (float)window_width / (float)window_height, 0.1f, 100.0f);
sphereShader.setMat4("projection", projection);
// camera/view transformation
glm::mat4 view = camera.GetViewMatrix();
sphereShader.setMat4("view", view);
glm::mat4 model = glm::mat4(1.0f); // make sure to initialize matrix to identity matrix first
sphereShader.setMat4("model", model);
// while P is held, draw unscaled points; otherwise scale sprites by distance
if (points)
{
sphereShader.setFloat("pointRadius", 1);
sphereShader.setFloat("pointScale", 1);
}
else {
sphereShader.setFloat("pointRadius", 0.125f * 0.5f);
sphereShader.setFloat("pointScale", window_height / glm::tan(camera.Fovy * 0.5f * (float)M_PI / 180.0f));
}
sphereShader.setVec3("Color", glm::vec3(1.0f, 0.0f, 0.0f));
sphereShader.setVec3("lightDir", glm::vec3(1.0f, 1.0f, 0.0f));
glBindVertexArray(VAO);
glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height);
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
// -------------------------------------------------------------------------------
glfwSwapBuffers(window);
glfwPollEvents();
}
// optional: de-allocate all resources once they've outlived their purpose:
// ------------------------------------------------------------------------
glDeleteVertexArrays(1, &VAO);
deleteVBO(&vbo, cuda_vbo_resource);
// glfw: terminate, clearing all previously allocated GLFW resources.
// ------------------------------------------------------------------
glfwTerminate();
return;
}
// Releases the interop VBO: unregister it from CUDA first, then delete the
// GL buffer and clear the caller's handle.
void deleteVBO(GLuint* vbo, struct cudaGraphicsResource* vbo_res)
{
    // unregister this buffer object with CUDA
    cudaGraphicsUnregisterResource(vbo_res);
    // BUGFIX: glBindBuffer takes a target enum; the original passed the
    // literal 1, which is not a valid target and raises GL_INVALID_ENUM.
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    glDeleteBuffers(1, vbo);
    *vbo = 0;
}
// glfw: whenever the window size changed (by OS or user resize) this callback function executes
// ---------------------------------------------------------------------------------------------
// GLFW framebuffer-resize callback: keeps the GL viewport in sync with the
// window's framebuffer size.
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
// make sure the viewport matches the new window dimensions; note that width and
// height will be significantly larger than specified on retina displays.
glViewport(0, 0, width, height);
}
// process all input: query GLFW whether relevant keys are pressed/released this frame and react accordingly
// ---------------------------------------------------------------------------------------------------------
// Poll keyboard state once per frame and drive the global camera:
// WASD translates, arrow keys rotate, SPACE moves toward the origin, ESC quits.
// Holding P enables point rendering; releasing it reverts (*points mirrors the
// current key state every frame).
void processInput(GLFWwindow* window, bool *points)
{
    if (glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
        glfwSetWindowShouldClose(window, true);
    if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
        camera.ProcessKeyboardMovement(FORWARD, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)
        camera.ProcessKeyboardMovement(BACKWARD, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS)
        camera.ProcessKeyboardMovement(LEFT, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS)
        camera.ProcessKeyboardMovement(RIGHT, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_SPACE) == GLFW_PRESS)
        camera.ProcessKeyboardMovement(ORIGIN, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_UP) == GLFW_PRESS)
        camera.ProcessKeyboardRotation(AZIM_UP, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_DOWN) == GLFW_PRESS)
        camera.ProcessKeyboardRotation(AZIM_DOWN, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_LEFT) == GLFW_PRESS)
        camera.ProcessKeyboardRotation(ZEN_LEFT, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_RIGHT) == GLFW_PRESS)
        camera.ProcessKeyboardRotation(ZEN_RIGHT, deltaTime);
    if (glfwGetKey(window, GLFW_KEY_P) == GLFW_PRESS)
        *points = true;
    if (glfwGetKey(window, GLFW_KEY_P) == GLFW_RELEASE)
        *points = false;
}
// GLFW mouse-button callback.  Left button drags camera rotation, right
// button drags the view center.  GLFW reports mouse buttons only with
// GLFW_PRESS or GLFW_RELEASE, so each drag flag simply mirrors whether its
// button is currently held down.
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
{
    const bool pressed = (action == GLFW_PRESS);
    switch (button)
    {
    case GLFW_MOUSE_BUTTON_LEFT:
        camera.SetRotDrag(pressed);
        break;
    case GLFW_MOUSE_BUTTON_RIGHT:
        camera.SetCenterDrag(pressed);
        break;
    default:
        break;
    }
}
// glfw: whenever the mouse moves, this callback is called
// -------------------------------------------------------
// GLFW cursor-position callback: maps the pixel position to normalized
// coordinates in roughly [-1, 1] (y pointing up) and forwards them to the
// camera.
// NOTE(review): window_width / 2 and window_height / 2 are integer divisions;
// for odd window sizes this introduces a half-pixel bias — confirm intended.
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
    float posX = 2 * (xpos - window_width / 2) / window_width;
    float posY = 2 * (window_height / 2 - ypos) / window_height;
    camera.SetCurrentMousePos(posX, posY);
}
// glfw: whenever the mouse scroll wheel scrolls, this callback is called
// ----------------------------------------------------------------------
// GLFW scroll-wheel callback: vertical scroll amount is forwarded to the
// camera (horizontal offset is ignored).
void scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
    camera.ProcessMouseScroll(yoffset);
}
|
6c2296054aa0b5e7836661cb6eca1dc7ec7d9eb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <iostream>
# include <stdlib.h>
using namespace std;
// In-place array reversal.  Each thread walks the first half of the array
// with a grid-stride loop and swaps element i with its mirror n-1-i.
//
// BUG FIX: the original advanced its loop counter by
// (blockDim.x * gridDim.x) + index_1 instead of the true grid stride, and
// kept swapping the *fixed* pair (index_1, n-index_1-1) on every iteration —
// any thread whose loop body ran twice undid its own swap.  It only worked
// when the launch covered every element exactly once.  The loop index itself
// must be the swap position and must advance by blockDim.x * gridDim.x.
__global__ void reverseArray(int * array, int n) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n / 2; i += stride) {
        int pair_index = n - i - 1;
        int temp = array[i];
        array[i] = array[pair_index];
        array[pair_index] = temp;
    }
}
// Host driver: fills a 16M-int array with random values, reverses it on the
// GPU and verifies element i of the result equals element size-1-i of the
// source.
// NOTE(review): no HIP API return codes are checked — allocation or copy
// failures would go unnoticed until the verification loop fails.
int main() {
    int *host_array;
    int *host_array_reverse;
    int size = 16*1024*1024;
    int *device_array;
    // max kernel size
    int num_threads_per_block = 256;
    int num_blocks = size/num_threads_per_block;  // size divides evenly: grid covers every element once
    size_t mem_size = num_blocks * num_threads_per_block * sizeof(int);
    host_array = (int*) malloc(mem_size);
    host_array_reverse = (int*) malloc(mem_size);
    hipMalloc((void **) &device_array, mem_size);
    for (int i = 0; i < size; i++)
    {
        host_array[i] = rand() % 100;
    }
    hipMemcpy(device_array, host_array, mem_size, hipMemcpyHostToDevice);
    dim3 dimGrid(num_blocks);
    dim3 dimBlock(num_threads_per_block);
    hipLaunchKernelGGL(( reverseArray), dim3(dimGrid), dim3(dimBlock) , 0, 0, device_array, size);
    hipDeviceSynchronize();
    hipMemcpy(host_array_reverse, device_array, mem_size, hipMemcpyDeviceToHost);
    // verify the reversal on the host
    bool correct = true;
    for (int i = 0; i < size; i++)
    {
        if (host_array_reverse[i] != host_array[size-1-i]) {
            correct = false;
            break;
        }
    }
    if (correct) {
        printf("Array Reversed Correctly!\n");
    } else {
        printf("Something wrong with array reverse operation.\n");
    }
    hipFree(device_array);
    free(host_array);
    free(host_array_reverse);
    return 0;
} | 6c2296054aa0b5e7836661cb6eca1dc7ec7d9eb5.cu | # include <iostream>
# include <stdlib.h>
using namespace std;
// In-place array reversal.  Each thread walks the first half of the array
// with a grid-stride loop and swaps element i with its mirror n-1-i.
//
// BUG FIX: the original advanced its loop counter by
// (blockDim.x * gridDim.x) + index_1 instead of the true grid stride, and
// kept swapping the *fixed* pair (index_1, n-index_1-1) on every iteration —
// any thread whose loop body ran twice undid its own swap.  It only worked
// when the launch covered every element exactly once.  The loop index itself
// must be the swap position and must advance by blockDim.x * gridDim.x.
__global__ void reverseArray(int * array, int n) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n / 2; i += stride) {
        int pair_index = n - i - 1;
        int temp = array[i];
        array[i] = array[pair_index];
        array[pair_index] = temp;
    }
}
// Host driver: fills a 16M-int array with random values, reverses it on the
// GPU and verifies element i of the result equals element size-1-i of the
// source.
int main() {
    int *host_array;
    int *host_array_reverse;
    int size = 16*1024*1024;
    int *device_array;
    // launch configuration: size divides evenly, so the grid covers every
    // element exactly once
    int num_threads_per_block = 256;
    int num_blocks = size/num_threads_per_block;
    size_t mem_size = num_blocks * num_threads_per_block * sizeof(int);
    host_array = (int*) malloc(mem_size);
    host_array_reverse = (int*) malloc(mem_size);
    cudaMalloc((void **) &device_array, mem_size);
    for (int i = 0; i < size; i++)
    {
        host_array[i] = rand() % 100;
    }
    cudaMemcpy(device_array, host_array, mem_size, cudaMemcpyHostToDevice);
    dim3 dimGrid(num_blocks);
    dim3 dimBlock(num_threads_per_block);
    reverseArray<<< dimGrid, dimBlock >>>(device_array, size);
    // FIX: cudaThreadSynchronize() has been deprecated since CUDA 4.0 and was
    // removed in CUDA 12; cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
    cudaMemcpy(host_array_reverse, device_array, mem_size, cudaMemcpyDeviceToHost);
    // verify the reversal on the host
    bool correct = true;
    for (int i = 0; i < size; i++)
    {
        if (host_array_reverse[i] != host_array[size-1-i]) {
            correct = false;
            break;
        }
    }
    if (correct) {
        printf("Array Reversed Correctly!\n");
    } else {
        printf("Something wrong with array reverse operation.\n");
    }
    cudaFree(device_array);
    free(host_array);
    free(host_array_reverse);
    return 0;
}
b9da282c175ee7c9e52dba2595f8e47db089c8c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This is a CUDA code that performs an iterative reverse edge
* detection algorithm.
*
* Training material developed by James Perry and Alan Gray
* Copyright EPCC, The University of Edinburgh, 2013
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include <sys/types.h>
//#include <sys/time.h>
#include "reconstruct.h"
#include<chrono>
/* Data buffer to read edge data into */
float edge[N][N];
/* Data buffer for the resulting image */
float img[N][N];
/* Work buffers, with halos */
float host_input[N+2][N+2];
float gpu_output[N+2][N+2];
float host_output[N+2][N+2];
using std::chrono::time_point;
using std::chrono::system_clock;
using std::chrono::duration_cast;
/* Host driver: runs ITERATIONS steps of the inverse edge-detection stencil on
 * the GPU (ping-ponging d_input/d_output on the device), repeats the same
 * computation on the CPU, compares the two results element-wise against
 * MAX_DIFF, and writes the reconstructed image to output.pgm.
 * NOTE(review): hipMalloc/hipMemcpy return codes are not checked; only the
 * kernel loop is covered by checkCUDAError. */
int main(int argc, char *argv[])
{
  int x, y;
  int i;
  int errors;
  time_point<system_clock> start_time_inc_data, end_time_inc_data;
  time_point<system_clock> cpu_start_time, cpu_end_time;
  float *d_input, *d_output, *d_edge;
  /* every buffer carries a one-pixel halo on each side */
  size_t memSize = (N + 2) * (N + 2) * sizeof(float);
  printf("Image size: %dx%d\n", N, N);
  printf("ITERATIONS: %d\n", ITERATIONS);
  printf("THREADSPERBLOCK: %d\n", THREADSPERBLOCK);
  if (N%THREADSPERBLOCK != 0) {
    printf("Error: THREADSPERBLOCK must exactly divide N\n");
    exit(1);
  }
  /* allocate memory on device */
  hipMalloc((void**)&d_input, memSize);
  hipMalloc((void**)&d_output, memSize);
  hipMalloc((void**)&d_edge, memSize);
  /* read in edge data */
  datread("edge2048x2048.dat", (void *)edge, N, N);
  /* zero buffer so that halo is zeroed */
  for (y = 0; y < N + 2; y++) {
    for (x = 0; x < N + 2; x++) {
      host_input[y][x] = 0.0;
    }
  }
  /* copy input to buffer with halo */
  for (y = 0; y < N; y++) {
    for (x = 0; x < N; x++) {
      host_input[y + 1][x + 1] = edge[y][x];
    }
  }
  /*
   * copy to all the GPU arrays. d_output doesn't need to have this data but
   * this will zero its halo
   */
  start_time_inc_data = get_current_time();
  hipMemcpy(d_input, host_input, memSize, hipMemcpyHostToDevice);
  hipMemcpy(d_output, host_input, memSize, hipMemcpyHostToDevice);
  hipMemcpy(d_edge, host_input, memSize, hipMemcpyHostToDevice);
  /* run on GPU */
  for (i = 0; i < ITERATIONS; i++) {
    /* run the kernel */
    dim3 blocksPerGrid(N / THREADPERBLOCK_X, N / THREADPERBLOCK_Y, 1);
    dim3 threadsPerBlock(THREADPERBLOCK_X, THREADPERBLOCK_Y, 1);
    hipLaunchKernelGGL(( inverseEdgeDetect) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_output, d_input, d_edge);
    hipDeviceSynchronize();
    /* ping-pong device buffers instead of round-tripping through the host */
    float* swap = d_input;
    d_input = d_output;
    d_output = swap;
    /* copy the data back from the output buffer on the device */
    //hipMemcpy(gpu_output, d_output, memSize, hipMemcpyDeviceToHost);
    /* copy the new data to the input buffer on the device */
    //hipMemcpy(d_input, gpu_output, memSize, hipMemcpyHostToDevice);
  }
  /* after the final swap the newest iteration result lives in d_input */
  hipMemcpy(gpu_output, d_input, memSize, hipMemcpyDeviceToHost);
  end_time_inc_data = get_current_time();
  checkCUDAError("Main loop");
  /*
   * run on host for comparison
   */
  cpu_start_time = get_current_time();
  for (i = 0; i < ITERATIONS; i++) {
    /* perform stencil operation */
    for (y = 0; y < N; y++) {
      for (x = 0; x < N; x++) {
        host_output[y + 1][x + 1] = (host_input[y + 1][x] + host_input[y + 1][x + 2] +
        host_input[y][x + 1] + host_input[y + 2][x + 1] \
        - edge[y][x]) * 0.25;
      }
    }
    /* copy output back to input buffer */
    for (y = 0; y < N; y++) {
      for (x = 0; x < N; x++) {
        host_input[y + 1][x + 1] = host_output[y + 1][x + 1];
      }
    }
  }
  cpu_end_time = get_current_time();
  /* Maximum difference allowed between host result and GPU result */
  #define MAX_DIFF 0.01
  /* check that GPU result matches host result */
  errors = 0;
  for (y = 0; y < N; y++) {
    for (x = 0; x < N; x++) {
      float diff = fabs(gpu_output[y + 1][x + 1] - host_output[y + 1][x + 1]);
      if (diff >= MAX_DIFF) {
        errors++;
        //printf("Error at %d,%d (CPU=%f, GPU=%f)\n", x, y, \
        // host_output[y+1][x+1], \
        // gpu_output[y+1][x+1]);
      }
    }
  }
  if (errors == 0)
    printf("\n\n ***TEST PASSED SUCCESSFULLY*** \n\n\n");
  else
    printf("\n\n ***ERROR: TEST FAILED*** \n\n\n");
  /* copy result to output buffer */
  for (y = 0; y < N; y++) {
    for (x = 0; x < N; x++) {
      img[y][x] = gpu_output[y + 1][x + 1];
    }
  }
  /* write PGM */
  pgmwrite("output.pgm", (void *)img, N, N);
  hipFree(d_input);
  hipFree(d_output);
  hipFree(d_edge);
  printf("GPU Time (Including Data Transfer): %lld ms\n", \
  duration_cast<std::chrono::milliseconds>(end_time_inc_data - start_time_inc_data).count());
  printf("CPU Time : %lld ms\n", \
  duration_cast<std::chrono::milliseconds>(cpu_end_time - cpu_start_time).count());
  return 0;
}
| b9da282c175ee7c9e52dba2595f8e47db089c8c8.cu | /*
* This is a CUDA code that performs an iterative reverse edge
* detection algorithm.
*
* Training material developed by James Perry and Alan Gray
* Copyright EPCC, The University of Edinburgh, 2013
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include <sys/types.h>
//#include <sys/time.h>
#include "reconstruct.h"
#include<chrono>
/* Data buffer to read edge data into */
float edge[N][N];
/* Data buffer for the resulting image */
float img[N][N];
/* Work buffers, with halos */
float host_input[N+2][N+2];
float gpu_output[N+2][N+2];
float host_output[N+2][N+2];
using std::chrono::time_point;
using std::chrono::system_clock;
using std::chrono::duration_cast;
/*
 * Host driver: runs ITERATIONS steps of the inverse edge-detection stencil on
 * the GPU (ping-ponging d_input/d_output on the device), repeats the same
 * computation on the CPU, compares the two results element-wise against
 * MAX_DIFF, and writes the reconstructed image to output.pgm.
 */
int main(int argc, char *argv[])
{
    int x, y;
    int i;
    int errors;
    time_point<system_clock> start_time_inc_data, end_time_inc_data;
    time_point<system_clock> cpu_start_time, cpu_end_time;
    float *d_input, *d_output, *d_edge;
    /* every buffer carries a one-pixel halo on each side */
    size_t memSize = (N + 2) * (N + 2) * sizeof(float);
    printf("Image size: %dx%d\n", N, N);
    printf("ITERATIONS: %d\n", ITERATIONS);
    printf("THREADSPERBLOCK: %d\n", THREADSPERBLOCK);
    if (N % THREADSPERBLOCK != 0) {
        printf("Error: THREADSPERBLOCK must exactly divide N\n");
        exit(1);
    }
    /* allocate memory on device */
    cudaMalloc((void**)&d_input, memSize);
    cudaMalloc((void**)&d_output, memSize);
    cudaMalloc((void**)&d_edge, memSize);
    /* read in edge data */
    datread("edge2048x2048.dat", (void *)edge, N, N);
    /* zero buffer so that halo is zeroed */
    for (y = 0; y < N + 2; y++) {
        for (x = 0; x < N + 2; x++) {
            host_input[y][x] = 0.0;
        }
    }
    /* copy input to buffer with halo */
    for (y = 0; y < N; y++) {
        for (x = 0; x < N; x++) {
            host_input[y + 1][x + 1] = edge[y][x];
        }
    }
    /*
     * copy to all the GPU arrays. d_output doesn't need to have this data but
     * this will zero its halo
     */
    start_time_inc_data = get_current_time();
    cudaMemcpy(d_input, host_input, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_output, host_input, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_edge, host_input, memSize, cudaMemcpyHostToDevice);
    /* launch geometry is loop-invariant — hoisted out of the iteration loop */
    dim3 blocksPerGrid(N / THREADPERBLOCK_X, N / THREADPERBLOCK_Y, 1);
    dim3 threadsPerBlock(THREADPERBLOCK_X, THREADPERBLOCK_Y, 1);
    /* run on GPU */
    for (i = 0; i < ITERATIONS; i++) {
        inverseEdgeDetect <<< blocksPerGrid, threadsPerBlock >>> (d_output, d_input, d_edge);
        /* FIX: cudaThreadSynchronize() is deprecated (removed in CUDA 12);
         * cudaDeviceSynchronize() is the supported equivalent. */
        cudaDeviceSynchronize();
        /* ping-pong device buffers instead of round-tripping through the host */
        float* swap = d_input;
        d_input = d_output;
        d_output = swap;
    }
    /* after the final swap the newest iteration result lives in d_input */
    cudaMemcpy(gpu_output, d_input, memSize, cudaMemcpyDeviceToHost);
    end_time_inc_data = get_current_time();
    checkCUDAError("Main loop");
    /*
     * run on host for comparison
     */
    cpu_start_time = get_current_time();
    for (i = 0; i < ITERATIONS; i++) {
        /* perform stencil operation */
        for (y = 0; y < N; y++) {
            for (x = 0; x < N; x++) {
                host_output[y + 1][x + 1] = (host_input[y + 1][x] + host_input[y + 1][x + 2] +
                                             host_input[y][x + 1] + host_input[y + 2][x + 1]
                                             - edge[y][x]) * 0.25;
            }
        }
        /* copy output back to input buffer */
        for (y = 0; y < N; y++) {
            for (x = 0; x < N; x++) {
                host_input[y + 1][x + 1] = host_output[y + 1][x + 1];
            }
        }
    }
    cpu_end_time = get_current_time();
    /* Maximum difference allowed between host result and GPU result */
    #define MAX_DIFF 0.01
    /* check that GPU result matches host result */
    errors = 0;
    for (y = 0; y < N; y++) {
        for (x = 0; x < N; x++) {
            float diff = fabs(gpu_output[y + 1][x + 1] - host_output[y + 1][x + 1]);
            if (diff >= MAX_DIFF) {
                errors++;
            }
        }
    }
    if (errors == 0)
        printf("\n\n ***TEST PASSED SUCCESSFULLY*** \n\n\n");
    else
        printf("\n\n ***ERROR: TEST FAILED*** \n\n\n");
    /* copy result to output buffer */
    for (y = 0; y < N; y++) {
        for (x = 0; x < N; x++) {
            img[y][x] = gpu_output[y + 1][x + 1];
        }
    }
    /* write PGM */
    pgmwrite("output.pgm", (void *)img, N, N);
    cudaFree(d_input);
    cudaFree(d_output);
    cudaFree(d_edge);
    printf("GPU Time (Including Data Transfer): %lld ms\n",
           duration_cast<std::chrono::milliseconds>(end_time_inc_data - start_time_inc_data).count());
    printf("CPU Time : %lld ms\n",
           duration_cast<std::chrono::milliseconds>(cpu_end_time - cpu_start_time).count());
    return 0;
}
|
bedba927e6f8433af2747d9199c8529e791a4cda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fastertransformer/common.h"
#include "cuda_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <climits>
#include <cfloat>
namespace fastertransformer{
// tanh-approximation GELU: x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715*x^3))).
// The intermediate math is done in float even for half inputs (implicit
// promotion in the expression).
template <typename T>
__inline__ __device__
T gelu(T x)
{
  float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
  return x * cdf;
}
// half2 specialization: applies the tanh-approximation GELU to both packed
// halves.  The cdf is computed in float for accuracy, then the result is
// packed back and multiplied in half precision.
template <>
__inline__ __device__
half2 gelu(half2 val)
{
  half2 val_pow3 = __hmul2(val, __hmul2(val, val));  // x^3, elementwise
  float2 tmp_pow = __half22float2(val_pow3);
  float2 tmp = __half22float2(val);
  tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
  tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
  return __hmul2(val, __float22half2_rn(tmp));
}
// Butterfly (xor-shuffle) warp reduction: after 5 steps every lane holds the
// sum of all 32 lanes.  FINAL_MASK (from common.h) selects the participating
// lanes — assumes a fully active warp.
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
  for(int mask = 16; mask > 0; mask >>= 1)
    val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
  return val;
}
// Two-stage block reduction: each warp reduces via shuffle, lane 0 of each
// warp parks its partial in shared memory, then the first warp reduces the
// partials.  All threads return the block-wide sum.
// NOTE(review): the `threadIdx.x < (blockDim.x >> 5)` guard assumes blockDim.x
// is a multiple of 32 (and >= 32); a trailing partial warp's contribution
// would be dropped — confirm all launch sites satisfy this.
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
  static __shared__ T shared[32];     // one slot per possible warp
  int lane = threadIdx.x & 0x1f;      // lane index within the warp
  int wid = threadIdx.x >> 5;         // warp index within the block
  val = warpReduceSum<T>(val);
  if(lane == 0)
    shared[wid] = val;
  __syncthreads();
  // first warp re-reduces the per-warp partials (0 for empty slots)
  val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : (T)0.0f;
  val = warpReduceSum(val);
  return val;
}
// Butterfly warp reduction for max: every lane ends up with the warp-wide
// maximum.  Assumes a fully active warp (FINAL_MASK).
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
  for(int mask = 16; mask > 0; mask >>= 1)
    val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
  return val;
}
/* Calculate the maximum of all elements in a block.
 * Same two-stage scheme as blockReduceSum; -1e20f acts as the identity for
 * the unused shared-memory slots.
 * NOTE(review): like blockReduceSum, assumes blockDim.x is a multiple of 32. */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
  static __shared__ T shared[32];
  int lane = threadIdx.x & 0x1f; // in-warp idx
  int wid = threadIdx.x >> 5;    // warp idx
  val = warpReduceMax(val);      // get maxx in each warp
  if(lane == 0)                  // record in-warp maxx by warp Idx
    shared[wid] = val;
  __syncthreads();
  val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : (T)-1e20f;
  val = warpReduceMax<T>(val);
  return val;
}
// Fused bias-add + GELU over an [m, n] row-major matrix.
// Columns are tiled by blockDim.x (the bias value is loaded once per tile and
// reused across rows); rows are covered by a grid-stride loop on blockIdx.x.
// NOTE(review): n must be an exact multiple of blockDim.x — the launcher
// passes blockDim.x = n/4, so n must be divisible by 4.
template <typename T>
__global__
void add_bias_act(T* out, const T* bias, int m, int n)
{
  T val, reg_bias;
  int row_id = blockIdx.x;
  int ite = n / blockDim.x;   // column tiles handled per thread
  int tid = threadIdx.x;
  for(int i = 0; i < ite; ++i)
  {
    reg_bias = __ldg(&bias[i * blockDim.x + tid]);   // read-only cache load
    row_id = blockIdx.x;
    while(row_id < m){
      val = out[tid + i * blockDim.x + row_id * n] + reg_bias;
      out[tid + i * blockDim.x + row_id * n] = gelu<T>(val);
      row_id += gridDim.x;
    }
  }
}
// half specialization of the fused bias-add + GELU: processes two elements
// per thread via half2 vector loads/stores.
// NOTE(review): requires n to be even and divisible by 2*blockDim.x.
template <>
__global__
void add_bias_act(half* out, const half* bias, int m, int n)
{
  half2 val, reg_bias;
  int row_id = blockIdx.x;
  int ite = n / blockDim.x / 2;   // half2 tiles handled per thread
  int tid = threadIdx.x;
  half2* out_ptr = (half2*) out;
  const half2* bias_ptr = (half2*) bias;
  for(int i = 0; i < ite; ++i)
  {
    reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]);
    row_id = blockIdx.x;
    while(row_id < m){
      val = out_ptr[tid + i * blockDim.x + row_id * n / 2];
      val = __hadd2(val, reg_bias);
      out_ptr[tid + i * blockDim.x + row_id * n / 2] = gelu<half2>(val);
      row_id += gridDim.x;
    }
  }
}
// Fused residual-add + bias-add + LayerNorm, in place on `out`.
// One block per row; requires blockDim.x == n (one element per thread).
// Uses biased variance (divide by n) with epsilon 1e-6.
template <typename T>
__global__
void add_bias_input_layernorm(T* out, const T* input, const T* bias, const T* gamma, const T* beta, int m, int n)
{
  int tid = threadIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float local_out = 0.0f;
  // residual + bias, accumulated in float regardless of T
  local_out += (float)(out[blockIdx.x * n + tid] + input[blockIdx.x * n + tid] + __ldg(&bias[tid]));
  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();
  variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean));
  if(threadIdx.x == 0)
    s_variance = variance / n + 1e-6f;
  __syncthreads();
  out[blockIdx.x * n + tid] =
    (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
// half specialization of fused residual-add + bias-add + LayerNorm.
// One block per row, two elements per thread via half2; requires
// blockDim.x == n/2 (n even).  Statistics are accumulated in float.
template <>
__global__
void add_bias_input_layernorm(half* out, const half* input, const half* bias,
  const half* gamma, const half* beta, int m, int n)
{
  int tid = threadIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float2 local_out_fp2;
  half2* out_ptr = (half2*)out;
  const half2* input_ptr = (const half2*)input;
  const half2* bias_ptr = (const half2*)bias;
  const half2* gamma_ptr = (const half2*)gamma;
  const half2* beta_ptr = (const half2*)beta;
  float local_out = 0.0f;
  int id = blockIdx.x * n / 2 + tid;
  // residual + bias in half2, then widen to float for the statistics
  local_out_fp2 = __half22float2(__hadd2(__hadd2(out_ptr[id], input_ptr[id]), __ldg(&bias_ptr[tid])));
  local_out += local_out_fp2.x;
  local_out += local_out_fp2.y;
  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();
  variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
  variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
  variance = blockReduceSum<float>(variance);
  if(threadIdx.x == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);   // note: stores 1/stddev, not variance
  __syncthreads();
  float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
  float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
  local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
  local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
  out_ptr[id] = __float22half2_rn(local_out_fp2);
}
// Variant of the fused residual + bias + LayerNorm tuned for n == 768/1024:
// four elements per thread (launched with n/4 threads per block), which keeps
// intermediate values in registers across both reduction passes.
template <typename T>
__global__
void add_bias_input_layernorm_v2(T* out, const T* __restrict input, const T* __restrict bias,
  const T* __restrict gamma, const T* __restrict beta, int n)
{
  const int ite = 4;   // elements per thread; launcher must use blockDim.x == n/4
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float local_out[ite];
  float sum = 0.0f;
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n + col_id;
    local_out[i] = (float)(out[id] + __ldg(&input[id]) + __ldg(&bias[col_id]));
    sum += local_out[i];
  }
  mean = blockReduceSum<float>(sum);
  if(tid == 0)
    s_mean = mean / n;
  __syncthreads();
  float var = 0.0f;
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    float diff = local_out[i] - s_mean;
    var += diff * diff;
  }
  variance = blockReduceSum<float>(var);
  if(tid == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);   // 1/stddev
  __syncthreads();
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n + col_id;
    out[id] = (T)((local_out[i] - s_mean) * s_variance * (float)__ldg(&gamma[col_id]) + (float)__ldg(&beta[col_id]));
  }
}
// half specialization of the register-blocked LayerNorm variant: eight
// elements per thread as four half2 values (launched with n/8 threads).
template <>
__global__
void add_bias_input_layernorm_v2(half* out, const half* __restrict input, const half* __restrict bias,
  const half* __restrict gamma, const half* __restrict beta, int n)
{
  const int ite = 4;   // half2 values per thread => 8 scalars
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  half2 local_out_half2[ite];
  half2* out_ptr = (half2*)out;
  const half2* input_ptr = (const half2*)input;
  const half2* bias_ptr = (const half2*)bias;
  const half2* gamma_ptr = (const half2*)gamma;
  const half2* beta_ptr = (const half2*)beta;
  // float sum = 0.0f;
  half2 sum = __float2half2_rn(0.0f);   // accumulated in half2, widened below
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n / 2 + col_id;
    local_out_half2[i] = out_ptr[id] + __ldg(&input_ptr[id]) + __ldg(&bias_ptr[col_id]);
    sum += local_out_half2[i];
  }
  mean = blockReduceSum<float>((float)(sum.x + sum.y));
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();
  float var = 0.0f;
  half2 s_mean_2 = __float2half2_rn(s_mean);
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    local_out_half2[i] = local_out_half2[i] - s_mean_2;   // center in place
    float v1 = (float)local_out_half2[i].x;
    float v2 = (float)local_out_half2[i].y;
    var += v1 * v1 + v2 * v2;
  }
  variance = blockReduceSum<float>(var);
  if(threadIdx.x == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);   // 1/stddev
  __syncthreads();
  half2 s_var_2 = __float2half2_rn(s_variance);
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n / 2 + col_id;
    out_ptr[id] = local_out_half2[i] * s_var_2 * __ldg(&gamma_ptr[col_id]) + __ldg(&beta_ptr[col_id]);
  }
}
// Host launcher for add_bias_act on an [m, n] matrix.
// Grid covers rows four at a time; block is n/4 threads, so n must be
// divisible by 4 and n/4 must not exceed 1024 (asserted).
template <typename T>
void add_bias_act_kernelLauncher(T* out, const T* bias, int m, int n, hipStream_t stream)
{
  dim3 grid(ceil(m / 4.));
  dim3 block(n / 4);
  assert(block.x <= 1024);
  hipLaunchKernelGGL(( add_bias_act<T>), dim3(grid), dim3(block), 0, stream, out, bias, m, n);
}
// Host launcher for the fused residual + bias + LayerNorm (float path).
// One block per row; n must be <= 1024 (asserted).  For the common hidden
// sizes 768/1024 the register-blocked v2 kernel (n/4 threads) is used.
template<typename T>
void add_bias_input_layernorm_kernelLauncher(T* out, const T* input, const T* bias,
  const T* gamma, const T* beta, int m, int n, hipStream_t stream)
{
  dim3 grid(m);
  dim3 block(n);
  assert(n <= 1024);
  if(n == 768 || n == 1024)
    hipLaunchKernelGGL(( add_bias_input_layernorm_v2<T>), dim3(grid), dim3(n / 4), 0, stream, out, input, bias, gamma, beta, n);
  else
    hipLaunchKernelGGL(( add_bias_input_layernorm<T>), dim3(grid), dim3(block), 0, stream, out, input, bias, gamma, beta, m, n);
}
// Host launcher for the fused LayerNorm, half path: two elements per thread
// (block = n/2), switching to the 8-elements-per-thread v2 kernel (n/8
// threads) for large batches with hidden size 768/1024.
template <>
void add_bias_input_layernorm_kernelLauncher(half* out, const half* input, const half* bias,
  const half* gamma, const half* beta, int m, int n, hipStream_t stream)
{
  dim3 grid(m);
  dim3 block(n / 2);
  assert(n / 2 <= 1024);
  if(m >= 512 && (n == 768 || n == 1024))
    hipLaunchKernelGGL(( add_bias_input_layernorm_v2<half>), dim3(grid), dim3(n / 8), 0, stream, out, input, bias, gamma, beta, n);
  else
    hipLaunchKernelGGL(( add_bias_input_layernorm<half>), dim3(grid), dim3(block), 0, stream, out, input, bias, gamma, beta, m, n);
}
// Per-row (one block per beam/batch entry) logits post-processing:
//  1. add the output bias; for finished rows, force a one-hot on end_id
//     (FLT_MAX there, -FLT_MAX elsewhere);
//  2. compute a numerically stable log-softmax in place
//     (subtract row max, exp, normalize, log).
template <typename T>
__global__ void update_logits_kernel(T* logits, const T* bias, const int end_id, const bool* finished, const int n)
{
  int bid = blockIdx.x;
  bool finish = finished[bid];
  int offset = bid * n;
  float max_val = -1 * FLT_MAX;
  __shared__ float s_max_val;
  __shared__ float s_sum_val;
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    if(finish)
      logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX;
    else
      logits[offset + tid] += bias[tid];
    max_val = max(max_val, logits[offset + tid]);
  }
  max_val = blockReduceMax<float>((float)max_val);
  if(threadIdx.x == 0)
    s_max_val = max_val;
  __syncthreads();
  float sum_val = 0.0f;
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val);
    sum_val += (float)logits[offset + tid];
  }
  sum_val = blockReduceSum<float>(sum_val);
  if(threadIdx.x == 0)
    s_sum_val = sum_val;
  __syncthreads();
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = logf((float)logits[offset + tid] / s_sum_val);
  }
}
// Bias-add-only variant of update_logits_kernel: adds the output bias, or for
// finished rows forces a one-hot on end_id; no softmax/log is applied.
template <typename T>
__global__ void update_logits_kernel_without_softmax(T* logits, const T* bias, const int end_id, const bool* finished, const int n)
{
  int bid = blockIdx.x;
  bool finish = finished[bid];
  int offset = bid * n;
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    if(finish)
      logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX;
    else
      logits[offset + tid] += bias[tid];
  }
}
// Same as update_logits_kernel but produces probabilities (softmax) rather
// than log-probabilities: the final pass normalizes without taking the log.
template <typename T>
__global__ void update_logits_kernel_without_log(T* logits, const T* bias, const int end_id, const bool* finished, const int n)
{
  int bid = blockIdx.x;
  bool finish = finished[bid];
  int offset = bid * n;
  float max_val = -1 * FLT_MAX;
  __shared__ float s_max_val;
  __shared__ float s_sum_val;
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    if(finish)
      logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX;
    else
      logits[offset + tid] += bias[tid];
    max_val = max(max_val, logits[offset + tid]);
  }
  max_val = blockReduceMax<float>((float)max_val);
  if(threadIdx.x == 0)
    s_max_val = max_val;
  __syncthreads();
  float sum_val = 0.0f;
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val);
    sum_val += (float)logits[offset + tid];
  }
  sum_val = blockReduceSum<float>(sum_val);
  if(threadIdx.x == 0)
    s_sum_val = sum_val;
  __syncthreads();
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = ((float)logits[offset + tid] / s_sum_val);
  }
}
// Gather kernel: compacts a padded [batch*max_seq_len, n] layout into a dense
// [valid_word_num, n] layout.  One block per valid token; tmp_mask_offset[bid]
// is the cumulative padding before that token, and is also persisted into
// mask_offset for the later rebuild pass.
template<typename T>
__global__ void remove_sequence_length_padding(const T* src, T* tgt,
  const int* tmp_mask_offset,
  int* mask_offset,
  const int n)
{
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  mask_offset[bid] = tmp_mask_offset[bid];
  const int src_seq_id = bid + mask_offset[bid];   // position in the padded layout
  const int tgt_seq_id = bid;                      // position in the dense layout
  for(int i = tid; i < n; i += blockDim.x)
  {
    tgt[tgt_seq_id * n + i] = src[src_seq_id * n + i];
  }
}
// Host launcher: one block (256 threads) per valid token; m is the number of
// valid tokens, n the hidden dimension.
template<typename T>
void remove_sequence_length_padding_kernelLauncher(const T* src, T* tgt,
  const int* tmp_mask_offset,
  int* mask_offset,
  const int m, const int n, hipStream_t stream)
{
  // src: [batch_size*max_seq_len, hidden_dim]
  // tgt: [valid_word_num, hidden_dim]
  hipLaunchKernelGGL(( remove_sequence_length_padding), dim3(m), dim3(256), 0, stream, src, tgt, tmp_mask_offset, mask_offset, n);
}
// Scatter kernel: inverse of remove_sequence_length_padding.  Copies each
// dense row back to its padded position using the saved per-token offset.
// Padded positions that receive no row are left untouched.
template<typename T>
__global__ void rebuild_sequence_length_padding(const T* src, T* tgt,
  const int* mask_offset,
  const int n)
{
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  const int tgt_seq_id = bid + mask_offset[bid];   // position in the padded layout
  const int src_seq_id = bid;                      // position in the dense layout
  for(int i = tid; i < n; i += blockDim.x)
  {
    tgt[tgt_seq_id * n + i] = src[src_seq_id * n + i];
  }
}
// Host launcher: one block (256 threads) per valid token; m is the number of
// valid tokens, n the hidden dimension.
template<typename T>
void rebuild_sequence_length_padding_kernelLauncher(const T* src, T* tgt,
  const int* mask_offset, const int m,
  const int n, hipStream_t stream)
{
  // src: [valid_word_num, hidden_dim]
  // tgt: [batch_size*max_seq_len, hidden_dim]
  hipLaunchKernelGGL(( rebuild_sequence_length_padding), dim3(m), dim3(256), 0, stream, src, tgt, mask_offset, n);
}
// Deliberately single-threaded kernel (launched <<<1,1>>>): sequentially scans
// the per-sequence lengths and, for every valid token, records the cumulative
// amount of padding that precedes it.  Also writes the total number of valid
// tokens to valid_word_num[0].  The scan is inherently ordered, hence no
// parallelism.
__global__ void build_sequence_length_padding_offset(const int* sequence_length,
  const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset)
{
  // do cumulated sum
  int total_seq_len = 0;
  int cum_offset = 0;
  int index = 0;
  for(int i = 0; i < batch_size; i++)
  {
    const int seq_len = sequence_length[i];
    for(int j = 0; j < seq_len; j++)
    {
      tmp_mask_offset[index] = cum_offset;
      index++;
    }
    cum_offset += max_seq_len - seq_len;   // padding contributed by this sequence
    total_seq_len += seq_len;
  }
  valid_word_num[0] = total_seq_len;
}
// Host launcher for the sequential offset-building kernel (one thread total;
// the cumulative scan cannot be parallelized without a different algorithm).
void build_sequence_length_padding_offset_kernelLauncher(const int* sequence_length,
  const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset,
  hipStream_t stream)
{
  hipLaunchKernelGGL(( build_sequence_length_padding_offset), dim3(1), dim3(1), 0, stream, sequence_length,
  batch_size, max_seq_len, valid_word_num, tmp_mask_offset);
}
template void rebuild_sequence_length_padding_kernelLauncher(const float* src, float* tgt,
const int* mask_offset, const int m,
const int n, hipStream_t stream);
template void rebuild_sequence_length_padding_kernelLauncher(const half* src, half* tgt,
const int* mask_offset, const int m,
const int n, hipStream_t stream);
template void remove_sequence_length_padding_kernelLauncher(const float* src, float* tgt,
const int* tmp_mask_offset,
int* mask_offset, const int m,
const int n, hipStream_t stream);
template void remove_sequence_length_padding_kernelLauncher(const half* src, half* tgt,
const int* tmp_mask_offset,
int* mask_offset, const int m,
const int n, hipStream_t stream);
// Host entry point: bias-add + log-softmax over m rows of n logits each.
void update_logits(float* logits, const float* bias, const int end_id, const bool* finished,
  const int m, const int n, hipStream_t stream)
{
  dim3 grid(m);
  dim3 block(min(n, 1024));
  /*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big. */
  hipLaunchKernelGGL(( update_logits_kernel<float>), dim3(grid), dim3(block), 0, stream, logits, bias, end_id, finished, n);
}
// Host entry point: bias-add only (no normalization) over m rows of n logits.
void update_logits_without_softmax(float* logits, const float* bias, const int end_id, const bool* finished,
  const int m, const int n, hipStream_t stream)
{
  dim3 grid(m);
  dim3 block(min(n, 1024));
  /*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big. */
  hipLaunchKernelGGL(( update_logits_kernel_without_softmax<float>), dim3(grid), dim3(block), 0, stream, logits, bias, end_id, finished, n);
}
// Host entry point: bias-add + softmax (probabilities, no log) over m rows.
void update_logits_without_log(float* logits, const float* bias, const int end_id, const bool* finished,
  const int m, const int n, hipStream_t stream)
{
  dim3 grid(m);
  dim3 block(min(n, 1024));
  /*n is the vocab_size, e.g., 30000, 7000.... vocab_size is usually very big. */
  hipLaunchKernelGGL(( update_logits_kernel_without_log<float>), dim3(grid), dim3(block), 0, stream, logits, bias, end_id, finished, n);
}
template void add_bias_act_kernelLauncher<float>(
float* out, const float* bias, int m, int n, hipStream_t stream);
template void add_bias_input_layernorm_kernelLauncher<float>(
float* out, const float* input, const float* bias, const float* gamma, const float* beta,
int m, int n, hipStream_t stream);
template void add_bias_act_kernelLauncher<half>(
half* out, const half* bias, int m, int n, hipStream_t stream);
template void add_bias_input_layernorm_kernelLauncher<half>(
half* out, const half* input, const half* bias, const half* gamma, const half* beta,
int m, int n, hipStream_t stream);
/* *********************************** Debug tools *********************************** */
// Debug-only kernel, meant to be launched with a single thread (<<<1,1>>>):
// serially scans `buf` and prints sum(|x|) and mean(|x|).
// BUG FIX: `sum` was an uninitialized local that was read before being
// written, so the printed statistics were garbage; it is now zero-initialized.
template <typename T>
__global__
void print_abs_mean_kernel(const T* buf, uint size)
{
    float sum = 0.0f;
    for(uint i = 0; i < size; i++)
    {
        sum += fabsf((float)buf[i]);
        // printf("[INFO] buf[%d] %f \n", i, buf[i]);
    }
    printf("mean: %f \n", (float) sum / (float) size);
    printf("sum: %f \n", sum);
}
// Debug helper kernel: intended for a single-thread launch; prints the first
// `size` values of the device buffer as floats, space-separated, newline-terminated.
template <typename T>
__global__
void print_kernel(const T* buf, uint size)
{
    for(int idx = 0; idx < size; ++idx)
    {
        printf("%f ", (float)buf[idx]);
    }
    printf("\n");
}
// Host-side debug helper: prints the first `size` elements of device buffer `buf`.
// The sync/error-check pairs bracket the launch deliberately: the first pair
// flushes errors from earlier asynchronous work so they are not attributed to
// this kernel; the second pair surfaces errors from the print kernel itself.
template <typename T>
void print_first_k(const T* buf, uint size, hipStream_t stream)
{
    hipDeviceSynchronize();
    check_cuda_error(hipGetLastError());
    // single-thread launch: deterministic output order, debugging only
    hipLaunchKernelGGL(( print_kernel), dim3(1), dim3(1), 0, stream, buf, size);
    hipDeviceSynchronize();
    check_cuda_error(hipGetLastError());
}
// Host-side debug helper: prints mean(|x|) and sum(|x|) of device buffer `buf`.
// Sync/error-check pairs bracket the single-thread launch so failures from
// earlier work are not blamed on this kernel.
template <typename T>
void print_abs_mean(const T* buf, uint size, hipStream_t stream)
{
    hipDeviceSynchronize();
    check_cuda_error(hipGetLastError());
    hipLaunchKernelGGL(( print_abs_mean_kernel), dim3(1), dim3(1), 0, stream, buf, size);
    hipDeviceSynchronize();
    check_cuda_error(hipGetLastError());
}
template void print_first_k(const float*, uint size, hipStream_t);
template void print_first_k(const half*, uint size, hipStream_t);
template void print_first_k(const int*, uint size, hipStream_t);
template void print_abs_mean(const float* buf, uint size, hipStream_t stream);
template void print_abs_mean(const half* buf, uint size, hipStream_t stream);
template void print_abs_mean(const int* buf, uint size, hipStream_t stream);
/* **************************** end of Debug tools *********************************** */
/* *************************** depreciated kernels *********************************** */
// Deprecated first-pass top-K selection.
// The grid covers N = beam_width * vocab_size candidates per batch; each block
// extracts its local top-K by K rounds of block-wide max reduction. The thread
// owning the round's maximum writes the flattened candidate id and then
// retires its value with a -1e20f sentinel.
// NOTE(review): if several threads hold exactly equal float values, more than
// one can match s_max_val in a round and write the same output slot —
// tolerable for this deprecated path, but not an exact top-K.
template <typename T>
__global__
void topK_kernel(const T* log_probs, int* ids, const int batch_size, const int N, const int K)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    float val, max_val;
    __shared__ float s_max_val;
    for(int ite = 0; ite < batch_size; ++ite)
    {
        bool choosed = false;  // set once this thread's value has been emitted
        // out-of-range threads contribute a sentinel that can never win
        val = (tid < N ) ? (float)log_probs[ite * N + tid] : -1e20f;
        for(int kids = 0; kids < K; ++kids)
        {
            max_val = blockReduceMax<float>(val);
            if(threadIdx.x == 0)
                s_max_val = max_val;
            __syncthreads();
            if(s_max_val == val && !choosed && tid < N)
            {
                // store the k-th per-block winner as an id into the whole batch
                ids[ite * gridDim.x * K + blockIdx.x * K + kids] = tid + ite * N;
                val = -1e20f;
                choosed = true;
            }
        }
    }
}
// Deprecated second-pass top-K: a single block reduces each batch's N
// (= first-pass grid.x * K) surviving candidates down to the final K,
// writing them to ids[ite*K .. ite*K+K). Winners are collected via atomicAdd
// on a shared counter; ids claimed in the same round are insertion-sorted so
// the output stays ordered.
// NOTE(review): ids_before_sort holds at most 16 entries, so this assumes
// K <= 16; the id_offset parameter is unused.
template <typename T>
__global__
void topK_kernel_2nd(const T* log_probs, int* ids, const int batch_size, const int N, const int K, const int id_offset)
{
    int tid = threadIdx.x;
    float val, max_val;
    __shared__ float s_max_val;
    __shared__ int beam_index;           // number of winners collected so far
    __shared__ int ids_before_sort[16];  // collected winner ids, kept sorted
    for(int ite = 0; ite < batch_size; ++ite)
    {
        bool choosed = false;
        // gather this thread's candidate id and its log-prob (sentinel if absent)
        const int id = (tid < N) ? ids[ite * N + tid] : -1;
        val = (tid < N) ? (float)log_probs[id] : -1e20f;
        __syncthreads();
        if(tid == 0) beam_index = 0;
        if(tid < 16) ids_before_sort[tid] = -1;
        __syncthreads();
        while(beam_index < K){
            int begin_beam_index = beam_index;
            max_val = blockReduceMax<float>(val);
            if(threadIdx.x == 0){
                s_max_val = max_val;
            }
            __syncthreads();
            if(s_max_val == val && !choosed && id != -1)
            {
                // on ties, each maximal thread claims a distinct slot atomically
                int id_offset_ = atomicAdd(&beam_index, 1);
                ids_before_sort[id_offset_] = id;
                val = -1e20f;
                choosed = true;
            }
            __syncthreads();
            // simply sort the ids
            if(threadIdx.x == 0 && beam_index - begin_beam_index > 1){
                for(int i = begin_beam_index; i < beam_index; i++){
                    for(int j = i; j < beam_index; j++){
                        if(ids_before_sort[j] < ids_before_sort[i]){
                            int tmpid = ids_before_sort[j];
                            ids_before_sort[j] = ids_before_sort[i];
                            ids_before_sort[i] = tmpid;
                        }
                    }
                }
            }
        }
        __syncthreads();
        // overwrite the front of this batch's ids with the final, sorted top-K
        if(tid < K) ids[ite * K + tid] = ids_before_sort[tid];
        __syncthreads();
    }
}
// Deprecated two-pass top-K over beam_width * vocab_size candidates per batch.
// Pass 1: every 1024-thread block emits its local top beam_width ids into `ids`;
// pass 2: a single block reduces the grid.x * beam_width survivors per batch.
void topK(const float* log_probs, int* ids, const int batch_size, const int beam_width, const int vocab_size,
          hipStream_t stream)
{
    int N = beam_width * vocab_size;
    dim3 block(1024);
    dim3 grid((N - 1) / block.x + 1);  // ceil(N / 1024)
    /* First round topK, for each batch, get grid.x * K values */
    hipLaunchKernelGGL(( topK_kernel<float>), dim3(grid), dim3(block), 0, stream, log_probs, ids, batch_size, N, beam_width);
    /*Second round, for each batch, get the final TopK values out from grid.x * K values. */
    hipLaunchKernelGGL(( topK_kernel_2nd<float>), dim3(1), dim3(block), 0, stream, log_probs, ids, batch_size, beam_width * grid.x, beam_width, N);
}
// One block per token, one thread per hidden unit: copies the embedding row
// selected by word_ids[block] into the output tensor.
template <typename T>
__global__ void embedding_lookup_kernel(const T* embedding_table, const int* word_ids,
                                        const int hidden_units, T* from_tensor)
{
    const int token = blockIdx.x;
    const int unit = threadIdx.x;
    const T* src_row = embedding_table + word_ids[token] * hidden_units;
    from_tensor[token * hidden_units + unit] = src_row[unit];
}
template <typename T>
void embedding_lookup(const T* embedding_table, const int* word_ids, T* from_tensor,
                      const int batch_size, const int beam_width, const int hidden_units, hipStream_t stream)
{
    // One block per (batch, beam) token; threads span the hidden dimension,
    // hence the hard 1024-thread launch limit.
    assert(hidden_units <= 1024);
    const dim3 launch_grid(batch_size * beam_width);
    const dim3 launch_block(hidden_units);
    hipLaunchKernelGGL(( embedding_lookup_kernel), launch_grid, launch_block, 0, stream,
                       embedding_table, word_ids, hidden_units, from_tensor);
}
// Adds the Transformer sinusoidal position encoding for decode step `step`
// in-place to `output` (one row per block, one thread per channel, n = hidden
// size). The input is first scaled by sqrt(n); the first n/2 channels receive
// sin, the rest cos, with timescales spaced geometrically up to 10000.
template<typename T>
__global__
void sine_position_encoder_kernel(T* output, int step, int n){
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    float half_n = (float)n / 2.;
    // input = input * hidden_dim**0.5
    output[bid * n + tid] = output[bid * n + tid] * (T)sqrtf(float(n));
    float log_timescale_increment = __logf(10000) / (half_n - 1.f);
    float inv_timescales = __expf( (tid % (int)half_n) * -1 * log_timescale_increment );
    float scaled_time = inv_timescales * step;
    // first half of the channels use sin, second half cos
    T encoding_val = (tid < half_n) ? (T) __sinf(scaled_time) : (T) __cosf(scaled_time);
    output[bid * n + tid] = output[bid * n + tid] + encoding_val;
}
template<typename T>
void sine_position_encoder(
    T* output,
    int step,
    int m, int n, hipStream_t stream)
{
    // Row-per-block launch; the hidden size n must fit in one thread block.
    assert(n <= 1024);
    const dim3 launch_grid(m);
    const dim3 launch_block(n);
    hipLaunchKernelGGL(( sine_position_encoder_kernel<T>), launch_grid, launch_block, 0, stream, output, step, n);
}
template void embedding_lookup(const float* embedding_table, const int* word_ids, float* from_tensor,
const int batch_size, const int beam_width, const int hidden_units, hipStream_t stream);
template void embedding_lookup(const half* embedding_table, const int* word_ids, half* from_tensor,
const int batch_size, const int beam_width, const int hidden_units, hipStream_t stream);
template void sine_position_encoder(
float* output,
int step,
int m, int n,
hipStream_t stream);
template void sine_position_encoder(
half* output,
int step,
int m, int n,
hipStream_t stream);
/* *************************** end of depreciated kernels *********************************** */
}//namespace
| bedba927e6f8433af2747d9199c8529e791a4cda.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fastertransformer/common.h"
#include "cuda_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <climits>
#include <cfloat>
namespace fastertransformer{
// GELU activation, tanh approximation:
//   x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
template <typename T>
__inline__ __device__
T gelu(T x)
{
  const float inner = 0.7978845608028654f * (x + 0.044715f * x * x * x);
  const float cdf = 0.5f * (1.0f + tanhf(inner));
  return x * cdf;
}
// half2 specialization: the cubic term is computed in half precision, the
// tanh-based CDF in float for accuracy, and the final product back in half2.
template <>
__inline__ __device__
half2 gelu(half2 val)
{
  half2 val_pow3 = __hmul2(val, __hmul2(val, val));  // val^3, elementwise
  float2 tmp_pow = __half22float2(val_pow3);
  float2 tmp = __half22float2(val);
  tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
  tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
  return __hmul2(val, __float22half2_rn(tmp));
}
// Butterfly warp reduction: after five xor-shuffle rounds every lane holds the
// sum of all 32 lane values. FINAL_MASK comes from common.h; all lanes of the
// warp are expected to participate.
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
  for(int mask = 16; mask > 0; mask >>= 1)
    val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
  return val;
}
// Block-wide sum: warp-level reduce, warp leaders stage partials in shared
// memory, then the first warp reduces those partials. Only the first
// blockDim.x/32 shared slots are consumed, so blockDim.x is assumed to be a
// multiple of 32.
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
  static __shared__ T shared[32];  // one partial per warp (max 32 warps)
  int lane = threadIdx.x & 0x1f;   // lane id within the warp
  int wid = threadIdx.x >> 5;      // warp id within the block
  val = warpReduceSum<T>(val);
  if(lane == 0)
    shared[wid] = val;
  __syncthreads();
  // first warp loads the per-warp partials (zero for absent warps) and reduces
  val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f;
  val = warpReduceSum(val);
  return val;
}
// Butterfly warp max: every lane ends up holding the warp-wide maximum.
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
  for(int mask = 16; mask > 0; mask >>= 1)
    val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
  return val;
}
/* Calculate the maximum of all elements in a block */
// Same two-level scheme as blockReduceSum; warps that do not exist contribute
// -1e20f, which acts as -inf for the magnitudes used in this file.
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
  static __shared__ T shared[32];
  int lane = threadIdx.x & 0x1f; // in-warp idx
  int wid = threadIdx.x >> 5; // warp idx
  val = warpReduceMax(val); // get maxx in each warp
  if(lane == 0) // record in-warp maxx by warp Idx
    shared[wid] = val;
  __syncthreads();
  val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
  val = warpReduceMax<T>(val);
  return val;
}
// Fused bias-add + GELU over an [m, n] activation matrix.
// Threads cover the n columns in blockDim.x-sized chunks (n must be divisible
// by blockDim.x); blocks grid-stride over the m rows, so the bias value loaded
// into a register is reused across every row.
template <typename T>
__global__
void add_bias_act(T* out, const T* bias, int m, int n)
{
  T val, reg_bias;
  int row_id = blockIdx.x;
  int ite = n / blockDim.x;  // column chunks handled per thread
  int tid = threadIdx.x;
  for(int i = 0; i < ite; ++i)
  {
    // read-only cache load; the same bias element applies to every row
    reg_bias = __ldg(&bias[i * blockDim.x + tid]);
    row_id = blockIdx.x;
    while(row_id < m){
      val = out[tid + i * blockDim.x + row_id * n]+ reg_bias;
      out[tid + i * blockDim.x + row_id * n] = gelu<T>(val);
      row_id += gridDim.x;  // grid-stride over rows
    }
  }
}
// half specialization: processes two columns at a time through half2, so n
// must be even and divisible by 2 * blockDim.x. Same grid-stride-over-rows
// structure as the generic version.
template <>
__global__
void add_bias_act(half* out, const half* bias, int m, int n)
{
  half2 val, reg_bias;
  int row_id = blockIdx.x;
  int ite = n / blockDim.x / 2;  // half2 chunks per thread
  int tid = threadIdx.x;
  half2* out_ptr = (half2*) out;
  const half2* bias_ptr = (half2*) bias;
  for(int i = 0; i < ite; ++i)
  {
    reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]);
    row_id = blockIdx.x;
    while(row_id < m){
      val = out_ptr[tid + i * blockDim.x + row_id * n / 2];
      val = __hadd2(val, reg_bias);
      out_ptr[tid + i * blockDim.x + row_id * n / 2] = gelu<half2>(val);
      row_id += gridDim.x;  // grid-stride over rows
    }
  }
}
// Fused residual-add + bias-add + LayerNorm: out = LN(out + input + bias),
// row-wise over an [m, n] matrix. One block per row, one thread per column
// (requires blockDim.x == n, n <= 1024); statistics accumulate in float.
template <typename T>
__global__
void add_bias_input_layernorm(T* out, const T* input, const T* bias, const T* gamma, const T* beta, int m, int n)
{
  int tid = threadIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float local_out = 0.0f;
  local_out += (float)(out[blockIdx.x * n + tid] + input[blockIdx.x * n + tid] + __ldg(&bias[tid]));
  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();
  variance = blockReduceSum<float>((local_out - s_mean) * (local_out - s_mean));
  if(threadIdx.x == 0)
    s_variance = variance / n + 1e-6f;  // biased variance + epsilon
  __syncthreads();
  out[blockIdx.x * n + tid] =
    (T)(((local_out - s_mean) * rsqrtf(s_variance)) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
// half specialization: each thread owns one half2 (two adjacent columns);
// statistics are accumulated in float. Requires blockDim.x == n/2.
// Note: unlike the generic version, s_variance here stores 1/std directly.
template <>
__global__
void add_bias_input_layernorm(half* out, const half* input, const half* bias,
                              const half* gamma, const half* beta, int m, int n)
{
  int tid = threadIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float2 local_out_fp2;
  half2* out_ptr = (half2*)out;
  const half2* input_ptr = (const half2*)input;
  const half2* bias_ptr = (const half2*)bias;
  const half2* gamma_ptr = (const half2*)gamma;
  const half2* beta_ptr = (const half2*)beta;
  float local_out = 0.0f;
  int id = blockIdx.x * n / 2 + tid;
  // residual + bias in half2, then widen to float for the statistics
  local_out_fp2 = __half22float2(__hadd2(__hadd2(out_ptr[id], input_ptr[id]), __ldg(&bias_ptr[tid])));
  local_out += local_out_fp2.x;
  local_out += local_out_fp2.y;
  mean = blockReduceSum<float>(local_out);
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();
  variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
  variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
  variance = blockReduceSum<float>(variance);
  if(threadIdx.x == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);
  __syncthreads();
  float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
  float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
  local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
  local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
  out_ptr[id] = __float22half2_rn(local_out_fp2);
}
// Unrolled LayerNorm variant for n == 4 * blockDim.x (launchers select it for
// n = 768/1024): each thread keeps its 4 column values in registers across the
// mean and variance reductions, avoiding the re-read of global memory done by
// the generic kernel.
template <typename T>
__global__
void add_bias_input_layernorm_v2(T* out, const T* __restrict input, const T* __restrict bias,
                                 const T* __restrict gamma, const T* __restrict beta, int n)
{
  const int ite = 4;  // columns per thread
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  float local_out[ite];
  float sum = 0.0f;
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n + col_id;
    local_out[i] = (float)(out[id] + __ldg(&input[id]) + __ldg(&bias[col_id]));
    sum += local_out[i];
  }
  mean = blockReduceSum<float>(sum);
  if(tid == 0)
    s_mean = mean / n;
  __syncthreads();
  float var = 0.0f;
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    float diff = local_out[i] - s_mean;
    var += diff * diff;
  }
  variance = blockReduceSum<float>(var);
  if(tid == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);  // 1/std with epsilon
  __syncthreads();
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n + col_id;
    out[id] = (T)((local_out[i] - s_mean) * s_variance * (float)__ldg(&gamma[col_id]) + (float)__ldg(&beta[col_id]));
  }
}
// half2 variant of the unrolled LayerNorm: 4 half2 values (8 columns) per
// thread; launchers use it for n = 768/1024 with blockDim.x = n/8.
// The row sum is accumulated in half2 and only widened to float for the
// block reduction.
template <>
__global__
void add_bias_input_layernorm_v2(half* out, const half* __restrict input, const half* __restrict bias,
                                 const half* __restrict gamma, const half* __restrict beta, int n)
{
  const int ite = 4;
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  __shared__ float s_mean;
  __shared__ float s_variance;
  float mean = 0.0f;
  float variance = 0.0f;
  half2 local_out_half2[ite];
  half2* out_ptr = (half2*)out;
  const half2* input_ptr = (const half2*)input;
  const half2* bias_ptr = (const half2*)bias;
  const half2* gamma_ptr = (const half2*)gamma;
  const half2* beta_ptr = (const half2*)beta;
  // float sum = 0.0f;
  half2 sum = __float2half2_rn(0.0f);
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n / 2 + col_id;
    local_out_half2[i] = out_ptr[id] + __ldg(&input_ptr[id]) + __ldg(&bias_ptr[col_id]);
    sum += local_out_half2[i];
  }
  mean = blockReduceSum<float>((float)(sum.x + sum.y));
  if(threadIdx.x == 0)
    s_mean = mean / n;
  __syncthreads();
  float var = 0.0f;
  half2 s_mean_2 = __float2half2_rn(s_mean);
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    local_out_half2[i] = local_out_half2[i] - s_mean_2;  // center in half precision
    float v1 = (float)local_out_half2[i].x;
    float v2 = (float)local_out_half2[i].y;
    var += v1 * v1 + v2 * v2;
  }
  variance = blockReduceSum<float>(var);
  if(threadIdx.x == 0)
    s_variance = rsqrtf(variance / n + 1e-6f);  // 1/std with epsilon
  __syncthreads();
  half2 s_var_2 = __float2half2_rn(s_variance);
#pragma unroll
  for(int i = 0; i < ite; i++)
  {
    int col_id = i * blockDim.x + tid;
    int id = bid * n / 2 + col_id;
    out_ptr[id] = local_out_half2[i] * s_var_2 * __ldg(&gamma_ptr[col_id]) + __ldg(&beta_ptr[col_id]);
  }
}
// Launches the fused bias-add + GELU kernel over an [m, n] matrix.
// Each thread handles 4 columns (block = n/4), so the kernel only covers all
// of n when n is divisible by 4. Previously that precondition was implicit and
// a bad n silently skipped the tail columns; it is now asserted.
template <typename T>
void add_bias_act_kernelLauncher(T* out, const T* bias, int m, int n, cudaStream_t stream)
{
  assert(n % 4 == 0);
  dim3 grid(ceil(m / 4.));   // blocks grid-stride over rows inside the kernel
  dim3 block(n / 4);
  assert(block.x <= 1024);
  add_bias_act<T><<<grid, block, 0, stream>>>(out, bias, m, n);
}
// Dispatch for the fused add + LayerNorm: the common hidden sizes 768/1024 go
// to the unrolled v2 kernel (4 columns per thread), everything else uses one
// thread per column (n <= 1024).
template<typename T>
void add_bias_input_layernorm_kernelLauncher(T* out, const T* input, const T* bias,
                                             const T* gamma, const T* beta, int m, int n, cudaStream_t stream)
{
  dim3 grid(m);
  dim3 block(n);
  assert(n <= 1024);
  if(n == 768 || n == 1024)
    add_bias_input_layernorm_v2<T><<<grid, n / 4, 0, stream>>>(out, input, bias, gamma, beta, n);
  else
    add_bias_input_layernorm<T><<<grid, block, 0, stream>>>(out, input, bias, gamma, beta, m, n);
}
// half dispatch: columns are processed as half2 pairs, so the generic kernel
// runs with n/2 threads; large batches of the common sizes 768/1024 use the
// unrolled v2 kernel with n/8 threads (4 half2 per thread).
template <>
void add_bias_input_layernorm_kernelLauncher(half* out, const half* input, const half* bias,
                                             const half* gamma, const half* beta, int m, int n, cudaStream_t stream)
{
  dim3 grid(m);
  dim3 block(n / 2);
  assert(n / 2 <= 1024);
  if(m >= 512 && (n == 768 || n == 1024))
    add_bias_input_layernorm_v2<half><<<grid, n / 8, 0, stream>>>(out, input, bias, gamma, beta, n);
  else
    add_bias_input_layernorm<half><<<grid, block, 0, stream>>>(out, input, bias, gamma, beta, m, n);
}
// Fused bias-add + log-softmax over one row of logits per block.
// Finished rows are forced to a one-hot at end_id (via +/-FLT_MAX) before the
// softmax. Threads stride the vocab dimension n; three passes share the row
// max and normalizer through shared memory.
template <typename T>
__global__ void update_logits_kernel(T* logits, const T* bias, const int end_id, const bool* finished, const int n)
{
  int bid = blockIdx.x;
  bool finish = finished[bid];
  int offset = bid * n;
  float max_val = -1 * FLT_MAX;
  __shared__ float s_max_val;
  __shared__ float s_sum_val;
  // pass 1: add bias (or force one-hot) and track the per-thread max
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    if(finish)
      logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX;
    else
      logits[offset + tid] += bias[tid];
    max_val = max(max_val, logits[offset + tid]);
  }
  max_val = blockReduceMax<float>((float)max_val);
  if(threadIdx.x == 0)
    s_max_val = max_val;
  __syncthreads();
  // pass 2: exponentiate the shifted logits and accumulate the normalizer
  float sum_val = 0.0f;
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val);
    sum_val += (float)logits[offset + tid];
  }
  sum_val = blockReduceSum<float>(sum_val);
  if(threadIdx.x == 0)
    s_sum_val = sum_val;
  __syncthreads();
  // pass 3: normalize and take the log -> log-probabilities
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = logf((float)logits[offset + tid] / s_sum_val);
  }
}
// Bias-add only (no softmax): one block per row of the [m, n] logits, threads
// striding the vocab dimension. Finished rows are forced to a one-hot at
// end_id via +/-FLT_MAX; live rows simply receive the bias.
template <typename T>
__global__ void update_logits_kernel_without_softmax(T* logits, const T* bias, const int end_id, const bool* finished, const int n)
{
  const bool finish = finished[blockIdx.x];
  const int row_offset = blockIdx.x * n;
  for(int col = threadIdx.x; col < n; col += blockDim.x)
  {
    if(finish)
      logits[row_offset + col] = (col == end_id) ? FLT_MAX : -1 * FLT_MAX;
    else
      logits[row_offset + col] += bias[col];
  }
}
// Same structure as update_logits_kernel but stops at softmax: writes
// probabilities rather than log-probabilities.
template <typename T>
__global__ void update_logits_kernel_without_log(T* logits, const T* bias, const int end_id, const bool* finished, const int n)
{
  int bid = blockIdx.x;
  bool finish = finished[bid];
  int offset = bid * n;
  float max_val = -1 * FLT_MAX;
  __shared__ float s_max_val;
  __shared__ float s_sum_val;
  // pass 1: add bias (or force one-hot for finished rows), track the max
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    if(finish)
      logits[offset + tid] = (tid == end_id) ? FLT_MAX : -1 * FLT_MAX;
    else
      logits[offset + tid] += bias[tid];
    max_val = max(max_val, logits[offset + tid]);
  }
  max_val = blockReduceMax<float>((float)max_val);
  if(threadIdx.x == 0)
    s_max_val = max_val;
  __syncthreads();
  // pass 2: exponentiate shifted logits, accumulate the normalizer
  float sum_val = 0.0f;
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = __expf((float)logits[offset + tid] - s_max_val);
    sum_val += (float)logits[offset + tid];
  }
  sum_val = blockReduceSum<float>(sum_val);
  if(threadIdx.x == 0)
    s_sum_val = sum_val;
  __syncthreads();
  // pass 3: normalize (no log)
  for(int tid = threadIdx.x; tid < n; tid += blockDim.x)
  {
    logits[offset + tid] = ((float)logits[offset + tid] / s_sum_val);
  }
}
// Packs a padded [batch*max_seq_len, n] tensor into [valid_word_num, n] by
// dropping padding rows. One block per valid word; tmp_mask_offset[bid] is the
// number of padding slots preceding valid word bid, and is also persisted into
// mask_offset for the later rebuild pass.
template<typename T>
__global__ void remove_sequence_length_padding(const T* src, T* tgt,
                                               const int* tmp_mask_offset,
                                               int* mask_offset,
                                               const int n)
{
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  mask_offset[bid] = tmp_mask_offset[bid];
  const int src_seq_id = bid + mask_offset[bid];  // padded-row index
  const int tgt_seq_id = bid;                     // packed-row index
  for(int i = tid; i < n; i += blockDim.x)
  {
    tgt[tgt_seq_id * n + i] = src[src_seq_id * n + i];
  }
}
template<typename T>
void remove_sequence_length_padding_kernelLauncher(const T* src, T* tgt,
                                                   const int* tmp_mask_offset,
                                                   int* mask_offset,
                                                   const int m, const int n, cudaStream_t stream)
{
  // src: [batch_size*max_seq_len, hidden_dim] -> tgt: [valid_word_num, hidden_dim]
  // One block per valid word (m of them); 256 threads stride the hidden dim.
  const int threads_per_block = 256;
  remove_sequence_length_padding<<<m, threads_per_block, 0, stream>>>(src, tgt, tmp_mask_offset, mask_offset, n);
}
// Inverse of remove_sequence_length_padding: scatter row `bid` of the packed
// [valid_word_num, n] tensor back to padded position bid + mask_offset[bid].
template<typename T>
__global__ void rebuild_sequence_length_padding(const T* src, T* tgt,
                                                const int* mask_offset,
                                                const int n)
{
  const int src_row = blockIdx.x;
  const int tgt_row = src_row + mask_offset[src_row];
  const T* src_ptr = src + src_row * n;
  T* tgt_ptr = tgt + tgt_row * n;
  for(int i = threadIdx.x; i < n; i += blockDim.x)
  {
    tgt_ptr[i] = src_ptr[i];
  }
}
template<typename T>
void rebuild_sequence_length_padding_kernelLauncher(const T* src, T* tgt,
                                                    const int* mask_offset, const int m,
                                                    const int n, cudaStream_t stream)
{
  // src: [valid_word_num, hidden_dim] -> tgt: [batch_size*max_seq_len, hidden_dim]
  // One block per valid word (m of them); 256 threads stride the hidden dim.
  const int threads_per_block = 256;
  rebuild_sequence_length_padding<<<m, threads_per_block, 0, stream>>>(src, tgt, mask_offset, n);
}
// Single-thread kernel (launched <<<1,1>>>): a serial scan over the batch that
// records, for every valid word, how many padding slots precede it
// (tmp_mask_offset), plus the total number of valid words (valid_word_num[0]).
// The scan is order-dependent, hence the deliberate single-thread execution.
__global__ void build_sequence_length_padding_offset(const int* sequence_length,
  const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset)
{
  // do cumulated sum
  int total_seq_len = 0;  // running count of valid words
  int cum_offset = 0;     // padding slots seen so far
  int index = 0;          // next tmp_mask_offset entry to fill
  for(int i = 0; i < batch_size; i++)
  {
    const int seq_len = sequence_length[i];
    for(int j = 0; j < seq_len; j++)
    {
      tmp_mask_offset[index] = cum_offset;
      index++;
    }
    cum_offset += max_seq_len - seq_len;
    total_seq_len += seq_len;
  }
  valid_word_num[0] = total_seq_len;
}
// Host wrapper: the offset scan is inherently sequential, so it is launched on
// a single thread.
void build_sequence_length_padding_offset_kernelLauncher(const int* sequence_length,
  const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset,
  cudaStream_t stream)
{
  build_sequence_length_padding_offset<<<1, 1, 0, stream>>>(sequence_length,
    batch_size, max_seq_len, valid_word_num, tmp_mask_offset);
}
template void rebuild_sequence_length_padding_kernelLauncher(const float* src, float* tgt,
const int* mask_offset, const int m,
const int n, cudaStream_t stream);
template void rebuild_sequence_length_padding_kernelLauncher(const half* src, half* tgt,
const int* mask_offset, const int m,
const int n, cudaStream_t stream);
template void remove_sequence_length_padding_kernelLauncher(const float* src, float* tgt,
const int* tmp_mask_offset,
int* mask_offset, const int m,
const int n, cudaStream_t stream);
template void remove_sequence_length_padding_kernelLauncher(const half* src, half* tgt,
const int* tmp_mask_offset,
int* mask_offset, const int m,
const int n, cudaStream_t stream);
void update_logits(float* logits, const float* bias, const int end_id, const bool* finished,
                   const int m, const int n, cudaStream_t stream)
{
  // One block per row of the [m, n] logits; n is the vocab size (often tens of
  // thousands), so the kernel strides it with up to 1024 threads.
  const dim3 launch_grid(m);
  const dim3 launch_block(min(n, 1024));
  update_logits_kernel<float><<<launch_grid, launch_block, 0, stream>>>(logits, bias, end_id, finished, n);
}
void update_logits_without_softmax(float* logits, const float* bias, const int end_id, const bool* finished,
                                   const int m, const int n, cudaStream_t stream)
{
  // Bias-add / finished-row masking only; no softmax. One block per row,
  // up to 1024 threads striding the vocab dimension n.
  const dim3 launch_grid(m);
  const dim3 launch_block(min(n, 1024));
  update_logits_kernel_without_softmax<float><<<launch_grid, launch_block, 0, stream>>>(logits, bias, end_id, finished, n);
}
void update_logits_without_log(float* logits, const float* bias, const int end_id, const bool* finished,
                               const int m, const int n, cudaStream_t stream)
{
  // Softmax without the final log (probabilities, not log-probs). One block
  // per row, up to 1024 threads striding the vocab dimension n.
  const dim3 launch_grid(m);
  const dim3 launch_block(min(n, 1024));
  update_logits_kernel_without_log<float><<<launch_grid, launch_block, 0, stream>>>(logits, bias, end_id, finished, n);
}
template void add_bias_act_kernelLauncher<float>(
float* out, const float* bias, int m, int n, cudaStream_t stream);
template void add_bias_input_layernorm_kernelLauncher<float>(
float* out, const float* input, const float* bias, const float* gamma, const float* beta,
int m, int n, cudaStream_t stream);
template void add_bias_act_kernelLauncher<half>(
half* out, const half* bias, int m, int n, cudaStream_t stream);
template void add_bias_input_layernorm_kernelLauncher<half>(
half* out, const half* input, const half* bias, const half* gamma, const half* beta,
int m, int n, cudaStream_t stream);
/* *********************************** Debug tools *********************************** */
// Debug-only kernel, meant to be launched with a single thread (<<<1,1>>>):
// serially scans `buf` and prints sum(|x|) and mean(|x|).
// BUG FIX: `sum` was an uninitialized local that was read before being
// written, so the printed statistics were garbage; it is now zero-initialized.
template <typename T>
__global__
void print_abs_mean_kernel(const T* buf, uint size)
{
  float sum = 0.0f;
  for(uint i = 0; i < size; i++)
  {
    sum += fabsf((float)buf[i]);
    // printf("[INFO] buf[%d] %f \n", i, buf[i]);
  }
  printf("mean: %f \n", (float) sum / (float) size);
  printf("sum: %f \n", sum);
}
// Debug helper kernel: intended for a single-thread launch; prints the first
// `size` values of the device buffer as floats, space-separated, newline-terminated.
template <typename T>
__global__
void print_kernel(const T* buf, uint size)
{
  for(int idx = 0; idx < size; ++idx)
  {
    printf("%f ", (float)buf[idx]);
  }
  printf("\n");
}
// Host-side debug helper: prints the first `size` elements of device buffer `buf`.
// The sync/error-check pairs bracket the launch deliberately: the first pair
// flushes errors from earlier asynchronous work, the second surfaces errors
// from the print kernel itself.
template <typename T>
void print_first_k(const T* buf, uint size, cudaStream_t stream)
{
  cudaDeviceSynchronize();
  check_cuda_error(cudaGetLastError());
  // single-thread launch: deterministic output order, debugging only
  print_kernel<<<1, 1, 0, stream>>>(buf, size);
  cudaDeviceSynchronize();
  check_cuda_error(cudaGetLastError());
}
// Host-side debug helper: prints mean(|x|) and sum(|x|) of device buffer `buf`.
// Sync/error-check pairs bracket the single-thread launch so failures from
// earlier work are not blamed on this kernel.
template <typename T>
void print_abs_mean(const T* buf, uint size, cudaStream_t stream)
{
  cudaDeviceSynchronize();
  check_cuda_error(cudaGetLastError());
  print_abs_mean_kernel<<<1, 1, 0, stream>>>(buf, size);
  cudaDeviceSynchronize();
  check_cuda_error(cudaGetLastError());
}
template void print_first_k(const float*, uint size, cudaStream_t);
template void print_first_k(const half*, uint size, cudaStream_t);
template void print_first_k(const int*, uint size, cudaStream_t);
template void print_abs_mean(const float* buf, uint size, cudaStream_t stream);
template void print_abs_mean(const half* buf, uint size, cudaStream_t stream);
template void print_abs_mean(const int* buf, uint size, cudaStream_t stream);
/* **************************** end of Debug tools *********************************** */
/* *************************** depreciated kernels *********************************** */
// Deprecated first-pass top-K selection.
// The grid covers N = beam_width * vocab_size candidates per batch; each block
// extracts its local top-K by K rounds of block-wide max reduction. The thread
// owning the round's maximum writes the flattened candidate id and then
// retires its value with a -1e20f sentinel.
// NOTE(review): exactly-equal float values can make several threads match
// s_max_val in one round and write the same slot — tolerable for this
// deprecated path, but not an exact top-K.
template <typename T>
__global__
void topK_kernel(const T* log_probs, int* ids, const int batch_size, const int N, const int K)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  float val, max_val;
  __shared__ float s_max_val;
  for(int ite = 0; ite < batch_size; ++ite)
  {
    bool choosed = false;  // set once this thread's value has been emitted
    // out-of-range threads contribute a sentinel that can never win
    val = (tid < N ) ? (float)log_probs[ite * N + tid] : -1e20f;
    for(int kids = 0; kids < K; ++kids)
    {
      max_val = blockReduceMax<float>(val);
      if(threadIdx.x == 0)
        s_max_val = max_val;
      __syncthreads();
      if(s_max_val == val && !choosed && tid < N)
      {
        // store the k-th per-block winner as an id into the whole batch
        ids[ite * gridDim.x * K + blockIdx.x * K + kids] = tid + ite * N;
        val = -1e20f;
        choosed = true;
      }
    }
  }
}
// Deprecated second-pass top-K: a single block reduces each batch's N
// (= first-pass grid.x * K) surviving candidates down to the final K,
// writing them to ids[ite*K .. ite*K+K). Winners are collected via atomicAdd
// on a shared counter; ids claimed in the same round are insertion-sorted so
// the output stays ordered.
// NOTE(review): ids_before_sort holds at most 16 entries, so this assumes
// K <= 16; the id_offset parameter is unused.
template <typename T>
__global__
void topK_kernel_2nd(const T* log_probs, int* ids, const int batch_size, const int N, const int K, const int id_offset)
{
  int tid = threadIdx.x;
  float val, max_val;
  __shared__ float s_max_val;
  __shared__ int beam_index;           // number of winners collected so far
  __shared__ int ids_before_sort[16];  // collected winner ids, kept sorted
  for(int ite = 0; ite < batch_size; ++ite)
  {
    bool choosed = false;
    // gather this thread's candidate id and its log-prob (sentinel if absent)
    const int id = (tid < N) ? ids[ite * N + tid] : -1;
    val = (tid < N) ? (float)log_probs[id] : -1e20f;
    __syncthreads();
    if(tid == 0) beam_index = 0;
    if(tid < 16) ids_before_sort[tid] = -1;
    __syncthreads();
    while(beam_index < K){
      int begin_beam_index = beam_index;
      max_val = blockReduceMax<float>(val);
      if(threadIdx.x == 0){
        s_max_val = max_val;
      }
      __syncthreads();
      if(s_max_val == val && !choosed && id != -1)
      {
        // on ties, each maximal thread claims a distinct slot atomically
        int id_offset_ = atomicAdd(&beam_index, 1);
        ids_before_sort[id_offset_] = id;
        val = -1e20f;
        choosed = true;
      }
      __syncthreads();
      // simply sort the ids
      if(threadIdx.x == 0 && beam_index - begin_beam_index > 1){
        for(int i = begin_beam_index; i < beam_index; i++){
          for(int j = i; j < beam_index; j++){
            if(ids_before_sort[j] < ids_before_sort[i]){
              int tmpid = ids_before_sort[j];
              ids_before_sort[j] = ids_before_sort[i];
              ids_before_sort[i] = tmpid;
            }
          }
        }
      }
    }
    __syncthreads();
    // overwrite the front of this batch's ids with the final, sorted top-K
    if(tid < K) ids[ite * K + tid] = ids_before_sort[tid];
    __syncthreads();
  }
}
// Deprecated two-pass top-K over beam_width * vocab_size candidates per batch.
// Pass 1: every 1024-thread block emits its local top beam_width ids into `ids`;
// pass 2: a single block reduces the grid.x * beam_width survivors per batch.
void topK(const float* log_probs, int* ids, const int batch_size, const int beam_width, const int vocab_size,
          cudaStream_t stream)
{
  int N = beam_width * vocab_size;
  dim3 block(1024);
  dim3 grid((N - 1) / block.x + 1);  // ceil(N / 1024)
  /* First round topK, for each batch, get grid.x * K values */
  topK_kernel<float><<<grid, block, 0, stream>>>(log_probs, ids, batch_size, N, beam_width);
  /*Second round, for each batch, get the final TopK values out from grid.x * K values. */
  topK_kernel_2nd<float><<<1, block, 0, stream>>>(log_probs, ids, batch_size, beam_width * grid.x, beam_width, N);
}
// One block per token, one thread per hidden unit: copies the embedding row
// selected by word_ids[block] into the output tensor.
template <typename T>
__global__ void embedding_lookup_kernel(const T* embedding_table, const int* word_ids,
                                        const int hidden_units, T* from_tensor)
{
  const int token = blockIdx.x;
  const int unit = threadIdx.x;
  const T* src_row = embedding_table + word_ids[token] * hidden_units;
  from_tensor[token * hidden_units + unit] = src_row[unit];
}
template <typename T>
void embedding_lookup(const T* embedding_table, const int* word_ids, T* from_tensor,
                      const int batch_size, const int beam_width, const int hidden_units, cudaStream_t stream)
{
  // One block per (batch, beam) token; threads span the hidden dimension,
  // hence the hard 1024-thread launch limit.
  assert(hidden_units <= 1024);
  const dim3 launch_grid(batch_size * beam_width);
  const dim3 launch_block(hidden_units);
  embedding_lookup_kernel<<<launch_grid, launch_block, 0, stream>>>(embedding_table, word_ids, hidden_units, from_tensor);
}
// Adds the Transformer sinusoidal position encoding for decode step `step`
// in-place to `output` (one row per block, one thread per channel, n = hidden
// size). The input is first scaled by sqrt(n); the first n/2 channels receive
// sin, the rest cos, with timescales spaced geometrically up to 10000.
template<typename T>
__global__
void sine_position_encoder_kernel(T* output, int step, int n){
  int tid = threadIdx.x;
  int bid = blockIdx.x;
  float half_n = (float)n / 2.;
  // input = input * hidden_dim**0.5
  output[bid * n + tid] = output[bid * n + tid] * (T)sqrtf(float(n));
  float log_timescale_increment = __logf(10000) / (half_n - 1.f);
  float inv_timescales = __expf( (tid % (int)half_n) * -1 * log_timescale_increment );
  float scaled_time = inv_timescales * step;
  // first half of the channels use sin, second half cos
  T encoding_val = (tid < half_n) ? (T) __sinf(scaled_time) : (T) __cosf(scaled_time);
  output[bid * n + tid] = output[bid * n + tid] + encoding_val;
}
template<typename T>
void sine_position_encoder(
  T* output,
  int step,
  int m, int n, cudaStream_t stream)
{
  // Row-per-block launch; the hidden size n must fit in one thread block.
  assert(n <= 1024);
  const dim3 launch_grid(m);
  const dim3 launch_block(n);
  sine_position_encoder_kernel<T><<<launch_grid, launch_block, 0, stream>>>(output, step, n);
}
template void embedding_lookup(const float* embedding_table, const int* word_ids, float* from_tensor,
const int batch_size, const int beam_width, const int hidden_units, cudaStream_t stream);
template void embedding_lookup(const half* embedding_table, const int* word_ids, half* from_tensor,
const int batch_size, const int beam_width, const int hidden_units, cudaStream_t stream);
template void sine_position_encoder(
float* output,
int step,
int m, int n,
cudaStream_t stream);
template void sine_position_encoder(
half* output,
int step,
int m, int n,
cudaStream_t stream);
/* *************************** end of depreciated kernels *********************************** */
}//namespace
|
8c2ff3c2db57f923c2d465344b0c2fd406983185.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
//#define BIN_COUNT 8
#define BLOCK_SIZE 64
#define N 512
// One thread per input element: each thread maps its value into one of
// BIN_COUNT bins via modulo and increments the global counter atomically.
// NOTE(review): there is no gid < N guard — correct only because the launcher
// uses exactly N / BLOCK_SIZE full blocks; d_bins must be zeroed by the caller
// before launch, since the kernel only ever adds.
__global__ void histogram(int* d_in, int* d_bins, const int BIN_COUNT)
{
    // Storing the global thread id
    int gid = threadIdx.x + (blockDim.x * blockIdx.x);
    // Storing the bin number
    int whatBin = d_in[gid] % BIN_COUNT;
    // Atomic add on the bin in global memory (incrementing by one).
    atomicAdd(&(d_bins[whatBin]), 1);
}
// This function sets up and runs the histogram kernel: allocates device
// buffers, copies the input over, launches one thread per element, and copies
// the N / BLOCK_SIZE bin counters back into h_bins.
// BUG FIXES: d_bins is now zeroed before the kernel runs (hipMalloc does not
// initialize memory and the kernel only atomicAdds, so the counts were
// previously garbage), and both device allocations are freed (they leaked).
void histogramMiddle(int* h_input, int* h_bins) {
    // Creating pointers for the device input and bin storage
    int* d_input, * d_bins;
    // Initializing the error variable
    hipError_t err;
    // Intalizing variables.
    int noThreads = BLOCK_SIZE;
    int noBlocks = N / BLOCK_SIZE;
    int BIN_COUNT = 8;  // N / BLOCK_SIZE == 8 here, matching the bin allocation below
    // Allocating the input and bin on the device.
    err = hipMalloc((void**)&d_input, sizeof(int) * N);
    printf("\n Allocating d_input error %s \n", hipGetErrorString(err));
    err = hipMalloc((void**)&d_bins, sizeof(int) * (N / BLOCK_SIZE));
    printf("\n Allocating d_bins error %s \n", hipGetErrorString(err));
    // The counters must start at zero before the kernel accumulates into them.
    err = hipMemset(d_bins, 0, sizeof(int) * (N / BLOCK_SIZE));
    printf("\n Zeroing d_bins error %s \n", hipGetErrorString(err));
    // Copying the data to the GPU;
    err = hipMemcpy(d_input, h_input, sizeof(int) * N, hipMemcpyHostToDevice);
    printf("\n Copying input data from CPU -> GPU error: %s \n", hipGetErrorString(err));
    // Now call the Kernal
    hipLaunchKernelGGL(( histogram), dim3(noBlocks), dim3(noThreads) , 0, 0, d_input, d_bins, BIN_COUNT);
    err = hipDeviceSynchronize();
    printf("\n Kernel error: %s \n", hipGetErrorString(err));
    // Time to copy the bins back into the CPU
    err = hipMemcpy(h_bins, d_bins, sizeof(int) * (N / BLOCK_SIZE), hipMemcpyDeviceToHost);
    printf("\n Copying bins from GPU -> CPU error: %s \n", hipGetErrorString(err));
    for (int i = 0; i < (N / BLOCK_SIZE); i++) {
        printf("\n bin_id %i %i \n ", i % BLOCK_SIZE, h_bins[i]);
    }
    // Release the device allocations (previously leaked on every call).
    hipFree(d_input);
    hipFree(d_bins);
    return;
}
// Builds the test input (input[i] = i) and runs the histogram.
int main(void)
{
    // Initializing the arrays.
    int* input = new int[N];
    // The bin array must hold the N / BLOCK_SIZE (= 8) counts copied back by
    // histogramMiddle.  The original `new int[N % BLOCK_SIZE]` allocated ZERO
    // elements (512 % 64 == 0), so the copy-back overflowed the heap.
    int* bins = new int[N / BLOCK_SIZE];
    // Putting in values: input[0] = 0, ..., input[N-1] = N-1.
    for (int i = 0; i < N; i++) {
        input[i] = i;
    }
    // Calling the function that prepares the kernel.
    histogramMiddle(input, bins);
    // Release host memory (the original leaked both arrays).
    delete[] input;
    delete[] bins;
}
| 8c2ff3c2db57f923c2d465344b0c2fd406983185.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <iostream>
//#define BIN_COUNT 8
#define BLOCK_SIZE 64
#define N 512
// Classifies one input element per thread into bin (value % BIN_COUNT) and
// increments that bin with a global-memory atomic (different threads can hit
// the same bin, so the atomic is required).
// NOTE(review): no bounds guard on gid -- the launch must cover exactly the
// input length (gridDim.x * blockDim.x == element count); confirm at call site.
__global__ void histogram(int* d_in, int* d_bins, const int BIN_COUNT)
{
    // Storing the global thread id.
    int gid = threadIdx.x + (blockDim.x * blockIdx.x);
    // Storing the bin number.
    int whatBin = d_in[gid] % BIN_COUNT;
    // Atomic add on the bin in global memory (incrementing by one).
    atomicAdd(&(d_bins[whatBin]), 1);
}
// This funciton sets up the histogram kernal
// Sets up and runs the histogram kernel on the GPU, then prints the counts.
//   h_input - host array of N ints to classify
//   h_bins  - host array with room for the BIN_COUNT (= 8) counts
void histogramMiddle(int* h_input, int* h_bins) {
    // Creating pointers for the device input and bin storage.
    int* d_input, * d_bins;
    cudaError_t err;
    int noThreads = BLOCK_SIZE;
    int noBlocks = N / BLOCK_SIZE;   // assumes N is a multiple of BLOCK_SIZE
    int BIN_COUNT = 8;
    // Allocating the input and bins on the device.
    err = cudaMalloc((void**)&d_input, sizeof(int) * N);
    printf("\n Allocating d_input error %s \n", cudaGetErrorString(err));
    // Size the bins by BIN_COUNT, not N / BLOCK_SIZE: the kernel indexes
    // d_bins by (value % BIN_COUNT); the two sizes were only coincidentally
    // equal (512 / 64 == 8).
    err = cudaMalloc((void**)&d_bins, sizeof(int) * BIN_COUNT);
    printf("\n Allocating d_bins error %s \n", cudaGetErrorString(err));
    // Zero the bins: atomicAdd only increments, so uninitialized device
    // memory would produce garbage counts.
    err = cudaMemset(d_bins, 0, sizeof(int) * BIN_COUNT);
    printf("\n Zeroing d_bins error %s \n", cudaGetErrorString(err));
    // Copying the data to the GPU.
    err = cudaMemcpy(d_input, h_input, sizeof(int) * N, cudaMemcpyHostToDevice);
    printf("\n Copying input data from CPU -> GPU error: %s \n", cudaGetErrorString(err));
    // Now call the kernel.
    histogram<<< noBlocks, noThreads >>>(d_input, d_bins, BIN_COUNT);
    // cudaGetLastError catches launch-configuration failures; the synchronize
    // surfaces asynchronous execution errors.
    err = cudaGetLastError();
    printf("\n Kernel launch error: %s \n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();
    printf("\n Kernel error: %s \n", cudaGetErrorString(err));
    // Time to copy the bins back into the CPU.
    err = cudaMemcpy(h_bins, d_bins, sizeof(int) * BIN_COUNT, cudaMemcpyDeviceToHost);
    printf("\n Copying bins from GPU -> CPU error: %s \n", cudaGetErrorString(err));
    for (int i = 0; i < BIN_COUNT; i++) {
        printf("\n bin_id %i %i \n ", i, h_bins[i]);
    }
    // Release device memory (the original leaked both allocations).
    cudaFree(d_input);
    cudaFree(d_bins);
    return;
}
// Builds the test input (input[i] = i) and runs the histogram.
int main(void)
{
    // Initializing the arrays.
    int* input = new int[N];
    // The bin array must hold the N / BLOCK_SIZE (= 8) counts copied back by
    // histogramMiddle.  The original `new int[N % BLOCK_SIZE]` allocated ZERO
    // elements (512 % 64 == 0), so the copy-back overflowed the heap.
    int* bins = new int[N / BLOCK_SIZE];
    // Putting in values: input[0] = 0, ..., input[N-1] = N-1.
    for (int i = 0; i < N; i++) {
        input[i] = i;
    }
    // Calling the function that prepares the kernel.
    histogramMiddle(input, bins);
    // Release host memory (the original leaked both arrays).
    delete[] input;
    delete[] bins;
}
|
4fb5b2accb0a2cf2a5e9ed429f8653fca54e9005.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Adds the scalar b to a[i], one element per thread (threadIdx.x within a
// single block).  Each thread here touches a distinct index, so the atomic is
// not strictly required within one launch; it is kept unchanged.
// NOTE(review): assumes blockDim.x <= length of a -- confirm at the call site.
__global__ void addtoall(int* a, int b)
{
    int i = threadIdx.x;
    atomicAdd(&(a[i]), b);
}
// Demonstrates repeated element-wise scalar additions on the device:
// every element ends at 7 + 6*3 = 25.
int main(void)
{
    int N = 32;
    int *A = new int[N];
    // Initialize the host array: `new int[N]` leaves the values
    // indeterminate, so the original uploaded garbage to the device and
    // printed garbage + 25.  Zero it for a deterministic result.
    for (int i = 0; i < N; i++)
    {
        A[i] = 0;
    }
    int *d_A;
    hipMalloc((void**)&d_A, N*sizeof(int));
    hipMemcpy(d_A, A, N*sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( addtoall), dim3(1),dim3(N), 0, 0, d_A, 7);
    hipLaunchKernelGGL(( addtoall), dim3(1),dim3(N), 0, 0, d_A, 3);
    hipLaunchKernelGGL(( addtoall), dim3(1),dim3(N), 0, 0, d_A, 3);
    hipLaunchKernelGGL(( addtoall), dim3(1),dim3(N), 0, 0, d_A, 3);
    hipLaunchKernelGGL(( addtoall), dim3(1),dim3(N), 0, 0, d_A, 3);
    hipLaunchKernelGGL(( addtoall), dim3(1),dim3(N), 0, 0, d_A, 3);
    hipLaunchKernelGGL(( addtoall), dim3(1),dim3(N), 0, 0, d_A, 3);
    // hipMemcpy is blocking, so no explicit synchronize is needed here.
    hipMemcpy(A, d_A, N*sizeof(int), hipMemcpyDeviceToHost);
    for(int i =0; i<N; i++)
    {
        printf("%d ", A[i]);
    }
    // Release device and host memory (the original leaked both).
    hipFree(d_A);
    delete[] A;
}
| 4fb5b2accb0a2cf2a5e9ed429f8653fca54e9005.cu | #include <stdio.h>
// Adds the scalar b to a[i], one element per thread (threadIdx.x within a
// single block).  Each thread here touches a distinct index, so the atomic is
// not strictly required within one launch; it is kept unchanged.
// NOTE(review): assumes blockDim.x <= length of a -- confirm at the call site.
__global__ void addtoall(int* a, int b)
{
    int i = threadIdx.x;
    atomicAdd(&(a[i]), b);
}
// Demonstrates repeated element-wise scalar additions on the device:
// every element ends at 7 + 6*3 = 25.
int main(void)
{
    int N = 32;
    int *A = new int[N];
    // Initialize the host array: `new int[N]` leaves the values
    // indeterminate, so the original uploaded garbage to the device and
    // printed garbage + 25.  Zero it for a deterministic result.
    for (int i = 0; i < N; i++)
    {
        A[i] = 0;
    }
    int *d_A;
    cudaMalloc((void**)&d_A, N*sizeof(int));
    cudaMemcpy(d_A, A, N*sizeof(int), cudaMemcpyHostToDevice);
    addtoall<<<1,N>>>(d_A, 7);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    addtoall<<<1,N>>>(d_A, 3);
    // cudaMemcpy is blocking, so no explicit synchronize is needed here.
    cudaMemcpy(A, d_A, N*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i =0; i<N; i++)
    {
        printf("%d ", A[i]);
    }
    // Release device and host memory (the original leaked both).
    cudaFree(d_A);
    delete[] A;
}
|
831fe6c6038d0a7fd9a7cbe5cc68ba7f4c16902c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include "needle.h"
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// Returns the largest of the three integers a, b and c.
__device__ __host__ int
maximum( int a,
         int b,
         int c){
    // Reduce pairwise: first a vs b, then the winner vs c.
    int best = (a <= b) ? b : a;
    return (best <= c) ? c : best;
}
// Forward phase of the Needleman-Wunsch wavefront: fills the tiles lying on
// tile anti-diagonal `i` in the top-left part of the score matrix.  Each
// block computes one BLOCK_SIZE x BLOCK_SIZE tile; `temp` holds the tile
// plus a one-cell north/west halo and `ref` caches the substitution scores.
//   referrence  - substitution score matrix (read-only)
//   matrix_cuda - DP score matrix, updated in place
//   cols        - row pitch (in ints) of both matrices
//   penalty     - gap penalty subtracted for horizontal/vertical moves
//   i           - tile anti-diagonal processed by this launch
//   block_width - tiles per matrix side (unused in this phase)
// Launch contract: blockDim.x == BLOCK_SIZE, one block per tile on the
// diagonal.  NOTE(review): no bounds checks -- matrix dimensions are assumed
// to be exact multiples of BLOCK_SIZE; confirm at the call site.
__global__ void
needle_cuda_shared_1( int* referrence,
                      int* matrix_cuda,
                      int cols,
                      int penalty,
                      int i,
                      int block_width)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // Tile coordinates: the bx-th tile on anti-diagonal i.
    int b_index_x = bx;
    int b_index_y = i - 1 - bx;
    // Flat offsets of this thread's tile column and of the tile's north row,
    // west column and north-west corner halo cells.
    int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
    int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
    int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
    int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
    __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];  // tile scores + halo
    __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];       // cached substitution scores
    if (tx == 0)
        temp[tx][0] = matrix_cuda[index_nw];  // north-west corner halo cell
    // Cache this tile's substitution scores (one column per thread).
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        ref[ty][tx] = referrence[index + cols * ty];
    __syncthreads();
    temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];  // west halo column
    __syncthreads();
    temp[0][tx + 1] = matrix_cuda[index_n];  // north halo row
    __syncthreads();
    // Growing wavefront: cell anti-diagonals from the tile's top-left corner;
    // on step m, threads 0..m each compute one cell of diagonal m.
    for( int m = 0 ; m < BLOCK_SIZE ; m++){
        if ( tx <= m ){
            int t_index_x = tx + 1;
            int t_index_y = m - tx + 1;
            // Best of the diagonal (match/mismatch) and the two gap moves.
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();  // barrier kept outside the divergent branch
    }
    // Shrinking wavefront: the remaining anti-diagonals toward bottom-right.
    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m ;
            int t_index_y = BLOCK_SIZE - tx;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }
    // Write the finished tile (halo excluded) back to global memory.
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
// Backward phase of the Needleman-Wunsch wavefront: fills the tiles lying on
// the i-th anti-diagonal of the bottom-right part of the score matrix.  Same
// tile algorithm as needle_cuda_shared_1; only the block-to-tile mapping
// differs.
//   referrence  - substitution score matrix (read-only)
//   matrix_cuda - DP score matrix, updated in place
//   cols        - row pitch (in ints) of both matrices
//   penalty     - gap penalty subtracted for horizontal/vertical moves
//   i           - tile anti-diagonal (counted from the bottom-right corner)
//   block_width - total number of tiles per matrix side
// Launch contract: blockDim.x == BLOCK_SIZE, one block per tile on the
// diagonal.  NOTE(review): no bounds checks -- matrix dimensions are assumed
// to be exact multiples of BLOCK_SIZE; confirm at the call site.
__global__ void
needle_cuda_shared_2( int* referrence,
                      int* matrix_cuda,
                      int cols,
                      int penalty,
                      int i,
                      int block_width)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // Tile coordinates: the bx-th tile on this bottom-right anti-diagonal.
    int b_index_x = bx + block_width - i ;
    int b_index_y = block_width - bx -1;
    // Flat offsets of this thread's tile column and of the tile's north row,
    // west column and north-west corner halo cells.
    int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
    int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
    int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
    int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
    __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];  // tile scores + halo
    __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];       // cached substitution scores
    // Cache this tile's substitution scores (one column per thread).
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        ref[ty][tx] = referrence[index + cols * ty];
    __syncthreads();
    if (tx == 0)
        temp[tx][0] = matrix_cuda[index_nw];  // north-west corner halo cell
    temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];  // west halo column
    __syncthreads();
    temp[0][tx + 1] = matrix_cuda[index_n];  // north halo row
    __syncthreads();
    // Growing wavefront: cell anti-diagonals from the tile's top-left corner;
    // on step m, threads 0..m each compute one cell of diagonal m.
    for( int m = 0 ; m < BLOCK_SIZE ; m++){
        if ( tx <= m ){
            int t_index_x = tx + 1;
            int t_index_y = m - tx + 1;
            // Best of the diagonal (match/mismatch) and the two gap moves.
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();  // barrier kept outside the divergent branch
    }
    // Shrinking wavefront: the remaining anti-diagonals toward bottom-right.
    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m ;
            int t_index_y = BLOCK_SIZE - tx;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }
    // Write the finished tile (halo excluded) back to global memory.
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
| 831fe6c6038d0a7fd9a7cbe5cc68ba7f4c16902c.cu | #include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include "needle.h"
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
// Returns the largest of the three integers a, b and c.
__device__ __host__ int
maximum( int a,
         int b,
         int c){
    // Reduce pairwise: first a vs b, then the winner vs c.
    int best = (a <= b) ? b : a;
    return (best <= c) ? c : best;
}
// Forward phase of the Needleman-Wunsch wavefront: fills the tiles lying on
// tile anti-diagonal `i` in the top-left part of the score matrix.  Each
// block computes one BLOCK_SIZE x BLOCK_SIZE tile; `temp` holds the tile
// plus a one-cell north/west halo and `ref` caches the substitution scores.
//   referrence  - substitution score matrix (read-only)
//   matrix_cuda - DP score matrix, updated in place
//   cols        - row pitch (in ints) of both matrices
//   penalty     - gap penalty subtracted for horizontal/vertical moves
//   i           - tile anti-diagonal processed by this launch
//   block_width - tiles per matrix side (unused in this phase)
// Launch contract: blockDim.x == BLOCK_SIZE, one block per tile on the
// diagonal.  NOTE(review): no bounds checks -- matrix dimensions are assumed
// to be exact multiples of BLOCK_SIZE; confirm at the call site.
__global__ void
needle_cuda_shared_1( int* referrence,
                      int* matrix_cuda,
                      int cols,
                      int penalty,
                      int i,
                      int block_width)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // Tile coordinates: the bx-th tile on anti-diagonal i.
    int b_index_x = bx;
    int b_index_y = i - 1 - bx;
    // Flat offsets of this thread's tile column and of the tile's north row,
    // west column and north-west corner halo cells.
    int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
    int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
    int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
    int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
    __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];  // tile scores + halo
    __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];       // cached substitution scores
    if (tx == 0)
        temp[tx][0] = matrix_cuda[index_nw];  // north-west corner halo cell
    // Cache this tile's substitution scores (one column per thread).
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        ref[ty][tx] = referrence[index + cols * ty];
    __syncthreads();
    temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];  // west halo column
    __syncthreads();
    temp[0][tx + 1] = matrix_cuda[index_n];  // north halo row
    __syncthreads();
    // Growing wavefront: cell anti-diagonals from the tile's top-left corner;
    // on step m, threads 0..m each compute one cell of diagonal m.
    for( int m = 0 ; m < BLOCK_SIZE ; m++){
        if ( tx <= m ){
            int t_index_x = tx + 1;
            int t_index_y = m - tx + 1;
            // Best of the diagonal (match/mismatch) and the two gap moves.
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();  // barrier kept outside the divergent branch
    }
    // Shrinking wavefront: the remaining anti-diagonals toward bottom-right.
    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m ;
            int t_index_y = BLOCK_SIZE - tx;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }
    // Write the finished tile (halo excluded) back to global memory.
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
// Backward phase of the Needleman-Wunsch wavefront: fills the tiles lying on
// the i-th anti-diagonal of the bottom-right part of the score matrix.  Same
// tile algorithm as needle_cuda_shared_1; only the block-to-tile mapping
// differs.
//   referrence  - substitution score matrix (read-only)
//   matrix_cuda - DP score matrix, updated in place
//   cols        - row pitch (in ints) of both matrices
//   penalty     - gap penalty subtracted for horizontal/vertical moves
//   i           - tile anti-diagonal (counted from the bottom-right corner)
//   block_width - total number of tiles per matrix side
// Launch contract: blockDim.x == BLOCK_SIZE, one block per tile on the
// diagonal.  NOTE(review): no bounds checks -- matrix dimensions are assumed
// to be exact multiples of BLOCK_SIZE; confirm at the call site.
__global__ void
needle_cuda_shared_2( int* referrence,
                      int* matrix_cuda,
                      int cols,
                      int penalty,
                      int i,
                      int block_width)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    // Tile coordinates: the bx-th tile on this bottom-right anti-diagonal.
    int b_index_x = bx + block_width - i ;
    int b_index_y = block_width - bx -1;
    // Flat offsets of this thread's tile column and of the tile's north row,
    // west column and north-west corner halo cells.
    int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
    int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
    int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
    int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
    __shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];  // tile scores + halo
    __shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];       // cached substitution scores
    // Cache this tile's substitution scores (one column per thread).
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        ref[ty][tx] = referrence[index + cols * ty];
    __syncthreads();
    if (tx == 0)
        temp[tx][0] = matrix_cuda[index_nw];  // north-west corner halo cell
    temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];  // west halo column
    __syncthreads();
    temp[0][tx + 1] = matrix_cuda[index_n];  // north halo row
    __syncthreads();
    // Growing wavefront: cell anti-diagonals from the tile's top-left corner;
    // on step m, threads 0..m each compute one cell of diagonal m.
    for( int m = 0 ; m < BLOCK_SIZE ; m++){
        if ( tx <= m ){
            int t_index_x = tx + 1;
            int t_index_y = m - tx + 1;
            // Best of the diagonal (match/mismatch) and the two gap moves.
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();  // barrier kept outside the divergent branch
    }
    // Shrinking wavefront: the remaining anti-diagonals toward bottom-right.
    for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
        if ( tx <= m){
            int t_index_x = tx + BLOCK_SIZE - m ;
            int t_index_y = BLOCK_SIZE - tx;
            temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
                                                  temp[t_index_y][t_index_x-1] - penalty,
                                                  temp[t_index_y-1][t_index_x] - penalty);
        }
        __syncthreads();
    }
    // Write the finished tile (halo excluded) back to global memory.
    for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
        matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.